HttpWebResponse from WebException

using (var response = (HttpWebResponse)((Func<WebResponse>)(() =>
{
  try { return request.GetResponse(); }
  catch (WebException ex) { return ex.Response; }
}))()) //<-- too funny
using (var responseStream = response.GetResponseStream())
// ReSharper disable once AssignNullToNotNullAttribute
using (var readStream = new StreamReader(responseStream, Encoding.UTF8))
{
  return String.Format("{0} {1}. {2}", (int)response.StatusCode, response.StatusCode, readStream.ReadToEnd());
}

Light Custom Configuration Section

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Configuration;

namespace WebServiceClient
{
    /*URLElement class below represents a bundle of properties (URL, password, etc) for each endpoint we need to send to... 

    And the following custom app.configuration section allows us to maintain a list of these URLElements. 
    copied from here: http://www.abhisheksur.com/2011/09/writing-custom-configurationsection-to.html 
    and here: http://stackoverflow.com/questions/1755421/c-sharp-appsettings-is-there-a-easy-way-to-put-a-collection-into-appsetting 
    The basic gist is 3 fairly light implementation classes -- 
    URLElement = individual elements, URLCollection = collection of elements, and URLSection = new custom app.config section. 
    */
    public class URLSection : ConfigurationSection
    {
        //nugget: it seems that the [ConfigurationProperty()] attribute does not work on a static property (maybe that's true for all attributes??),
        //so the _URLs *instance* property satisfies this attribute requirement, but it's made private since we don't plan on using it directly,
        //then the URLs collection is exposed as a public *static* property.
        //this rigamarole merely allows for the slightly more succinct "URLSection.URLs" from the calling code rather than "URLSection.urlSection.URLs"
        public static URLCollection URLs { get { return _urlSection._URLs; } }

        [ConfigurationProperty("URLs")]
        private URLCollection _URLs { get { return this["URLs"] as URLCollection; } }
        private static readonly URLSection _urlSection = ConfigurationManager.GetSection("URLSection") as URLSection;
    }

    //nugget: the xml tag name of the element level nodes must be "add" by default 
    //to change to something else, it looks like one must implement a few more overrides like the ElementName & ConfigurationElementCollectionType properties. 
    //leaving it as the default seems just fine for current needs. 
    public class URLCollection : ConfigurationElementCollection
    {
        public URLElement this[int index] { get { return (URLElement)BaseGet(index); } }
        protected override ConfigurationElement CreateNewElement() { return new URLElement(); }
        protected override object GetElementKey(ConfigurationElement element) { return ((URLElement)(element)).Name; }
    }

    public class URLElement : ConfigurationElement
    {
        [ConfigurationProperty("Name", IsKey = true, IsRequired = true)]
        public string Name
        {
            get { return (string)this["Name"]; }
            set { this["Name"] = value; }
        }

        [ConfigurationProperty("Url", IsRequired = true)]
        public string Url
        {
            get { return (string)this["Url"]; }
            set { this["Url"] = value; }
        }

        [ConfigurationProperty("ContextID", IsRequired = true)]
        public string ContextID
        {
            get { return (string)this["ContextID"]; }
            set { this["ContextID"] = value; }
        }

        [ConfigurationProperty("Password", IsRequired = true)]
        public string Password
        {
            get { return (string)this["Password"]; }
            set { this["Password"] = value; }
        }

        [ConfigurationProperty("IgnoreWebServiceException", DefaultValue = false)]
        public bool IgnoreWebServiceException
        {
            get { return (bool)this["IgnoreWebServiceException"]; }
            set { this["IgnoreWebServiceException"] = value; }
        }

    }

}
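
The matching app.config would look roughly like this – a sketch, with the assembly name and the endpoint values assumed (the section name has to match the ConfigurationManager.GetSection("URLSection") call, the collection lives under the <URLs> element per the [ConfigurationProperty("URLs")] attribute, and the element-level nodes use the default <add> tag):

<configuration>
  <configSections>
    <section name="URLSection" type="WebServiceClient.URLSection, WebServiceClient" />
  </configSections>
  <URLSection>
    <URLs>
      <add Name="Prod" Url="https://example.com/service.asmx" ContextID="12345" Password="secret" />
      <add Name="Test" Url="https://test.example.com/service.asmx" ContextID="12345" Password="secret" IgnoreWebServiceException="true" />
    </URLs>
  </URLSection>
</configuration>

Calling code can then just enumerate URLSection.URLs (ConfigurationElementCollection is IEnumerable, so foreach (URLElement url in URLSection.URLs) works) and hit each endpoint.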

SQL Server Table-Valued Stored Procedure Parameters <=> ADO.Net

Nutshell:

  1. Declare a User Defined Type (UDT)
  2. Declare a stored proc parm of that UDT
  3. Fill an ADO.Net DataTable with the same columns as the UDT
  4. Assign the DataTable to a Parameter of an ADO.Net SqlCommand corresponding to the sproc

Notes:

Code Examples:

  1. File_UDT.sql
    CREATE TYPE File_UDT AS TABLE
    (
      FullPath varchar(900) PRIMARY KEY, 
      ModifiedDate datetime, 
      [Size] bigint
    )
    GO
    
    GRANT EXECUTE ON TYPE::dbo.File_UDT TO PUBLIC
    GO
  2. Files_UploadCompare.sql
    CREATE PROCEDURE [dbo].[Files_UploadCompare]
    @BackupProfileID INT,
    @NextDiscNumber INT = NULL OUT,
    @AllFiles File_UDT READONLY -- <= *****
    AS BEGIN
            
    SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
    
    -- new approach, simply return all files which don't match something already in the database 
    -- then we don't have to worry about partial results left in the tables ... 
    -- we just upload the current batch of files when we're with each burn and then start fresh with the next batch selection from there
    -- there will be no records in FileArchive unless they've been put there specifically as marking a "finalized" MediaSubset
    
    SELECT *,
      CONVERT(BIT, 0) AS Selected,
      CONVERT(BIT, 0) AS SkipError
    FROM @AllFiles a
    WHERE NOT EXISTS(
      SELECT 1
      FROM FileArchive fa
      JOIN [File] f ON fa.FileID = f.FileID
      WHERE f.FullPath = a.FullPath AND fa.ModifiedDate = a.ModifiedDate AND fa.Size = a.Size
    )
    
    DECLARE @IncrementalID int
    SELECT @IncrementalID = MAX(IncrementalID) FROM [Incremental] WHERE BackupProfileID = @BackupProfileID
    
    SELECT @NextDiscNumber = isnull(COUNT(1),0)+1 FROM MediaSubset WHERE IncrementalID = @IncrementalID
    
    END
    
  3. FileSystemNode.cs
    static private void ScanFolder(FolderNode folder, DataTable IncludedFiles)
    {
      DirectoryInfo dir = new DirectoryInfo(folder.FullPath);
      FileInfo[] files = dir.GetFiles("*.*", folder.IsSubSelected ? SearchOption.TopDirectoryOnly : SearchOption.AllDirectories);
      foreach (FileInfo file in files)
      {
        DataRow r = IncludedFiles.NewRow();
        r["FullPath"] = file.FullName;
        r["ModifiedDate"] = file.LastWriteTimeUtc;
        r["Size"] = file.Length; //megabytes
        IncludedFiles.Rows.Add(r);
      }
    }  
    
  4. MainWindow.xaml.cs
    using (Proc Files_UploadCompare = new Proc("Files_UploadCompare"))
    {
      Files_UploadCompare["@BackupProfileID"] = (int)cbxBackupProfiles.SelectedValue;
      Files_UploadCompare["@AllFiles"] = IncludedFilesTable; // <= ******
      WorkingFilesTable = Files_UploadCompare.ExecuteDataTable();
      lblCurrentDisc.Content = Files_UploadCompare["@NextDiscNumber"].ToString();
    }
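
  5. Plain ADO.Net equivalent (for reference)
    The Proc wrapper above hides the raw plumbing, so here's a rough sketch of nutshell steps 3 and 4 without it – the DataTable columns mirror File_UDT, and the connectionString/backupProfileID variables are assumed:
    // using System.Data; using System.Data.SqlClient;
    var includedFiles = new DataTable();
    includedFiles.Columns.Add("FullPath", typeof(string));
    includedFiles.Columns.Add("ModifiedDate", typeof(DateTime));
    includedFiles.Columns.Add("Size", typeof(long));
    // ... fill rows (e.g. via ScanFolder above) ...
    
    using (var conn = new SqlConnection(connectionString))
    using (var cmd = new SqlCommand("dbo.Files_UploadCompare", conn) { CommandType = CommandType.StoredProcedure })
    {
      cmd.Parameters.AddWithValue("@BackupProfileID", backupProfileID);
      var tvp = cmd.Parameters.AddWithValue("@AllFiles", includedFiles); // DataTable => table-valued parameter
      tvp.SqlDbType = SqlDbType.Structured;
      tvp.TypeName = "dbo.File_UDT";
      cmd.Parameters.Add("@NextDiscNumber", SqlDbType.Int).Direction = ParameterDirection.Output;
      
      var workingFiles = new DataTable();
      new SqlDataAdapter(cmd).Fill(workingFiles); // opens the connection itself if needed
      int nextDisc = (int)cmd.Parameters["@NextDiscNumber"].Value;
    }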

Tips:

  • (from here): If the login that SqlCommandBuilder.DeriveParameters is run under does not have permission to access the UDT, no error will be thrown – the method will return successfully, but the SqlCommand.Parameters collection will not contain the UDT parameter!!!
  • Granting permissions on a type (from here): GRANT EXECUTE ON TYPE::dbo.MyType TO public;

Links:

CAC (SmartCard) Enabling ASP.Net on IIS

  • The only configuration settings required are (IIS7 screenshots below):
    • Require SSL (this represents server side)
    • and either Accept or Require Client Certificates … “Accept” will populate the SmartCard’s cert info to your ASP.Net Request object (if it’s provided) but won’t deny access if one hasn’t been provided; “Require” will deny access unless a valid SmartCard Cert has been provided.

Tips:

  • One key thing to be aware of is how this works: the server sends a list of Trusted Root Certificates down to the client/browser, the browser compares that list to the Trusted Roots represented by the CAC present, and only if there’s a match will it prompt for the Certificate and PIN input.  Therefore, both the server and the client must have the same Trusted Root Certs installed for this to work; the easiest way to do this for the DoD CACs is to grab the latest install_root.exe and fire that up.
  • Another key thing I discovered: after you get the certs installed, go ahead and do a reboot. I was still getting 403 access denied errors that simply disappeared after I rebooted.
  • Throw these lines in an ASP.Net wizard-generated project’s Default.aspx to see the basic Cert info… the .Subject property is the juiciest looking info; there may be other properties of value (a fuller code-behind sketch is in the last tip below).
    • <%=Page.Request.ClientCertificate.IsPresent%>
    • <%=Page.Request.ClientCertificate.Subject%>
  • It’s probably also helpful to go ahead and make sure your server side SSL cert is properly named & not expired, such that you don’t get any warnings when you browse to the page… I was getting some errors related to that when I was working with Client Certs required.
    • this reference was helpful, see the section titled “Generate a Self Signed Certificate with the Correct Common Name”
    • this is the basic command you need to generate your own SSL cert for testing: SelfSSL /N:CN=www.whatever.com /V:9999
    • find SelfSSL in the IIS6 Reskit

(screenshots: IIS7 SSL Settings – Require SSL and Accept/Require client certificates)

2011 Q4 .Net State of the Union

  • .Net Framework v4 New Features
    • Parallel Linq Extensions
  • C# 4.0 New Features (all good stuff IMPO, variance being the hardest to grok)
    • Named and Optional Parameters – already use this quite a bit
    • Dynamic Support – handy way to ignore the complexity of ‘dynamically’ generated type declarations (e.g. linq results & COM Interop)
    • Co/Contra-Variance – primarily intended to make .Net Framework methods with Generic type parameters like IEnumerable<T> “work like we’d expect”, as is often quoted in explanatory texts (look for Jon Skeet and Eric Lippert).  It removes a couple rather unexpected annoyances that C# 3 would’ve snagged on (tiny example after this list).
      • Covariance represents “upcastable”.  Concerns types coming out of an API.  Represented by the “out” keyword, e.g. public interface IEnumerable<out T>
      • Contravariance is “downcastable”.  Typically concerns types passed into an API, e.g. public interface IComparer<in T>
      • Invariance is when something must go both in and out of a method and it can’t be declared differently on either side of the interface; it must be the same coming and going.
    • COM Interop
      • Dynamic Vars
      • Optional Parms
      • Optimized interop assembly file size
  • WPF4 New Features
    • New Controls – *DataGrid*, Calendar/DatePicker
    • Touch
    • Fancy Win7 Taskbar support
    • Easing functions (animation “easements”)
  • Silverlight 4 New Features
    • New Controls – ViewBox (auto resize), RichTextBox
    • Out-Of-Browser Support
    • Printing support
    • Drag and drop, clipboard access
    • More WPF compatible XAML parser
    • DataBinding improvements – StringFormat, collection grouping, INotifyDataErrorInfo or IDataErrorInfo support,
    • WCF Data Services enhancements – Direct POCO mapping via Linq queries on Open Data Protocol feeds.
    • Dynamic Support
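
A tiny example of the variance bullets above – both assignments compile under C# 4 (against .Net 4) and were compile errors before:

// using System.Collections.Generic;
IEnumerable<string> names = new List<string> { "John", "Jane" };
IEnumerable<object> objects = names;            // covariance: T only comes *out* of IEnumerable<out T>, so the upcast is safe

IComparer<object> anyComparer = Comparer<object>.Default;
IComparer<string> stringComparer = anyComparer; // contravariance: IComparer<in T> only takes T *in*, so a comparer of the broader type works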

Evolving a custom ADO.Net based repository

Full GitHub Source

demo post

Concept

A framework for maintaining column specific repository consistency with database update “side effects”.

Prerequisites

  • Stored procedures = business rules
    yep, i said it… My data layer is basically a home grown spin on ADO.Net Typed DataSets i.e. “Business” Class wrappers around ADO.Net classes (DataSets, DataRelations, DataTables, DataViews, DataRows, DataRowViews, etc).  I like to keep the majority of my business rules in stored procedures (“procs”).  I’ve experienced sustained, maintainable progress on LOB projects facilitated by an evolving relational model.  It’s often beneficial to meet growing awareness of business entity relationship requirements entirely in the proc queries with no changes necessary in higher layers.  Being able to change how a particular list is populated WITHOUT REQUIRING A BINARY RELEASE can be very powerful.  I realize this may all seem controversial to an OO mindset but it’s served me well over multiple database oriented projects. If your project is not inherently table oriented, please stop right here. This is very much a relationally oriented design approach. If one is fortunate enough to have the freedom to design the database as part of the overall solution scope and therefore stored procedures are fair game, then to not take advantage of procs as “business methods” is throwing away a huge asset. If one is not that lucky, and I realize big corporate projects tend not to be, then I completely understand taking great OO measures to insulate one’s beautiful architecture away from the messy legacy database structure. EntityFramework welcomes you 🙂  Otherwise, I feel that remaining near and dear to one’s mother database is a very fruitful relationship.  Procs are easily maintainable and deployable – no binaries, very scriptable.
  • Naturally, accepting dependence on a database for business rules does imply that our application must be generally connected to a database. One could argue this doesn’t fly for disconnected client scenarios, i.e. mobile device. However, it’s not far fetched to have a local database which provides this support which then updates to the big mother database (cloud, etc) when connectivity is restored. One could still leverage the readily deployable nature of stored procs to provide the right business smarts to the local DB. Indeed, a tiered relational centric model vs typical tiered OO centric architectures which relegate relational technology to the last tier only 🙂
  • MS SQL Server 2005+ – This post includes the usage of the SS 2005+ “OUTPUT” syntax. I’d be interested to know whether other DB’s support this but it’s more of a convenience and possibly mild performance benefit vs critical requirement.

Business Example

To frame a case which demonstrates the need for typical business requirements driven side effects, take a look at the adjacent screenshot.

In this scenario there is a household with some people in it (aka members or clients). In this business domain only one person can be the sponsor of a household at any given time. Likewise only one person can be flagged as the spouse (the partner who is not the sponsor). These designations are maintained as flags on the Clients database table. In this example, we’re exploring what needs to happen when the sponsor changes from one person to another. This can happen when the existing sponsor leaves the business system that grants this privilege, yet the spouse remains in the system and can therefore assume the sponsorship privilege while nothing else needs to change.

So, in the pictured UI, the current sponsor is Sgt. John Snuffy. To effect this desired change, the user would select the “Set Sponsor” button on the spouse entry (Mrs. Jane Snuffy). As is typical in tiered design, this button fires a Business Object method – SetSponsor(…)

By design, my Business Class methods tend to be fairly light wrappers around proc calls. For example:

public void SetSponsor(string NewSponsorClientGUID, bool FixExistingPackageLinks)
{
  using (iTRAACProc Sponsor_SetSponsor = new iTRAACProc("Sponsor_SetSponsor"))
  {
    Sponsor_SetSponsor["@SponsorGUID"] = GUID;
    Sponsor_SetSponsor["@NewSponsorClientGUID"] = NewSponsorClientGUID;
    Sponsor_SetSponsor["@FixExistingPackageLinks"] = FixExistingPackageLinks;
    TableCache(Sponsor_SetSponsor);
    HouseMembers = HouseMembers; //for some reason OnPropertyChanged("HouseMembers") didn't refresh the Members Grid, i don't have a good guess but this little hack worked immediately so i'm moving on
  }
}

full source

The TableCache(…) call above is the huckleberry. The TableCache method is implemented in the BusinessBase class… it fires the sproc and then goes into the DataSet.Merge() logic explained below…

While we’re looking at this code, let me quickly divert to explain the “Proc” class. Nutshell: Proc is a convenient wrapper around ADO.Net SqlCommand. Among other things it does the SqlCommandBuilder.DeriveParameters() + caching thing that you’ll find in many similar wrappers like this (e.g. Microsoft’s Data Access Application Block – I just didn’t fall in love with their API and wanted my own spin). DeriveParameters() removes the dreary burden of all that boring proc parm definition boilerplate code prior to each proc call (add param by name, set the datatype, etc.) and just pulls all that out of the database metadata that already knows all that information anyway – brilliant. Therefore we get right to the point of assigning values to named proc parms and firing the query. SqlClientHelpers.cs contains the Proc class as well as all kinds of data helper methods that have evolved over several projects. I wouldn’t want to start a database project without it at this point.
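
For the curious, the DeriveParameters pattern at the heart of it is roughly this (a stripped-down sketch – the real Proc class in SqlClientHelpers.cs adds parameter caching, output parm handling, multi-table support, etc., and the names here are just illustrative):

// using System; using System.Data; using System.Data.SqlClient;
public class ProcSketch : IDisposable
{
  private readonly SqlCommand _cmd;

  public ProcSketch(string procName, string connectionString)
  {
    _cmd = new SqlCommand(procName, new SqlConnection(connectionString)) { CommandType = CommandType.StoredProcedure };
    _cmd.Connection.Open();
    SqlCommandBuilder.DeriveParameters(_cmd); // pull parm names/types straight out of the proc's metadata - no boilerplate
  }

  // indexer so calling code just assigns values by parm name, e.g. proc["@SponsorGUID"] = guid;
  public object this[string parmName]
  {
    get { return _cmd.Parameters[parmName].Value; }
    set { if (_cmd.Parameters.Contains(parmName)) _cmd.Parameters[parmName].Value = value; } // silently ignore parms the proc doesn't declare
  }

  public DataTable ExecuteDataTable()
  {
    var result = new DataTable();
    new SqlDataAdapter(_cmd).Fill(result);
    return result;
  }

  public void Dispose()
  {
    _cmd.Connection.Dispose();
    _cmd.Dispose();
  }
}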

iTRAAC is the name of the project I pulled this example from. iTRAACProc is a very light subclass that assigns a few common domain specific parms (e.g. UserID) before handing off to the base Proc class. Conveniently, the Proc class’ parm[“@name”] indexer ignores anything that’s not declared on the specified proc, so only procs that actually require these parms will receive them.

Ok so back to our scenario… Besides setting the flag on Jane’s record to indicate she is now the sponsor, we also need to remove the sponsorship flag from John as well as flip the spouse flag from Jane to John (other queries and reports depend on having those flags consistent)… and oh, by the way, we also want to log all of this to the audit table so there’s a historical reference of what changes brought us to the current state of a household.  We want to drive all of this from the database proc logic and once the database has changed we want the UI to magically update to reflect all these changes and additions (including the new audit record aka “Diary” in the UI). So this is where we’ve arrived at what I call side effects (maybe there’s a better term?). That is – corresponding to a relatively innocent looking user action, our desired business rules will drive various values to be changed and entirely new rows to be added that are not directly maintained by the user. This is not simple CRUD table maintenance, this is real business rules with all the crazy interconnections that must be supported 🙂

Update-proc example (full source):

SET @TableNames = 'Client'
UPDATE iTRAAC.dbo.tblClients
SET StatusFlags = CASE WHEN RowGUID = @NewSponsorClientGUID THEN StatusFlags | POWER(2,0)
                  ELSE StatusFlags & ~POWER(2,0) END
OUTPUT INSERTED.RowGUID, CONVERT(BIT, INSERTED.StatusFlags & POWER(2,0)) AS IsSponsor
WHERE SponsorGUID = @SponsorGUID
AND RowGUID IN (@OldSponsorClientGUID, @NewSponsorClientGUID)

The first line (SET @TableNames = 'Client') is pertinent. By convention, all procs which need to participate in the approach I’m proposing in this post must have a @TableNames OUTPUT parameter.  This is a CSV list of table names corresponding to each resultset returned from the proc (in sequential order).  This way, the proc generically informs the datalayer what must be merged into the client data cache (i.e. repository).

The OUTPUT clause above is cool – rather than re-SELECTing the modified data, OUTPUT lets us leverage the fact that the UPDATE already knows which rows it hit. I dig it. Back on the client side, the datalayer takes that PARTIAL (i.e. very column-specific) result-set and merges it back into the cache like so (full source):

//nugget: DataSet.Merge(DataTable) has become a real linchpin in the whole data roundtrip approach
//nugget: in a nutshell, update procs return a bare minimum of updated fields in a return resultset along with a corresponding CSV list of @TableNames
DataTable cachedTable = dsCache.Tables[tableName];
dsCache.Merge(incomingTable, false, (cachedTable == null) ? MissingSchemaAction.AddWithKey : MissingSchemaAction.Ignore); //PreserveChanges pretty much has to be false in order to count on what comes back getting slammed in
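
Zooming out one level, the TableCache plumbing around that Merge call is roughly this shape (a sketch – the real method lives in the business base class, and the helper names here are illustrative):

// run the proc, then merge each returned resultset into the DataSet cache,
// using the proc's @TableNames CSV output parm to know which cached table each resultset belongs to
protected void TableCacheSketch(DataSet dsCache, SqlCommand cmd)
{
  var fetched = new DataSet();
  new SqlDataAdapter(cmd).Fill(fetched); // resultsets land as Table, Table1, Table2... in order

  string[] tableNames = cmd.Parameters["@TableNames"].Value.ToString().Split(',');
  for (int i = 0; i < tableNames.Length && i < fetched.Tables.Count; i++)
  {
    DataTable incomingTable = fetched.Tables[i];
    incomingTable.TableName = tableNames[i].Trim(); // so the Merge lands on the right cached table

    DataTable cachedTable = dsCache.Tables[incomingTable.TableName];
    dsCache.Merge(incomingTable, false, (cachedTable == null) ? MissingSchemaAction.AddWithKey : MissingSchemaAction.Ignore);
  }
}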

The Big Picture

image

What this approach tees up is that your procs can drive an unlimited amount of side effects which can be granularly returned to the client side cache.

Since you can pick and choose exactly which columns are returned (via standard selects or OUTPUT clause) you can weave a fine tuned blend between exactly which fields are allowed to come back in the side effects and blast into the client cache vs what fields may have pending uncommitted user edits in the cache. That’s pretty cool.

View->ViewModel (MVVM) environments with robust declarative databinding, like WPF, really shine when you see all of these side effects immediately manifest on the UI just by bringing the data back into the BusinessObject(DataSet) cache (that the UI is bound to).  The procs are very much in control of the business logic and ultimately what’s displayed, yet without being coupled to the UI. Great stuff.

Additional perks in the code provided:

  • An interesting “union-like” benefit in the datalayer – I ran into requirements where the most appealing clean design was to modularize subroutine procs that would be called from other procs. Fair enough so far. On top of that I found need to return these field level data changes (aka side effects) for the same entity table, from multiple procs in the subroutine chain. e.g. Client –> Proc1 –> SubProc2 & SubProc3. The impact of burdening the T-SQL proc layer with capturing the multiple proc results and union’ing them together is ugly design. It wound up being very clean and convenient to defer the union of these multiple selects to the TableCache C# datalayer logic. The “union” effect is readily implemented by looping through the tables of the same name and using ADO.Net’s “DataTable.merge()” to apply each incoming rowset to the existing entities in the repository cache. Including matching primary keys in the incoming rowsets facilitates updates to cached entities vs inserts.
  • Handy initial client side rows – I should say, this next bit is actually a technique that’s struck me as convenient yet it’s not specifically dependent on the TableCache approach … these building blocks do all however play into each other to nicely address what I’ll call the “new row dilemma” … that is, one typically needs some blank rows to databind to when you’re creating a new record in the UI… but it’s often undesirable to physically manifest these rows in the database until you’re sure they’re really going to be committed… it really stinks to sacrifice data integrity constraints just to allow for initial empty rows… a typical solution is to DataTable.Rows.AddRow() on the client and leave the DB out of it until you commit fully validated rows… but now client code is responsible for initializing new rows. I hate that for a couple reasons. First, I want that logic in the procs, where I can evolve it at will at the database tier w/o needing to deploy a new client binary. Secondly, for client logic consistency, it’s much cleaner for new row logic to work exactly the same way as existing row logic. So the execution goes something like this:
    1. New row event on client generates a brand new GUID PK (some form of very unique ID seems fairly necessary to allow the client to do this autonomously from the DB).
    2. But otherwise the client logic just flows into the standard “GetEntityByID” proc call, passing the new GUID none the wiser whether it’s new or existing… i.e. zero logic flow difference between a new record vs an existing record, nirvana :).
    3. Of course this fresh GUID won’t get a row hit, which conditionally falls into the new row logic where I return a “fake” row populated with whatever defaults I desire… take note, I’m not actually inserting a row into the table and then selecting that back out, I’m doing a select with “hard coded” values and no “from table” clause (sketched just after this list)… that way I don’t insert junk data nor forsake constraints, but the new row logic is kept in the proc layer – beautiful.
    4. Lastly, when committing to the DB, you fire the typical upsert proc which checks if it’s doing an insert or update by seeing if the PK exists and acting accordingly.
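
In proc terms, that “fake row” step looks something like this (a sketch with made-up column names, grafted onto the iTRAAC tables used above):

IF EXISTS (SELECT 1 FROM tblClients WHERE RowGUID = @RowGUID)
  SELECT RowGUID, FirstName, LastName, StatusFlags
  FROM tblClients WHERE RowGUID = @RowGUID
ELSE
  -- no row hit = brand new GUID from the client... return defaults via a SELECT with no FROM clause,
  -- so nothing is physically inserted and no constraints are forsaken until the real upsert commits
  SELECT @RowGUID AS RowGUID, '' AS FirstName, '' AS LastName, 0 AS StatusFlags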

YASBE – Open Source Code Incremental Backup Windows WPF Application

The reason I started this little project is none of the premier backup packages currently support Bluray… I know that sounds amazing but check out the help forums for stuff like Acronis and Paragon and Yosemite… it’s not a pretty picture out there currently with regards to Bluray… and of course, I had already bought my BD drive before I started to realize how dismal this all was… so I was inclined to find a solution 🙂

I’ll admit right up front, the UI is a bit cluttered and terse… classic good-enough-for-own-purposes-in-the-time-I-had syndrome

(screenshot: YASBE main window)

  • full source svn repo – Update 3/24/15: Google is shutting down code.google.com 🙁 but provided easy migration to github: New source link.
  • Basically, it just works like a champ… I really like how it came together… WPF is awesome… it all feels very peppy & responsive on my “aging” Quad Core 2…
  • currently implemented on sql server 2008 (express)… should be relatively database agnostic in theory, but…
  • The one big sql server 2008 dependency that I do use is SQL Server table-valued stored proc parameters.
  • install the default database structure via .BAK file
  • This SQL Server table proc parm approach is a particularly fun optimization I’ve been itching to implement to see how it hangs together, ahead of using it elsewhere (whenever I can finally get my work to upgrade to SQL Server 2008!!! 🙂)
  • Anyway, as far as the actual application goes, see screenshot, it’s WPF 4 code with a lot of little tricks I’ve learned along the way with my other much larger scope WPF LOB project at work.
  • YASBE (“Yet Another Simple Backup Enabler”) immediately presents the typical checkboxed include/exclude filesystem tree where you select which folders are in and out… you can of course simply select a root drive letter if you’re organized to have everything you care about on one big data drive.
  • I underestimated the complexity of rolling my own folders treeview but I like the work I achieved in the .Net IO FileSystem code & the corresponding WPF TreeView XAML here (search for “TreeView”)… I’ve seen other examples of loading a WPF TreeView (telerik knowledge base etc)… but I feel like i did mine a little tighter… easier to copy/understand I think… the tree is efficiently lazy loading… ie it only scans the next set of folders down when you expand a parent
  • Then one would typically hit the “Select Next Disc’s Worth Of Files” button and YASBE cranks down the list until it’s included 25GB worth of new/changed files that are candidates for going to a Blu-ray disc.
  • the .Net DirectoryInfo.GetFiles() appears to be adequately performant on my average desktop hardware … it scans my 200GB+ of photos and other important documents in <16 seconds… actually it scans all those files, AND uploads it to sql server (via table stored proc parameter) and does the comparison to all the previously recorded date stamps to determine what is new/changed… AND sends that recordset back to the client and displays it on a datagrid, all that in 16 seconds… I’m absolutely pleased with that… I feel that the master blast of all those file records up to SQL Server using the table valued stored proc parm really nicely optimizes that step.
  • Then one would hit “copy to staging folder”… wait quite a bit longer to copy 25 GB to your Blu-ray’s staging folder (actually it’s effectively more around 23GB max from what I’ve read)
  • Then I highly recommend you burn your Blu-ray by drag/dropping your burn staging folder into Nero BurnLite (which is free)
  • Nero BurnLite has been 100% reliable for me and it’s a perfectly bare bones data disc burning software, exactly what I want, without any other fluff.
  • I had major reliability problems with Windows 7’s built-in disc burning facility!!!… I coastered 5 out of 6 tries before I bailed and went to Nero… it becomes mentally painful trial and error at 25GB a pop for a cheap arse like me :)… Yet Win7 seems absolutely fine for DVD/CD burning… I’ve burned those successfully w/o a glitch.
  • Here’s the interesting anecdotal evidence, after the burn, Nero spews out a list of mandatory renames for files that somehow wouldn’t fit the disc’s file system… which is UDF I believe… I’m wondering if Win7 doesn’t perform that kind of necessary bulletproofing and that’s why the burns would always fail several minutes in, after wildly jumping around between random %complete estimates and a schizophrenic progress bar.
  • Nero methodically clicks off percent-completes nice & fast … seems like 25GB only takes about 15mins… very doable… I did 8 x 25GB discs to cover my whole photo library while working on other things and it went by like clockwork.

ASP.Net (& Ajax) Notes

It’s been long enough that the little things tripped me when I fired up a new project… so here they are for “next time”:

  • new Guid() – this probably isn’t the one you want… it initializes to all zeros (i.e. {00000000-0000-0000-0000-000000000000})
    • System.Guid.NewGuid() is the one that generates a fresh unique value
  • Getting “Automatic Compilation” to work (i.e. just uploading your source files to the web server and not your bin folder)
    • Check the <%@ Page > directive & make sure it says CodeFile=”” not CodeBehind=””
    • If not then you’ll be getting errors like “Could not load type …”
    • CodeFile came along with ASP.Net 2.0 and corresponds to a “Web Project” (aka “Web Site”) vs. a “Web Application” which was the only option in ASP.Net 1.x
    • MSDN @ Page Reference 
    • Would love to know what bits need to be twiddled to make CodeFile the default rather than creating a new Web Site project and copying everything over???
    • This guy really goes deep into the differences between Web Sites and Apps <geez, who knew?!?>
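    • For example, the directive at the top of the page should look roughly like this (page/class names here are just the wizard defaults): <%@ Page Language="C#" CodeFile="Default.aspx.cs" Inherits="_Default" %>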
  • “Invalid FORMATETC structure”… just reload all the controls in the Ajax toolbox… how annoying
  • “Ajax client-side framework failed to load” error … this is when the shine on all that spiffy new Ajax stuff gets dull in a hurry
    • Absolutely TUUUUNS!! of reasons out there for this… for me it wound up working when I disabled debugging in the web.config on the production server: <compilation debug="False" strict="false"> … ok, yeah, i know that’s not really an answer but i don’t debug on that server anyway so for me it’s a permanent temporary fix 😉
    • Other folks said fire the Add/Remove Programs >  Repair option on .Net Framework 3.5 SP1 (didn’t help for me)
    • or make sure .AXD Extension is setup (was already)
      • IIS6 > {application folder} > (right mouse) Properties > Configuration button > Application extensions
      • IIS7 > Handler Mappings
      • Make sure to uncheck “Verify that file exists”
  • Login / Authentication
    • Great Login control FAQ: http://forums.asp.net/t/1403132.aspx
      • Notably, it shows the necessary “FormsAuthentication.Authenticate(username, password)” bits that’ll let you skip the built in SqlServer Membership support that would otherwise fire by default… handy if you’re going for a quick n’ dirty.
    • Another Q n’ D: throw some hard coded user/passwords under the web.config Forms Authentication tags:
            <system.web>
              <authentication mode="Forms">
                <forms>
                  <credentials passwordFormat="Clear">
                    <user name="admin" password="" />
                    <user name="user" password="" />
                  </credentials>
                </forms>
              </authentication>
            </system.web>
    • And on an intranet where everybody is signed into Windows already, just use Windows authentication for a drop dead easy single-sign-on implementation:
      • <authentication mode="Windows">
      • then Page.User.Identity.Name is their Windows login name… just take that and run with it for your own data based role security, etc.
  • Hierarchical Grid aka Nested Collapsable GridViews via Free/Stock ASP.Net AJAX controls
    • Here’s the baseline example that looks really good: http://mosesofegypt.net/post/Master5cDetail-with-CollapsiblePanelExtender-and-edit-detail-with-HoverMenuExtender-using-GridView.aspx
    • My beef was with the HoverMenu… i think it looks cheesy (screenshot, live demo)… i just wanted to use normal <asp:CommandField>’s to edit GridView rows.
    • Problem was, whenever you would click any of the CommandFields links, the corresponding nested GridView and Panel would just “go away” once the Async Postback’s refresh completed… the Panel appeared to be collapsed but even if you tried to expand it the GridView content was totally gone.
    • Banged all around trying to figure out why by debugging through the events that fired… was about to give up and then finally read something that mentioned Submit buttons behave differently than LinkButtons…
    • So I ditched the CommandField and just tossed in my own LinkButtons to do the same thing and voila, the Grid maintains its state… again, who knew!?
  • DataBinding nested controls declaratively – this was a good tip… but only for READ ONLY stuff
    • rather than messing with code behind, just declare your nested List control’s DataSource like this:
    • DataSource='<%#((DataRowView)Container.DataItem).CreateChildView("ProviderOfficeContact")%>'
    • unfortunately the limited way that _RowEditing just tosses all existing data contexts conflicts with this elegant approach, so you can’t go into edit mode with this kind of approach and have to stick with doing the nested DataSource assignment in the parent’s RowDataBound handler (a sketch of that handler is at the end of these notes).
  • GridView.RowEditing event – you must set a DataSource and re-GridView.DataBind() in order for the state of the controls to change to edit mode… what an amazingly inefficient architecture they’ve provided with this databinding stuff… you have to keep running back to the database to get data that hasn’t necessarily changed at all
      protected void gvOffices_RowEditing(object sender, GridViewEditEventArgs e)
      {
        gvOffices.EditIndex = e.NewEditIndex;
        gvOffices.DataSource = dataset; //DataMember was set initially and thankfully it persists <unlike anything else in this architecture>
        gvOffices.DataBind();
      }
  • GridView.RowUpdating event
    • if we’re manually databinding (vs a DataSource control) we have to extract values ourselves (i.e. GridViewUpdateEventArgs.NewValues will be empty)
    • here’s a good reference to get the values out with the minimum of fuss
    • that page also gives a quick blurb on how to access the “DataKeys”: (sender as GridView).DataKeys[e.RowIndex].Values[fieldName]
  • jQuery:
    • don’t forget to set the DOCTYPE!!! I know this is posted all over the jQuery tutorials, but it’s so subtle i keep forgetting and bang my head for a good hour before i remember:
    • live() vs bind() – live() makes sure you bind to elements that might not be currently available… exactly what you need when you’re doing some Ajax that creates elements dynamically (stumbled on this here, thanks Arnold!!)
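  • and finally, the nested-GridView RowDataBound assignment mentioned above, as a sketch (the control IDs are made up; the child relation name is the same one used in the declarative version):
    protected void gvOffices_RowDataBound(object sender, GridViewRowEventArgs e)
    {
      if (e.Row.RowType != DataControlRowType.DataRow) return;
      
      var parentRowView = (DataRowView)e.Row.DataItem;
      var gvContacts = (GridView)e.Row.FindControl("gvContacts"); // the nested GridView inside the row's TemplateField
      gvContacts.DataSource = parentRowView.CreateChildView("ProviderOfficeContact");
      gvContacts.DataBind();
    }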