Index: linux-2.6.19-rc2-mm2/kernel/sched.c
===================================================================
--- linux-2.6.19-rc2-mm2.orig/kernel/sched.c	2006-10-24 22:01:49.590095860 -0500
+++ linux-2.6.19-rc2-mm2/kernel/sched.c	2006-10-24 23:24:53.441733175 -0500
@@ -6251,6 +6251,30 @@ static cpumask_t sched_domain_node_span(
 
 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 
+/*
+ * Level counts assumed in this sketch: levels 0-2 span single cpus,
+ * levels 3-5 span nodes.
+ */
+#define NR_PER_CPU_SCHED_DOMAINS	3
+#define NR_PER_NODE_SCHED_DOMAINS	3
+
+/*
+ * Template for sched domain level n, ordered from the smallest span
+ * (SMT siblings) to the largest (all nodes).  SD_SMT etc. are assumed
+ * to expand to struct sched_domain initializer values.
+ */
+static struct sched_domain init_sd(int n)
+{
+	switch (n) {
+	case 0:	return SD_SMT;
+	case 1:	return SD_MC;
+	case 2:	return SD_PHYS;
+	case 3:	return SD_NODE;
+	case 4:	return SD_NODES;
+	default: return SD_ALLNODES;
+	}
+}
+
 /*
  * SMT sched-domains:
  */
@@ -6458,9 +6482,103 @@ static inline unsigned long cpu_offset(i
 	return jiffies + cpu * HZ / NR_CPUS;
 }
 
+/*
+ * Sketch of the rewritten domain construction, intended to replace
+ * the existing build_sched_domains() below.
+ */
+static int build_sched_domains(const cpumask_t *cpu_map)
+{
+	int i;
+	int node;
+	int cpu;
+	struct sched_domain *sd;
+	struct sched_group *sg;
+	struct sched_domain *sd_curr[NR_CPUS];
+
+	for_each_possible_cpu(cpu) {
+		cpu_rq(cpu)->sd = NULL;
+		sd_curr[cpu] = NULL;
+	}
+
+	for (i = 0; i < NR_PER_CPU_SCHED_DOMAINS; i++) {
+		for_each_cpu_mask(cpu, *cpu_map) {
+			sg = kmalloc_node(sizeof(struct sched_group),
+					  GFP_KERNEL, cpu_to_node(cpu));
+			sd = kmalloc_node(sizeof(struct sched_domain),
+					  GFP_KERNEL, cpu_to_node(cpu));
+			*sd = init_sd(i);
+			/* Chain the new, wider level above the previous one. */
+			if (sd_curr[cpu])
+				sd_curr[cpu]->parent = sd;
+			else
+				cpu_rq(cpu)->sd = sd;
+			sd_curr[cpu] = sd;
+			/*
+			 * Link up sched_group: allocate sched_group and
+			 * put it into the proper list.
+			 */
+			sd->groups = sg;
+		}
+	}
+	for (i = 0; i < NR_PER_NODE_SCHED_DOMAINS; i++) {
+		for_each_node(node) {
+			cpumask_t nodemask = node_to_cpumask(node);
+
+			cpus_and(nodemask, nodemask, *cpu_map);
+			if (cpus_empty(nodemask))
+				continue;
+			/*
+			 * need to do multiple sg allocs here so that we can
+			 * build a node local chain instead of having off
+			 * node accesses
+			 */
+			sg = kmalloc_node(sizeof(struct sched_group),
+					  GFP_KERNEL, node);
+			sd = kmalloc_node(sizeof(struct sched_domain),
+					  GFP_KERNEL, node);
+			*sd = init_sd(NR_PER_CPU_SCHED_DOMAINS + i);
+			/* Chain the node level above each cpu's current top. */
+			for_each_cpu_mask(cpu, nodemask) {
+				sd_curr[cpu]->parent = sd;
+				sd_curr[cpu] = sd;
+			}
+			/*
+			 * Link up sched_group: allocate sched_group and
+			 * put it into the proper list.
+			 */
+			sd->groups = sg;
+		}
+	}
+	return 0;
+}
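+
+/*
+ * Still missing from this sketch: computing sd->span for each level,
+ * building the per-level sched_group lists, initializing group
+ * cpu_power, and unwinding the allocations should kmalloc_node() fail.
+ */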
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
+ *
+ * Sched domains are built by first creating a global sched domain
+ * spanning all cpus and balancing equally over all processors.
+ *
+ * Depending on the features present, new levels may be added:
+ * 1. [NUMA] Potentially multiple node levels (allnodes, 16-node
+ *    groups (chassis), etc.)
+ * 2. Mandatory [physical] per processor balancing
+ * 3. [Multicore] Create the MC domain level
+ * 4. [Hyperthreading] Create hyperthreading domains
+ *
+ * Each level is set up via create_sched_domain(SD spec).
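+ *
+ * For example, on a NUMA box with multicore, hyperthreaded packages,
+ * a cpu's resulting chain might look like (a sketch; the exact levels
+ * depend on the features detected):
+ *
+ *	rq->sd -> SMT -> MC -> PHYS -> NODE -> ALLNODES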
  */
 static int build_sched_domains(const cpumask_t *cpu_map)
 {
Index: linux-2.6.19-rc2-mm2/include/linux/sched.h
===================================================================
--- linux-2.6.19-rc2-mm2.orig/include/linux/sched.h	2006-10-24 22:01:34.000000000 -0500
+++ linux-2.6.19-rc2-mm2/include/linux/sched.h	2006-10-24 22:54:37.667463339 -0500
@@ -692,6 +692,15 @@ struct sched_domain {
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
+	int (*to_group)(int, cpumask_t *);	/* cpu to group conversion */
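+	/*
+	 * Example (sketch): the MC level's hook could, like the existing
+	 * cpu_to_core_group(), map a cpu to the first cpu of its sibling
+	 * mask intersected with the passed mask (called span here):
+	 *	cpumask_t mask = cpu_sibling_map[cpu];
+	 *	cpus_and(mask, mask, *span);
+	 *	return first_cpu(mask);
+	 */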
 
 	/* Runtime fields. */
 	unsigned long next_balance;	/* init to jiffies. units in jiffies */