Index: linux-2.6.19-rc3/kernel/sched.c
===================================================================
--- linux-2.6.19-rc3.orig/kernel/sched.c	2006-10-25 18:39:32.000000000 -0500
+++ linux-2.6.19-rc3/kernel/sched.c	2006-10-25 20:15:36.192581792 -0500
@@ -6338,6 +6338,140 @@ static inline unsigned long cpu_offset(i
 	return jiffies + cpu * HZ / NR_CPUS;
 }
 
+int insert_sched_domain(struct sched_domain **top_layer,
+			struct sched_domain **bottom_layer,
+			struct sched_domain template,
+			int (*group_fn)(int, const cpumask_t *),
+			const cpumask_t *map)
+{
+	struct sched_domain *sd[NR_CPUS];
+	struct sched_group *sg[NR_CPUS];
+	struct sched_group *groups[NR_CPUS];
+	cpumask_t sched_group_mask;	/* At which cpu did the sched group start */
+	int sg_group = -1;		/* The group we are currently generating */
+	cpumask_t covered = CPU_MASK_NONE;
+	struct sched_group *g;
+	int cpu, n;
+
+	memset(groups, 0, sizeof(groups));
+
+	/* Figure out span */
+	/* Generate sched_groups */
+	for_each_cpu_mask(cpu, *map) {
+		n = group_fn(cpu, map);
+		g = groups[n];
+		if (!g) {
+			g = groups[n] = kmalloc_node(sizeof(struct sched_group),
+						GFP_KERNEL, cpu_to_node(cpu));
+			if (!g)
+				return -ENOMEM;
+			g->cpumask = CPU_MASK_NONE;
+			g->cpu_power = 0;
+		}
+		cpu_set(cpu, g->cpumask);
+		sg[cpu] = g;
+	}
+
+	/* Link up groups */
+	{
+		struct sched_group *first = NULL, *last = NULL;
+		cpumask_t covered = CPU_MASK_NONE;
+		int i;
+
+		for_each_cpu_mask(i, *map) {
+			int group = group_fn(i, map);
+			struct sched_group *sg = groups[group];
+			int j;
+
+			if (cpu_isset(i, covered))
+				continue;
+
+			sg->cpumask = CPU_MASK_NONE;
+			sg->cpu_power = 0;
+
+			for_each_cpu_mask(j, *map) {
+				if (group_fn(j, map) != group)
+					continue;
+
+				cpu_set(j, covered);
+				cpu_set(j, sg->cpumask);
+			}
+			if (!first)
+				first = sg;
+			if (last)
+				last->next = sg;
+			last = sg;
+		}
+		last->next = first;
+	}
+
+	for_each_cpu_mask(cpu, *map) {
+		sd[cpu] = kmalloc_node(sizeof(struct sched_domain), GFP_KERNEL,
+					cpu_to_node(cpu));
+		sg[cpu] = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL,
+					cpu_to_node(cpu));
+		if (!sd[cpu] || !sg[cpu])
+			panic("out of memory in scheduler");
+		*sd[cpu] = template;
+		sd[cpu]->span = calc this group's span;
+		sd[cpu]->next_balance = cpu_offset(cpu);
+		sd[cpu]->groups =
+		sg[cpu]->next = sg;
+		sg[cpu]->cpumask = xxx;
+	}
+	/* Allocate sched_groups */
+	/* Link up sched_groups */
+	/* Calculate power */
+	for_each_cpu_mask(cpu, *map)
+		if (!bottom_layer[cpu])
+			bottom_layer[cpu] = sd[cpu];
+
+	return 0;
+}
+
+static int build_sched_domains_new(const cpumask_t *map)
+{
+	struct sched_domain *top_layer[NR_CPUS];
+	struct sched_domain *bottom_layer[NR_CPUS];
+	int cpu;
+
+	memset(top_layer, 0, sizeof(top_layer));
+	memset(bottom_layer, 0, sizeof(bottom_layer));
+
+#ifdef CONFIG_SCHED_SMT
+	/* Per cpu domains */
+	if (have_smt)
+		insert_sched_domain(top_layer, bottom_layer,
+				SD_SIBLING_INIT, cpu_to_cpu_group, map);
+#endif
+#ifdef CONFIG_SCHED_MC
+	if (have_multicore)
+		insert_sched_domain(top_layer, bottom_layer,
+				SD_MC_INIT, cpu_to_cpu_coregroup, map);
+#endif
+
+	/* Per physical cpu domains (span per node, cf. node_to_cpumask) */
+	insert_sched_domain(top_layer, bottom_layer,
+			SD_CPU_INIT, cpu_to_phys_group, map);
+
+#ifdef CONFIG_NUMA
+	if (more than one node)
+		/* Per 16 node domains */
+		insert_sched_domain(top_layer, bottom_layer,
+				SD_NODE_INIT, cpu_to_allnodes_group, map);
+
+	if (more than 16 nodes)
+		insert_sched_domain(top_layer, bottom_layer,
+				SD_ALLNODES_INIT, cpu_to_allnodes_group, map);
+#endif
+
+	/* Attach the domains */
+	for_each_cpu_mask(cpu, *map)
+		cpu_attach_domain(bottom_layer[cpu], cpu);
+
+	/*
+	 * Tune cache-hot values:
+	 */
+	calibrate_migration_costs(map);
+	return 0;
+
+error:
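+	/* Error path: release any partially built sched_groups before failing */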
+	free_sched_groups(map);
+	return -ENOMEM;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus