Index: linux-2.6.19-rc1-mm1/kernel/sched.c
===================================================================
--- linux-2.6.19-rc1-mm1.orig/kernel/sched.c	2006-10-15 23:37:50.406923202 -0500
+++ linux-2.6.19-rc1-mm1/kernel/sched.c	2006-10-15 23:58:21.983780778 -0500
@@ -5625,6 +5625,9 @@ static int __init isolated_cpu_setup(cha
 
 __setup ("isolcpus=", isolated_cpu_setup);
 
+#define PERCPU_TO_ARRAY(__array,__percpu) \
+	{ int cpu; for_each_possible_cpu(cpu) { (__array)[cpu] = &per_cpu(__percpu, cpu); } }
+
 /*
  * init_sched_build_groups takes an array of groups, the cpumask we wish
  * to span, and a pointer to a function which identifies what group a CPU
@@ -5637,7 +5640,7 @@ __setup ("isolcpus=", isolated_cpu_setup
  * and ->cpu_power to 0.
  */
 static void
-init_sched_build_groups(struct sched_group groups[], cpumask_t span,
+init_sched_build_groups(struct sched_group **groups, cpumask_t span,
 			const cpumask_t *cpu_map,
 			int (*group_fn)(int cpu, const cpumask_t *cpu_map))
 {
@@ -5647,7 +5650,7 @@ init_sched_build_groups(struct sched_gro
 
 	for_each_cpu_mask(i, span) {
 		int group = group_fn(i, cpu_map);
-		struct sched_group *sg = &groups[group];
+		struct sched_group *sg = groups[group];
 		int j;
 
 		if (cpu_isset(i, covered))
@@ -6233,7 +6236,7 @@ int sched_smt_power_savings = 0, sched_m
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static struct sched_group sched_group_cpus[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
 static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map)
 {
@@ -6246,7 +6249,7 @@ static int cpu_to_cpu_group(int cpu, con
  */
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static struct sched_group sched_group_core[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -6264,7 +6267,7 @@ static int cpu_to_core_group(int cpu, co
 #endif
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
 static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map)
 {
@@ -6288,14 +6291,14 @@ static int cpu_to_phys_group(int cpu, co
  * gets dynamically allocated.
 */
 static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_nodes[MAX_NUMNODES]);
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map)
 {
-	return cpu_to_node(cpu);
+	return node_to_first_cpu(cpu_to_node(cpu));
 }
 
 static void init_numa_sched_groups_power(struct sched_group *group_head)
 {
@@ -6325,53 +6328,9 @@ next_sg:
 }
 #endif
 
-#ifdef CONFIG_NUMA
-/* Free memory allocated for various sched_group structures */
-static void free_sched_groups(const cpumask_t *cpu_map)
-{
-	int cpu, i;
-
-	for_each_cpu_mask(cpu, *cpu_map) {
-		struct sched_group *sched_group_allnodes
-			= sched_group_allnodes_bycpu[cpu];
-		struct sched_group **sched_group_nodes
-			= sched_group_nodes_bycpu[cpu];
-
-		if (sched_group_allnodes) {
-			kfree(sched_group_allnodes);
-			sched_group_allnodes_bycpu[cpu] = NULL;
-		}
-
-		if (!sched_group_nodes)
-			continue;
-
-		for (i = 0; i < MAX_NUMNODES; i++) {
-			cpumask_t nodemask = node_to_cpumask(i);
-			struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
-			cpus_and(nodemask, nodemask, *cpu_map);
-			if (cpus_empty(nodemask))
-				continue;
-
-			if (sg == NULL)
-				continue;
-			sg = sg->next;
-next_sg:
-			oldsg = sg;
-			sg = sg->next;
-			kfree(oldsg);
-			if (oldsg != sched_group_nodes[i])
-				goto next_sg;
-		}
-		kfree(sched_group_nodes);
-		sched_group_nodes_bycpu[cpu] = NULL;
-	}
-}
-#else
 static void free_sched_groups(const cpumask_t *cpu_map)
 {
 }
-#endif
 
 /*
  * Initialize sched groups cpu_power.
@@ -6433,21 +6392,7 @@ static int build_sched_domains(const cpu
 {
 	int i;
 	struct sched_domain *sd;
-#ifdef CONFIG_NUMA
-	struct sched_group **sched_group_nodes = NULL;
-	struct sched_group *sched_group_allnodes = NULL;
-
-	/*
-	 * Allocate the per-node list of sched groups
-	 */
-	sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
-					   GFP_KERNEL);
-	if (!sched_group_nodes) {
-		printk(KERN_WARNING "Can not alloc sched group node list\n");
-		return -ENOMEM;
-	}
-	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
-#endif
+	struct sched_group *scharray[NR_CPUS];
 
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
@@ -6462,25 +6407,11 @@ static int build_sched_domains(const cpu
 
 #ifdef CONFIG_NUMA
 		if (cpus_weight(*cpu_map)
 			> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
-			if (!sched_group_allnodes) {
-				sched_group_allnodes
-					= kmalloc_node(sizeof(struct sched_group)
-							* MAX_NUMNODES,
-						  GFP_KERNEL,
-						  cpu_to_node(i));
-				if (!sched_group_allnodes) {
-					printk(KERN_WARNING
-					"Can not alloc allnodes sched group\n");
-					goto error;
-				}
-				sched_group_allnodes_bycpu[i]
-						= sched_group_allnodes;
-			}
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
 			sd->span = *cpu_map;
 			group = cpu_to_allnodes_group(i, cpu_map);
-			sd->groups = &sched_group_allnodes[group];
+			sd->groups = &per_cpu(sched_group_allnodes, group);
 			p = sd;
 		} else
 			p = NULL;
@@ -6502,7 +6433,7 @@ static int build_sched_domains(const cpu
 		sd->parent = p;
 		if (p)
 			p->child = sd;
-		sd->groups = &sched_group_phys[group];
+		sd->groups = &per_cpu(sched_group_phys, group);
 
 #ifdef CONFIG_SCHED_MC
 		p = sd;
@@ -6513,7 +6444,7 @@ static int build_sched_domains(const cpu
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
-		sd->groups = &sched_group_core[group];
+		sd->groups = &per_cpu(sched_group_core, group);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
@@ -6525,7 +6456,7 @@ static int build_sched_domains(const cpu
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
-		sd->groups = &sched_group_cpus[group];
+		sd->groups = &per_cpu(sched_group_cpus, group);
 #endif
 	}
 
@@ -6536,8 +6467,8 @@ static int build_sched_domains(const cpu
 		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
-
-		init_sched_build_groups(sched_group_cpus, this_sibling_map,
+		PERCPU_TO_ARRAY(scharray, sched_group_cpus);
+		init_sched_build_groups(scharray, this_sibling_map,
 					cpu_map, &cpu_to_cpu_group);
 	}
 #endif
@@ -6549,7 +6480,8 @@ static int build_sched_domains(const cpu
 		cpus_and(this_core_map, this_core_map, *cpu_map);
 		if (i != first_cpu(this_core_map))
 			continue;
-		init_sched_build_groups(sched_group_core, this_core_map,
+		PERCPU_TO_ARRAY(scharray, sched_group_core);
+		init_sched_build_groups(scharray, this_core_map,
 					cpu_map, &cpu_to_core_group);
 	}
 #endif
@@ -6563,14 +6495,15 @@ static int build_sched_domains(const cpu
 		if (cpus_empty(nodemask))
 			continue;
 
-		init_sched_build_groups(sched_group_phys, nodemask,
+		PERCPU_TO_ARRAY(scharray, sched_group_phys);
+		init_sched_build_groups(scharray, nodemask,
 					cpu_map, &cpu_to_phys_group);
 	}
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
-	if (sched_group_allnodes)
-		init_sched_build_groups(sched_group_allnodes, *cpu_map,
+	PERCPU_TO_ARRAY(scharray, sched_group_allnodes);
+	init_sched_build_groups(scharray, *cpu_map,
 					cpu_map, &cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
@@ -6582,10 +6515,6 @@ static int build_sched_domains(const cpu
 		int j;
 
 		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask)) {
-			sched_group_nodes[i] = NULL;
-			continue;
-		}
 
 		domainspan = sched_domain_node_span(i);
 		cpus_and(domainspan, domainspan, *cpu_map);
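
(Aside, not part of the patch.) The PERCPU_TO_ARRAY() helper introduced above only gathers pointers to the per-cpu sched_group instances into a stack-local array, so init_sched_build_groups() can keep taking an indexable table while the groups themselves live in per-cpu data. A rough sketch of what one invocation expands to, assuming the per_cpu()/for_each_possible_cpu() accessors already used in sched.c:

	struct sched_group *scharray[NR_CPUS];
	int cpu;

	/* PERCPU_TO_ARRAY(scharray, sched_group_cpus) is roughly: */
	for_each_possible_cpu(cpu)
		scharray[cpu] = &per_cpu(sched_group_cpus, cpu);

	/* scharray[group] then resolves to the per-cpu sched_group for that
	 * group id, matching the groups[group] lookup inside
	 * init_sched_build_groups(); the group ids returned by the
	 * cpu_to_*_group() helpers are all CPU numbers, so CPU-indexed
	 * per-cpu storage lines up with the old NR_CPUS-sized arrays. */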