From: "Vegard Nossum"

I have attached two patches.  The sched one fixes Andrew's boot problem.
The x86 one is untested, but I believe it is better to BUG than silently
corrupt some arbitrary memory.  (Then the callers can be found easily
and fixed at least.)

Cc: Mike Travis
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
---

 arch/x86/kernel/setup.c |    2 ++
 kernel/sched.c          |   10 +++++-----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff -puN arch/x86/kernel/setup.c~fix-x86_64-splat arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c~fix-x86_64-splat
+++ a/arch/x86/kernel/setup.c
@@ -385,6 +385,7 @@ cpumask_t *_node_to_cpumask_ptr(int node
 		dump_stack();
 		return &cpu_online_map;
 	}
+	BUG_ON(node >= nr_node_ids);
 	return &node_to_cpumask_map[node];
 }
 EXPORT_SYMBOL(_node_to_cpumask_ptr);
@@ -400,6 +401,7 @@ cpumask_t node_to_cpumask(int node)
 		dump_stack();
 		return cpu_online_map;
 	}
+	BUG_ON(node >= nr_node_ids);
 	return node_to_cpumask_map[node];
 }
 EXPORT_SYMBOL(node_to_cpumask);
diff -puN kernel/sched.c~fix-x86_64-splat kernel/sched.c
--- a/kernel/sched.c~fix-x86_64-splat
+++ a/kernel/sched.c
@@ -6770,7 +6770,7 @@ static void free_sched_groups(const cpum
 		if (!sched_group_nodes)
 			continue;

-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];

 			*nodemask = node_to_cpumask(i);
@@ -7097,7 +7097,7 @@ static int __build_sched_domains(const c
 #endif

 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
@@ -7121,7 +7121,7 @@ static int __build_sched_domains(const c
 					send_covered, tmpmask);
 	}

-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7160,9 +7160,9 @@ static int __build_sched_domains(const c
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;

-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);

 			cpus_complement(*notcovered, *covered);
_