From: KAMEZAWA Hiroyuki

for_each_cpu() actually iterates across all possible CPUs.  We've had
mistakes in the past where people were using for_each_cpu() where they
should have been iterating across only online or present CPUs.  This is
inefficient and possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in
the future.

This patch replaces for_each_cpu with for_each_possible_cpu for sparc64.

Signed-off-by: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
---

 arch/sparc64/kernel/pci_sun4v.c |    2 +-
 arch/sparc64/kernel/setup.c     |    2 +-
 arch/sparc64/kernel/smp.c       |    6 +++---
 include/asm-sparc64/percpu.h    |    2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff -puN arch/sparc64/kernel/pci_sun4v.c~for_each_possible_cpu-sparc64 arch/sparc64/kernel/pci_sun4v.c
--- devel/arch/sparc64/kernel/pci_sun4v.c~for_each_possible_cpu-sparc64	2006-03-28 14:49:36.000000000 -0800
+++ devel-akpm/arch/sparc64/kernel/pci_sun4v.c	2006-03-28 14:49:36.000000000 -0800
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *mode
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
 		if (!page)
diff -puN arch/sparc64/kernel/setup.c~for_each_possible_cpu-sparc64 arch/sparc64/kernel/setup.c
--- devel/arch/sparc64/kernel/setup.c~for_each_possible_cpu-sparc64	2006-03-28 14:49:36.000000000 -0800
+++ devel-akpm/arch/sparc64/kernel/setup.c	2006-03-28 14:49:36.000000000 -0800
@@ -535,7 +535,7 @@ static int __init topology_init(void)
 	while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
 		ncpus_probed++;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (p) {
 			register_cpu(p, i, NULL);
diff -puN arch/sparc64/kernel/smp.c~for_each_possible_cpu-sparc64 arch/sparc64/kernel/smp.c
--- devel/arch/sparc64/kernel/smp.c~for_each_possible_cpu-sparc64	2006-03-28 14:49:36.000000000 -0800
+++ devel-akpm/arch/sparc64/kernel/smp.c	2006-03-28 14:49:36.000000000 -0800
@@ -1278,7 +1278,7 @@ int setup_profiling_timer(unsigned int m
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1306,12 +1306,12 @@ void __init smp_prepare_cpus(unsigned in
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;
 
 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}
diff -puN include/asm-sparc64/percpu.h~for_each_possible_cpu-sparc64 include/asm-sparc64/percpu.h
--- devel/include/asm-sparc64/percpu.h~for_each_possible_cpu-sparc64	2006-03-28 14:49:36.000000000 -0800
+++ devel-akpm/include/asm-sparc64/percpu.h	2006-03-28 14:49:36.000000000 -0800
@@ -26,7 +26,7 @@ register unsigned long __local_per_cpu_o
 #define percpu_modcopy(pcpudst, src, size)		\
 do {							\
 	unsigned int __i;				\
-	for_each_cpu(__i)				\
+	for_each_possible_cpu(__i)			\
 		memcpy((pcpudst)+__per_cpu_offset(__i),	\
 		       (src), (size));			\
 } while (0)
_