---
 mm/cpu_alloc.c |   26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

Index: linux-2.6/mm/cpu_alloc.c
===================================================================
--- linux-2.6.orig/mm/cpu_alloc.c	2007-11-13 16:38:22.694565334 -0800
+++ linux-2.6/mm/cpu_alloc.c	2007-11-13 16:42:14.083153734 -0800
@@ -68,7 +68,6 @@ static DEFINE_SPINLOCK(cpu_alloc_map_loc
 static unsigned long *cpu_alloc_map = NULL;
 static int cpu_alloc_map_order = -1;	/* Size of the bitmap in page order */
 static unsigned long active_blocks;	/* Number of block allocated on each cpu */
-static unsigned long units_free;	/* Number of available units */
 static unsigned long units_total;	/* Total units that are managed */
 static unsigned long units_reserved;	/* Units reserved by boot allocations */
 /*
@@ -259,7 +258,6 @@ static int expand_cpu_area(gfp_t flags)

 	active_blocks++;
 	units_total += UNITS_PER_BLOCK;
-	units_free += UNITS_PER_BLOCK;
 	err = 0;
 out:
 	return err;
@@ -267,11 +265,24 @@ out:

 void * __init boot_cpu_alloc(unsigned long size)
 {
+	unsigned long flags;
 	unsigned long x = units_reserved;
 	unsigned long units = size_to_units(size);

+	/*
+	 * Locking is really not necessary during boot
+	 * but expand_cpu_area() unlocks and relocks.
+	 * If we do not perform locking here then
+	 *
+	 * 1. The cpu_alloc_map_lock is locked when
+	 *    we exit boot causing a hang on the next cpu_alloc().
+	 * 2. lockdep will get upset if we do not consistently
+	 *    handle things.
+	 */
+	spin_lock_irqsave(&cpu_alloc_map_lock, flags);
 	while (units_reserved + units > units_total)
 		expand_cpu_area(BOOT_ALLOC);
+	spin_unlock_irqrestore(&cpu_alloc_map_lock, flags);
 	units_reserved += units;
 	return cpu_area + x * UNIT_SIZE;
 }
@@ -287,7 +298,6 @@ void * __init boot_cpu_alloc(unsigned lo

 static u8 cpu_area[NR_CPUS * ALLOC_SIZE];
 static DECLARE_BITMAP(cpu_alloc_map, UNITS_PER_BLOCK);
-static int units_free = UNITS_PER_BLOCK;
 #define cpu_alloc_map_order CONFIG_CPU_AREA_ORDER
 #define units_total UNITS_PER_BLOCK

@@ -360,7 +370,7 @@ restart:
 		if (first)
 			first_free = start;

-		if (start >= units_total) {
+		if (start >= units_total - units_reserved) {
 			if (expand_cpu_area(gfpflags))
 				goto out_of_memory;
 			goto restart;
@@ -370,7 +380,7 @@ restart:
 		 * Check alignment and that there is enough space after
 		 * the starting unit.
 		 */
-		if (start % (align / UNIT_SIZE) == 0 &&
+		if ((start + units_reserved) % (align / UNIT_SIZE) == 0 &&
 			find_next_bit(cpu_alloc_map, map_size, start + 1)
 						>= start + units)
 			break;
@@ -381,13 +391,12 @@ restart:
 	if (first)
 		first_free = start + units;

-	while (start + units > units_total) {
+	while (start + units > units_total - units_reserved) {
 		if (expand_cpu_area(gfpflags))
 			goto out_of_memory;
 	}

 	set_map(start, units);
-	units_free -= units;
 	__count_vm_events(CPU_BYTES, units * UNIT_SIZE);
 	spin_unlock_irqrestore(&cpu_alloc_map_lock, flags);

@@ -423,12 +432,11 @@ void cpu_free(void *start, unsigned long
 	BUG_ON(p < (cpu_area + units_reserved * UNIT_SIZE));
 	index = (p - cpu_area) / UNIT_SIZE - units_reserved;
 	BUG_ON(!test_bit(index, cpu_alloc_map) ||
-			index >= units_total);
+			index >= units_total - units_reserved);

 	spin_lock_irqsave(&cpu_alloc_map_lock, flags);
 	clear_map(index, units);
-	units_free += units;
 	__count_vm_events(CPU_BYTES, -units * UNIT_SIZE);
 	if (index < first_free)
 		first_free = index;