---
 mm/cpu_alloc.c |   22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

Index: linux-2.6/mm/cpu_alloc.c
===================================================================
--- linux-2.6.orig/mm/cpu_alloc.c	2007-11-05 13:53:02.710292470 -0800
+++ linux-2.6/mm/cpu_alloc.c	2007-11-05 14:41:13.098534117 -0800
@@ -58,6 +58,8 @@ void *cpu_area_alloc_block(unsigned long
 {
 	struct page *page = alloc_pages_node(node,
 			flags, get_order(size));
+	printk("cpu_area_alloc_block(%d,%tx, %d) = %p\n",
+					size, flags, node, page);
 	if (page)
 		return page_address(page);
 	return NULL;
@@ -147,6 +149,7 @@ int cpu_area_populate_basepages(void *st
 int __attribute__((weak)) cpu_area_populate(void *start, unsigned long size,
 					gfp_t flags, int node)
 {
+	printk("populate_cpu_area(%p, %ld, %d)\n", start, size, node);
 	return cpu_area_populate_basepages(start, size, flags, node);
 }
 
@@ -164,6 +167,7 @@ static int expand_cpu_area(gfp_t flags)
 	unsigned long *new_map = NULL;
 	void *start;
 
+	printk(KERN_CRIT "expand_cpu_area(%x)\n", flags);
 	if (active_blocks == MAX_BLOCKS)
 		goto out;
 
@@ -181,6 +185,7 @@ static int expand_cpu_area(gfp_t flags)
 			flags, cpu_to_node(cpu));
 
 		if (err) {
+			printk("error=%d\n", err);
 			spin_lock(&cpu_alloc_map_lock);
 			goto out;
 		}
@@ -189,6 +194,7 @@ static int expand_cpu_area(gfp_t flags)
 	if (map_order > cpu_alloc_map_order) {
 		new_map = cpu_area_alloc_block(PAGE_SIZE << map_order,
 						flags | __GFP_ZERO, 0);
+		printk("new map=%p\n", new_map);
 		if (!new_map)
 			goto out;
 	}
@@ -199,6 +205,7 @@ static int expand_cpu_area(gfp_t flags)
 	 * the cpu area size as needed.
 	 */
 	if (blocks != active_blocks) {
+		printk("Concurrent alloc\n");
 		if (new_map)
 			free_pages((unsigned long)new_map,
 						map_order);
@@ -215,6 +222,7 @@ static int expand_cpu_area(gfp_t flags)
 				PAGE_SIZE << cpu_alloc_map_order);
 		cpu_alloc_map = new_map;
 		cpu_alloc_map_order = map_order;
+		printk("installed new map=%p order=%d\n", new_map, map_order);
 	}
 	active_blocks++;
 	units_total += UNITS_PER_BLOCK;
@@ -230,7 +238,7 @@ out:
  * machines that do not have MMU support.
  */
 #define MAX_BLOCKS 1
-#define BLOCK_SIZE (1UL << (CONFIG_CPU_AREA_ORDER + PAGE_SHIFT))
+#define BLOCK_SIZE (1UL << (CONFIG_CPU_AREA_ALLOC_ORDER + PAGE_SHIFT))
 
 u8 cpu_area[NR_CPUS * BLOCK_SIZE];
 static DECLARE_BITMAP(cpu_alloc_map, UNITS_PER_BLOCK);
@@ -289,6 +297,8 @@ void *cpu_alloc(unsigned long size, gfp_
 	int first;
 	unsigned long map_size;
 
+	printk("cpu_alloc(%ld, %lx, %ld) first_free=%d units_total=%d\n",
+			size, gfpflags, align, first_free, units_total);
 	BUG_ON(gfpflags & ~(GFP_RECLAIM_MASK | __GFP_ZERO));
 	spin_lock(&cpu_alloc_map_lock);
 restart:
@@ -323,6 +333,13 @@ restart:
 			goto out_of_memory;
 	}
 
+	while (start + units > units_total) {
+		printk("expanding start+units=%d units_total=%d\n", start + units, units_total);
+		if (expand_cpu_area(gfpflags))
+			goto out_of_memory;
+	}
+
+	printk("Setting maps %d %d\n", start, units);
 	set_map(start, units);
 	units_free -= units;
 	__count_vm_events(CPU_BYTES, units * UNIT_SIZE);
@@ -332,9 +349,11 @@ restart:
 	if (gfpflags & __GFP_ZERO) {
 		int cpu;
 
+		printk("zeroing\n");
 		for_each_possible_cpu(cpu)
 			memset(CPU_PTR(ptr, cpu), 0, size);
 	}
+	printk("return=%p\n", ptr);
 	return ptr;
 out_of_memory:
 	spin_unlock(&cpu_alloc_map_lock);
@@ -352,6 +371,7 @@ void cpu_free(void *start, unsigned long
 	int index;
 	u8 *p = start;
 
+	printk("cpu_free(%p, %ld)\n", start, size);
 	BUG_ON(p < cpu_area);
 	index = (p - cpu_area) / UNIT_SIZE;
 	BUG_ON(!test_bit(index, cpu_alloc_map) ||
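
For reference, a minimal sketch of a module that exercises the traced paths
above (cpu_alloc() with __GFP_ZERO, CPU_PTR() access, cpu_free()). The module
and function names here are made up, and the declarations are assumed to live
in <linux/percpu.h> as elsewhere in this series; this is not part of the patch:

#include <linux/module.h>
#include <linux/percpu.h>	/* assumed home of cpu_alloc()/cpu_free()/CPU_PTR() */

static unsigned long *counters;

static int __init cpu_alloc_test_init(void)
{
	int cpu;

	/* Hits the cpu_alloc() trace and, on a fresh area, expand_cpu_area(). */
	counters = cpu_alloc(sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO,
				__alignof__(unsigned long));
	if (!counters)
		return -ENOMEM;

	/* Touch each per-cpu instance; the __GFP_ZERO path already zeroed them. */
	for_each_possible_cpu(cpu)
		*(unsigned long *)CPU_PTR(counters, cpu) = cpu;

	return 0;
}

static void __exit cpu_alloc_test_exit(void)
{
	/* Hits the cpu_free() trace. */
	cpu_free(counters, sizeof(unsigned long));
}

module_init(cpu_alloc_test_init);
module_exit(cpu_alloc_test_exit);
MODULE_LICENSE("GPL");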