---
 include/linux/percpu.h |   10 +----
 init/main.c            |    2 +
 mm/cpu_alloc.c         |   80 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 3 files changed, 69 insertions(+), 23 deletions(-)
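
Note: with this patch the cpu_alloc area defaults to 16k with CONFIG_MODULES
(8k without) and can be sized on the kernel command line, e.g. cpusize=65536.

For reference, a minimal usage sketch of the allocator that this area backs.
Illustrative only: the identifiers (hits, hits_init, ...) are made up, the
cpu_alloc() signature is assumed to be (size, gfpflags, align) with a NULL
return on failure as in mm/cpu_alloc.c, and the THIS_CPU() accessor is
assumed from the earlier patches in this series.

	static int *hits;	/* one int per cpu, from the cpu_alloc area */

	static int __init hits_init(void)
	{
		/* Zeroed per-cpu int, naturally aligned */
		hits = cpu_alloc(sizeof(int), GFP_KERNEL | __GFP_ZERO,
					__alignof__(int));
		if (!hits)
			return -ENOMEM;
		return 0;
	}

	static void hits_event(void)
	{
		/* Bump this cpu's instance of the counter */
		preempt_disable();
		(*THIS_CPU(hits))++;
		preempt_enable();
	}

	static void hits_exit(void)
	{
		cpu_free(hits, sizeof(int));
	}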

Index: linux-2.6/include/linux/percpu.h
===================================================================
--- linux-2.6.orig/include/linux/percpu.h	2008-01-28 18:49:41.000000000 -0800
+++ linux-2.6/include/linux/percpu.h	2008-01-28 18:55:25.000000000 -0800
@@ -33,16 +33,12 @@
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
+extern unsigned long cpu_alloc_size;
+
 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
 #ifndef PERCPU_ENOUGH_ROOM
-#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE	8192
-#else
-#define PERCPU_MODULE_RESERVE	0
-#endif
-
 #define PERCPU_ENOUGH_ROOM						\
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
+	(__per_cpu_end - __per_cpu_start + cpu_alloc_size)
 #endif	/* PERCPU_ENOUGH_ROOM */
 
 /*
Index: linux-2.6/mm/cpu_alloc.c
===================================================================
--- linux-2.6.orig/mm/cpu_alloc.c	2008-01-28 18:49:41.000000000 -0800
+++ linux-2.6/mm/cpu_alloc.c	2008-01-28 18:54:44.000000000 -0800
@@ -15,8 +15,48 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
 #include <linux/bitmap.h>
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <asm/sections.h>
+
+/*
+ * Can be configured from the kernel command line via "cpusize="
+ */
+
+#ifdef PERCPU_ENOUGH_ROOM
+
+/* Preset size of percpu area (IA64) */
+#define CPU_ALLOC_SIZE (PERCPU_ENOUGH_ROOM - (__per_cpu_end-__per_cpu_start))
+unsigned long cpu_alloc_size = CPU_ALLOC_SIZE;
+
+#else
+
+/* Dynamically sizable percpu area */
+
+/*
+ * Default sizes (in addition to the space already occupied by
+ * percpu definitions).
+ */
+#ifndef CONFIG_MODULES
+#define CPU_ALLOC_SIZE 8192
+#else
+#define CPU_ALLOC_SIZE 16384
+#endif
+
+unsigned long cpu_alloc_size = CPU_ALLOC_SIZE;
+
+static int __init setup_cpusize(char *str)
+{
+	cpu_alloc_size = memparse(str, &str);
+
+	return 1;
+}
+
+__setup("cpusize=", setup_cpusize);
+
+
+#endif
 
 /*
  * Basic allocation unit. A bit map is created to track the use of each
@@ -24,7 +64,6 @@
  */
 
 #define UNIT_SIZE sizeof(int)
-#define UNITS (PERCPU_MODULE_RESERVE / UNIT_SIZE)
 
 /*
  * How many units are needed for an object of a given size
@@ -39,16 +78,17 @@ static int size_to_units(unsigned long s
  */
 static DEFINE_SPINLOCK(cpu_alloc_map_lock);
 static unsigned long units_reserved;	/* Units reserved by boot allocations */
+static unsigned long nr_units;	/* Units available to cpu_alloc() */
 
-static DECLARE_BITMAP(cpu_alloc_map, UNITS);
+static unsigned long *cpu_alloc_map;
 
 void * __init boot_cpu_alloc(unsigned long size)
 {
 	unsigned long x = units_reserved;
 
 	units_reserved += size_to_units(size);
-	BUG_ON(units_reserved > UNITS);
-	return __per_cpu_load + x * UNIT_SIZE;
+	BUG_ON(units_reserved > size_to_units(cpu_alloc_size));
+	return __per_cpu_start + x * UNIT_SIZE;
 }
 
 static int first_free;		/* First known free unit */
@@ -98,9 +138,8 @@ void *cpu_alloc(unsigned long size, gfp_
 
 	for ( ; ; ) {
 
-		start = find_next_zero_bit(cpu_alloc_map,
-				PERCPU_MODULE_RESERVE, start);
-		if (start >= UNITS - units_reserved)
+		start = find_next_zero_bit(cpu_alloc_map, nr_units, start);
+		if (start >= nr_units)
 			goto out_of_memory;
 
 		if (first)
@@ -111,8 +150,8 @@ void *cpu_alloc(unsigned long size, gfp_
 		 * the starting unit.
 		 */
 		if ((start + units_reserved) % (align / UNIT_SIZE) == 0 &&
-			find_next_bit(cpu_alloc_map, PERCPU_MODULE_RESERVE,
-					start + 1) >= start + units)
+			find_next_bit(cpu_alloc_map, nr_units, start + 1) >=
+								start + units)
 				break;
 		start++;
 		first = 0;
@@ -121,7 +160,7 @@ void *cpu_alloc(unsigned long size, gfp_
 	if (first)
 		first_free = start + units;
 
-	if (start + units > UNITS - units_reserved)
+	if (start + units > nr_units)
 		goto out_of_memory;
 
 	set_map(start, units);
@@ -129,7 +168,7 @@ void *cpu_alloc(unsigned long size, gfp_
 
 	spin_unlock_irqrestore(&cpu_alloc_map_lock, flags);
 
-	ptr = __per_cpu_load + (start + units_reserved) * UNIT_SIZE;
+	ptr = __per_cpu_start + (start + units_reserved) * UNIT_SIZE;
 
 	if (gfpflags & __GFP_ZERO) {
 		int cpu;
@@ -154,13 +193,13 @@ void cpu_free(void *start, unsigned long
 {
 	int units = size_to_units(size);
 	int index;
-	u8 *p = start;
+	char *p = start;
 	unsigned long flags;
 
-	BUG_ON(p < (__per_cpu_load + units_reserved * UNIT_SIZE));
-	index = (p - __per_cpu_load) / UNIT_SIZE - units_reserved;
+	BUG_ON(p < (__per_cpu_start + units_reserved * UNIT_SIZE));
+	index = (p - __per_cpu_start) / UNIT_SIZE - units_reserved;
 	BUG_ON(!test_bit(index, cpu_alloc_map) ||
-			index >= UNITS - units_reserved);
+			index >= nr_units);
 
 	spin_lock_irqsave(&cpu_alloc_map_lock, flags);
 
@@ -172,3 +211,12 @@ void cpu_free(void *start, unsigned long
 	spin_unlock_irqrestore(&cpu_alloc_map_lock, flags);
 }
 EXPORT_SYMBOL(cpu_free);
+
+/*
+ * Set up cpu_alloc. All boot_cpu_alloc() calls must have completed by now.
+ */
+void __init cpu_alloc_init(void)
+{
+	nr_units = size_to_units(cpu_alloc_size) - units_reserved;
+	cpu_alloc_map = alloc_bootmem(BITS_TO_LONGS(nr_units) * sizeof(long));
+}
Index: linux-2.6/init/main.c
===================================================================
--- linux-2.6.orig/init/main.c	2008-01-28 18:49:41.000000000 -0800
+++ linux-2.6/init/main.c	2008-01-28 18:50:00.000000000 -0800
@@ -89,6 +89,7 @@ extern void pidmap_init(void);
 extern void prio_tree_init(void);
 extern void radix_tree_init(void);
 extern void free_initmem(void);
+extern void cpu_alloc_init(void);
 #ifdef	CONFIG_ACPI
 extern void acpi_early_init(void);
 #else
@@ -567,6 +568,7 @@ asmlinkage void __init start_kernel(void
 				"enabled *very* early, fixing it\n");
 		local_irq_disable();
 	}
+	cpu_alloc_init();
 	sort_main_extable();
 	trap_init();
 	rcu_init();