---
 arch/x86/kernel/setup64.c |   34 ++++++++++++++--------------------
 init/main.c               |   21 ++++++++++++---------
 2 files changed, 26 insertions(+), 29 deletions(-)

Index: linux-2.6/init/main.c
===================================================================
--- linux-2.6.orig/init/main.c	2007-11-14 12:00:29.977593497 -0800
+++ linux-2.6/init/main.c	2007-11-15 21:33:32.001129814 -0800
@@ -370,18 +370,21 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 static void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i;
 	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
+	int cpu;
 
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
+	ptr = boot_cpu_alloc(ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE));
+
+	if (!ptr)
+		printk("Cannot allocate per cpu areas\n");
 
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
+	for_each_possible_cpu(cpu) {
+		void *p = CPU_PTR(ptr, cpu);
+
+		__per_cpu_offset[cpu] = p - __per_cpu_start;
+
+		memcpy(p, __per_cpu_start,
+				__per_cpu_end - __per_cpu_start);
 	}
 }
 #endif /* !__GENERIC_PER_CPU */
Index: linux-2.6/arch/x86/kernel/setup64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup64.c	2007-11-14 12:00:29.986843710 -0800
+++ linux-2.6/arch/x86/kernel/setup64.c	2007-11-15 21:36:58.727534438 -0800
@@ -80,42 +81,35 @@ static int __init nonx32_setup(char *str
 __setup("noexec32=", nonx32_setup);
 
 /*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
+ * Per cpu areas are placed directly after the pda. The
+ * gs segment register can be used to access pda data as well
+ * as per cpu data and cpu_alloc data.
  */
 void __init setup_per_cpu_areas(void)
 {
 	int i;
-	unsigned long size;
+	char *base;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	prefill_possible_map();
 #endif
 
 	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
+	base = boot_cpu_alloc(PERCPU_ENOUGH_ROOM);
+	if (!base)
+		panic("Cannot allocate cpu data\n");
 
-	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
+						PERCPU_ENOUGH_ROOM);
 	for_each_cpu_mask (i, cpu_possible_map) {
-		char *ptr;
+		cpu_pda(i)->data_offset = CPU_PTR(base, i) - __per_cpu_start;
 
-		if (!NODE_DATA(cpu_to_node(i))) {
-			printk("cpu with no node %d, num_online_nodes %d\n",
-				i, num_online_nodes());
-			ptr = alloc_bootmem_pages(size);
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
-		}
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
-		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+		memcpy(CPU_PTR(base, i), __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
-} 
+}
 
 void pda_init(int cpu)
-{ 
+{
 	struct x8664_pda *pda = cpu_pda(cpu);
 
 	/* Setup up data that may be needed in __get_free_pages early */
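
For readers without the rest of the cpu_alloc series handy, here is a minimal
userspace sketch of the layout the patched setup_per_cpu_areas() relies on,
under the assumption that boot_cpu_alloc() returns one contiguous block with a
fixed-size unit per possible cpu and that CPU_PTR(base, cpu) returns the start
of that cpu's unit. NR_MODEL_CPUS, UNIT_SIZE, model_cpu_ptr() and
model_offset[] are made-up names for illustration only, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-ins; the real boot_cpu_alloc(), CPU_PTR() and
 * PERCPU_ENOUGH_ROOM come from the cpu_alloc patches.
 */
#define NR_MODEL_CPUS	4
#define UNIT_SIZE	4096

static char master[UNIT_SIZE] = "per cpu template";	/* models __per_cpu_start..__per_cpu_end */
static long model_offset[NR_MODEL_CPUS];		/* models __per_cpu_offset[] */

/* Models CPU_PTR(base, cpu): cpu's copy sits cpu * UNIT_SIZE bytes into the block. */
static void *model_cpu_ptr(void *base, int cpu)
{
	return (char *)base + (size_t)cpu * UNIT_SIZE;
}

int main(void)
{
	/* Models boot_cpu_alloc(): a single allocation covering every possible cpu. */
	char *base = calloc(NR_MODEL_CPUS, UNIT_SIZE);
	int cpu;

	if (!base)
		return 1;

	for (cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		void *p = model_cpu_ptr(base, cpu);

		/* Same two steps as the patched setup_per_cpu_areas(). */
		model_offset[cpu] = (char *)p - master;
		memcpy(p, master, sizeof(master));

		printf("cpu %d: copy at %p, offset %ld\n", cpu, p, model_offset[cpu]);
	}
	free(base);
	return 0;
}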