X86_64: Declare pda as per cpu data thereby moving it into the cpu area Declare the pda as a per cpu variable. This will have the effect of moving the pda data into the cpu area managed by cpu alloc. The boot_pdas are only needed in head64.c so move the declaration over there and make it static. Remove the code that allocates special pda data structures. Signed-off-by: Christoph Lameter --- arch/x86/kernel/head64.c | 6 ++++++ arch/x86/kernel/setup64.c | 19 ++++++++++++++++--- arch/x86/kernel/smpboot_64.c | 16 ---------------- include/asm-x86/pda.h | 1 - 4 files changed, 22 insertions(+), 20 deletions(-) Index: linux-2.6/arch/x86/kernel/setup64.c =================================================================== --- linux-2.6.orig/arch/x86/kernel/setup64.c 2007-11-16 19:15:42.434539206 -0800 +++ linux-2.6/arch/x86/kernel/setup64.c 2007-11-16 19:57:12.894179747 -0800 @@ -30,7 +30,8 @@ cpumask_t cpu_initialized __cpuinitdata struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; EXPORT_SYMBOL(_cpu_pda); -struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; + +static DEFINE_PER_CPU(struct x8664_pda, pda); struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; @@ -101,9 +102,21 @@ void __init setup_per_cpu_areas(void) printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", PERCPU_ENOUGH_ROOM); for_each_cpu_mask (i, cpu_possible_map) { - cpu_pda(i)->data_offset = CPU_PTR(base, i) - __per_cpu_start; + char *base_for_cpu = CPU_PTR(base, i); + struct x8664_pda *pda_for_cpu; - memcpy(CPU_PTR(base, i), __per_cpu_start, __per_cpu_end - __per_cpu_start); + /* + * Must setup the data offset in the boot pda first because + * we may need the data offset to calculate the per cpu address. 
+ */ + cpu_pda(i)->data_offset = base_for_cpu - __per_cpu_start; + + memcpy(base_for_cpu, __per_cpu_start, __per_cpu_end - __per_cpu_start); + + pda_for_cpu = &per_cpu(pda, i); + /* Relocate the pda */ + memcpy(pda_for_cpu, cpu_pda(i), sizeof(struct x8664_pda)); + cpu_pda(i) = pda_for_cpu; } count_vm_events(CPU_BYTES, PERCPU_ENOUGH_ROOM); } Index: linux-2.6/arch/x86/kernel/smpboot_64.c =================================================================== --- linux-2.6.orig/arch/x86/kernel/smpboot_64.c 2007-11-16 19:35:52.193687882 -0800 +++ linux-2.6/arch/x86/kernel/smpboot_64.c 2007-11-16 19:36:17.238687579 -0800 @@ -556,22 +556,6 @@ static int __cpuinit do_boot_cpu(int cpu return -1; } - /* Allocate node local memory for AP pdas */ - if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { - struct x8664_pda *newpda, *pda; - int node = cpu_to_node(cpu); - pda = cpu_pda(cpu); - newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC, - node); - if (newpda) { - memcpy(newpda, pda, sizeof (struct x8664_pda)); - cpu_pda(cpu) = newpda; - } else - printk(KERN_ERR - "Could not allocate node local PDA for CPU %d on node %d\n", - cpu, node); - } - alternatives_smp_switch(1); c_idle.idle = get_idle_for_cpu(cpu); Index: linux-2.6/arch/x86/kernel/head64.c =================================================================== --- linux-2.6.orig/arch/x86/kernel/head64.c 2007-11-16 19:57:27.082180371 -0800 +++ linux-2.6/arch/x86/kernel/head64.c 2007-11-16 20:02:02.600829296 -0800 @@ -20,6 +20,12 @@ #include #include +/* + * Only used before the per cpu areas are setup. 
Use of the entries for non-possible + cpus continues after boot. + */ +static struct x8664_pda boot_cpu_pda[NR_CPUS]; + static void __init zap_identity_mappings(void) { pgd_t *pgd = pgd_offset_k(0UL); Index: linux-2.6/include/asm-x86/pda.h =================================================================== --- linux-2.6.orig/include/asm-x86/pda.h 2007-11-16 19:43:11.046180084 -0800 +++ linux-2.6/include/asm-x86/pda.h 2007-11-16 19:43:34.160333462 -0800 @@ -39,7 +39,6 @@ struct x8664_pda { } ____cacheline_aligned_in_smp; extern struct x8664_pda *_cpu_pda[]; -extern struct x8664_pda boot_cpu_pda[]; #define cpu_pda(i) (_cpu_pda[i])