Index: linux-2.6.21-rc4-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc4-mm1.orig/include/linux/slub_def.h	2007-03-22 20:10:21.000000000 -0700
+++ linux-2.6.21-rc4-mm1/include/linux/slub_def.h	2007-03-22 20:16:30.000000000 -0700
@@ -55,7 +55,6 @@ struct kmem_cache {
 #ifdef CONFIG_NUMA
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-	struct page *cpu_slab[NR_CPUS];
 };
 
 /*
Index: linux-2.6.21-rc4-mm1/mm/slub.c
===================================================================
--- linux-2.6.21-rc4-mm1.orig/mm/slub.c	2007-03-22 20:11:12.000000000 -0700
+++ linux-2.6.21-rc4-mm1/mm/slub.c	2007-03-22 20:19:08.000000000 -0700
@@ -102,6 +102,12 @@ int slab_is_available(void)
 {
 	return slab_state >= UP;
 }
+#define MAX_SLAB 256
+
+struct kmem_cache slabs[MAX_SLAB];
+
+DEFINE_PER_CPU(struct page *, cpu_slab[MAX_SLAB]);
+
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 LIST_HEAD(slab_caches);
@@ -1097,17 +1103,18 @@ static void flush_all(struct kmem_cache
  * Fastpath is not possible if we need to get a new slab or have
  * debugging enabled (which means all slabs are marked with PageError)
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(void *x,
 		gfp_t gfpflags, int node)
 {
 	struct page *page;
 	void **object;
 	unsigned long flags;
 	int cpu;
+	struct kmem_cache *s = TO_SLAB(x);
+	int slabnr = TO_NR(x);
 
 	local_irq_save(flags);
-	cpu = smp_processor_id();
-	page = s->cpu_slab[cpu];
+	page = __get_cpu_var(cpu_slab)[slabnr];
 	if (!page)
 		goto new_slab;
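
The hunks above use TO_SLAB() and TO_NR() without defining them; presumably
they live in a part of the patch not shown here. A minimal sketch of how they
could work, given that every kmem_cache now sits in the static slabs[] array:
the pointer handed to slab_alloc() points into that array, so both the cache
and its per-cpu index can be recovered from it. The definitions below are an
assumption for illustration, not part of the patch.

/*
 * Sketch only -- TO_SLAB()/TO_NR() are not defined in the hunks above.
 * Assuming every cache is an element of the static slabs[] array, the
 * slab number falls out of plain pointer arithmetic:
 */
#define TO_SLAB(x)	((struct kmem_cache *)(x))
#define TO_NR(x)	((int)(TO_SLAB(x) - slabs))

With that layout the allocation fastpath reads a single per-cpu array,
__get_cpu_var(cpu_slab)[slabnr], instead of the NR_CPUS-sized cpu_slab[]
array previously embedded in each kmem_cache, which is the point of the
change: the per-cpu data for all caches moves out of struct kmem_cache and
into the .data.percpu section.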