Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2009-10-19 23:49:53.000000000 -0500
+++ linux-2.6/include/linux/slub_def.h	2009-10-19 23:53:29.000000000 -0500
@@ -34,10 +34,12 @@
 	ORDER_FALLBACK,	/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
+#define NR_SLUB_PAGES_PER_CPU 6
+
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
-	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
+	struct page *page[NR_SLUB_PAGES_PER_CPU]; /* The slabs from which we are allocating */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2009-10-19 23:53:32.000000000 -0500
+++ linux-2.6/mm/slub.c	2009-10-20 00:19:36.000000000 -0500
@@ -1856,8 +1856,19 @@
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, s->objsize);
-	if (likely(page == __this_cpu_read(s->cpu_slab->page) &&
-			__this_cpu_read(s->cpu_slab->node) >= 0)) {
+	/* Determine whether page is one of the active per cpu slabs */
+	/* FIXME: move these declarations to the top of kmem_cache_free() */
+	struct page *p = NULL;
+	int i;
+
+	for (i = 0; i < NR_SLUB_PAGES_PER_CPU; i++) {
+		p = __this_cpu_read(s->cpu_slab->page[i]);
+
+		if (!p || p == page)
+			break;
+	}
+
+	/*
+	 * Fast path only if the page was actually found: p may hold the
+	 * last (non-matching) slab when the scan exhausts all slots.
+	 */
+	if (likely(p == page) && __this_cpu_read(s->cpu_slab->node) >= 0) {
 		set_freepointer(s, object,
 				__this_cpu_read(s->cpu_slab->freelist));
 		__this_cpu_write(s->cpu_slab->freelist, object);
 		stat(s, FREE_FASTPATH);