Index: linux-2.6.19-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.19-mm1.orig/include/linux/slub_def.h	2006-12-13 19:59:29.000000000 -0800
+++ linux-2.6.19-mm1/include/linux/slub_def.h	2006-12-13 20:28:52.000000000 -0800
@@ -23,15 +23,17 @@
 	int flush_active;
 	struct delayed_work flush;
 #endif
+#ifdef CONFIG_NUMA
+	spinlock_t list_lock;
+	struct list_head partial;
+#endif
 } ____cacheline_aligned_in_smp;
 
 /*
  * Slab cache management.
  */
 struct kmem_cache {
-	spinlock_t list_lock;	/* Protecty partial list and nr_partial */
-	struct list_head partial;
-	unsigned long nr_partial;
+	atomic_long_t nr_partial;
 	atomic_long_t nr_slabs;	/* Total slabs used */
 	int offset;		/* Free pointer offset. */
 	int size;		/* Total size of an object */
@@ -49,8 +51,9 @@
 	struct list_head list;	/* List of slabs */
 #ifdef CONFIG_NUMA
 	struct active_slab *active[NR_CPUS];
+	struct active_slab *node;
 #else
-	struct active_slab active[NR_CPUS] ____cacheline_aligned_in_smp;
+	struct active_slab active[NR_CPUS + 1] ____cacheline_aligned_in_smp;
 #endif
 };
 
Index: linux-2.6.19-mm1/mm/slub.c
===================================================================
--- linux-2.6.19-mm1.orig/mm/slub.c	2006-12-13 20:16:40.000000000 -0800
+++ linux-2.6.19-mm1/mm/slub.c	2006-12-13 20:42:19.000000000 -0800
@@ -97,8 +97,11 @@
 
 #define ACTIVE_SLAB_SLAB &kmalloc_caches[ACTIVE_SLAB_NR - KMALLOC_SHIFT_LOW]
 #define ACTIVE_SLAB(__s,__cpu) ((__s)->active[__cpu])
+
+#define ACTIVE_NODE(__s, __page) (&(__s)->node[page_to_nid(__page)])
 #else
 #define ACTIVE_SLAB(__s,__cpu) (&(__s)->active[__cpu])
+#define ACTIVE_NODE(__s, __page) (&(__s)->active[NR_CPUS])
 #endif
 
 /********************************************************************
@@ -232,23 +235,27 @@
  */
 static void __always_inline add_partial(struct kmem_cache *s, struct page *page)
 {
+	struct active_slab *a = ACTIVE_NODE(s, page);
+
 	if (page->inuse == s->objects) {
 		printk("Slab %s page=%p adding fully used slab\n", s->name, page);
 		dump_stack();
 	}
-	spin_lock(&s->list_lock);
-	s->nr_partial++;
-	list_add_tail(&page->lru, &s->partial);
-	spin_unlock(&s->list_lock);
+	spin_lock(&a->list_lock);
+	list_add_tail(&page->lru, &a->partial);
+	spin_unlock(&a->list_lock);
+	atomic_long_inc(&s->nr_partial);
 }
 
 static void __always_inline remove_partial(struct kmem_cache *s,
						struct page *page)
 {
-	spin_lock(&s->list_lock);
+	struct active_slab *a = ACTIVE_NODE(s, page);
+
+	spin_lock(&a->list_lock);
 	list_del(&page->lru);
-	s->nr_partial--;
-	spin_unlock(&s->list_lock);
+	spin_unlock(&a->list_lock);
+	atomic_long_dec(&s->nr_partial);
 }
 
 /*
@@ -261,7 +268,7 @@
 {
 	if (bit_spin_trylock(PG_locked, &page->flags)) {
 		list_del(&page->lru);
-		s->nr_partial--;
+		atomic_long_dec(&s->nr_partial);
 		return 1;
 	}
 	return 0;
@@ -289,15 +296,25 @@
  */
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page *page;
+	struct page *page = NULL;
+	struct zone **z;
 
 	/* Racy check. If we mistakenly see no partial slabs then we
 	 * just allocate an empty slab. If we mistakenly try to get a
 	 * partial slab then get_partials() will return NULL.
	 */
-	if (!s->nr_partial)
+	if (!atomic_long_read(&s->nr_partial))
 		return NULL;
 
+	for (z = NODE_DATA(node)->zonelists[policy_zone]; *z; z++) {
+		int node = zone_to_nid(*z);
+		struct active_slab *a = ACTIVE_NODE(s, node);
+
+		if (!list_empty(&a->partial)) {
+			/* Check if there is anything on the list */
+		}
+	}
+
 	spin_lock(&s->list_lock);
 	page = numa_partial(s, flags, node);
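
For readers skimming the diff, the following is a small, self-contained userspace sketch of the locking scheme the patch moves to: each node carries its own spinlock-protected partial list, while the cache keeps only a single atomic nr_partial counter that the allocation fast path may read without taking any lock. It is not part of the patch and not kernel code; names such as struct cache, struct node_partial and MAX_NODES are invented for illustration.

/*
 * Illustrative sketch only -- not part of the patch above. Models the
 * data layout the diff introduces: a spinlock + partial list per node,
 * plus one cache-wide atomic counter updated outside any list lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_NODES 4			/* invented stand-in for MAX_NUMNODES */

struct slab {				/* stands in for struct page */
	struct slab *prev, *next;
	int node;			/* stands in for page_to_nid() */
};

struct node_partial {			/* per-node part of struct active_slab */
	pthread_spinlock_t list_lock;
	struct slab list;		/* circular list head */
};

struct cache {				/* stands in for struct kmem_cache */
	struct node_partial node[MAX_NODES];
	atomic_long nr_partial;		/* cache-wide, read locklessly */
};

static void cache_init(struct cache *s)
{
	for (int n = 0; n < MAX_NODES; n++) {
		pthread_spin_init(&s->node[n].list_lock, PTHREAD_PROCESS_PRIVATE);
		s->node[n].list.prev = s->node[n].list.next = &s->node[n].list;
	}
	atomic_init(&s->nr_partial, 0);
}

/* add_partial(): take only the owning node's lock, then bump the counter */
static void add_partial(struct cache *s, struct slab *page)
{
	struct node_partial *a = &s->node[page->node];

	pthread_spin_lock(&a->list_lock);
	page->next = &a->list;			/* list_add_tail() by hand */
	page->prev = a->list.prev;
	a->list.prev->next = page;
	a->list.prev = page;
	pthread_spin_unlock(&a->list_lock);
	atomic_fetch_add(&s->nr_partial, 1);
}

/* get_partial() fast path: racy, lock-free emptiness check */
static int may_have_partial(struct cache *s)
{
	return atomic_load(&s->nr_partial) != 0;
}

int main(void)
{
	struct cache s;
	struct slab sl = { .node = 1 };

	cache_init(&s);
	add_partial(&s, &sl);
	printf("partial slabs: %ld, fast check: %d\n",
		atomic_load(&s.nr_partial), may_have_partial(&s));
	return 0;
}

The point of the split is that add_partial()/remove_partial() on different nodes no longer contend on one cache-wide lock, and the racy !nr_partial check stays cheap because it never touches the per-node locks.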