---
 mm/slub.c |   24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-04 18:29:05.000000000 -0800
+++ linux-2.6/mm/slub.c	2008-02-04 18:36:09.000000000 -0800
@@ -1081,12 +1081,15 @@ static struct page *new_slab(struct kmem
 {
	struct page *page;
	struct kmem_cache_node *n;
+	int locked = 0;
+	unsigned long irqflags;
	void *start;
	void *last;
	void *p;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

+redo:
	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
@@ -1117,7 +1120,18 @@ static struct page *new_slab(struct kmem

	page->freelist = start;
	page->inuse = 0;
+
+	if (n && n->nr_partial < MIN_PARTIAL) {
+		if (!locked) {
+			locked = 1;
+			spin_lock_irqsave(&n->list_lock, irqflags);
+		}
+		__add_partial(n, page, 1);
+	}
+
 out:
+	if (locked)
+		spin_unlock_irqrestore(&n->list_lock, irqflags);
	return page;
 }

@@ -1197,15 +1211,21 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void __add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
+}
+
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
+{
+	spin_lock(&n->list_lock);
+	__add_partial(n, page, tail);
	spin_unlock(&n->list_lock);
 }
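
One thing to note from the hunk offsets: __add_partial() is introduced around
line 1210, below new_slab() at line 1081, so the call added to new_slab()
needs a declaration of the helper in scope before this builds. A minimal
sketch, assuming no other patch in the series moves the partial-list helpers
above new_slab(), is a forward declaration placed before new_slab():

	/*
	 * Forward declaration: new_slab() adds the fresh slab to the
	 * partial list while already holding n->list_lock, so it calls
	 * the unlocked __add_partial(); other paths keep using
	 * add_partial(), which takes the lock itself.
	 */
	static inline void __add_partial(struct kmem_cache_node *n,
					struct page *page, int tail);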