---
 mm/slub.c |   85 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 55 insertions(+), 30 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-05 22:24:57.389264077 -0800
+++ linux-2.6/mm/slub.c	2008-02-05 22:26:33.049716218 -0800
@@ -239,6 +239,9 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
+static void __add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail);
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
@@ -1107,45 +1110,61 @@ static void setup_object(struct kmem_cac
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	struct kmem_cache_node *n;
+	struct kmem_cache_node *n = NULL;
 	void *start;
 	void *last;
 	void *p;
+	int nr_partial = n ? n->nr_partial : MIN_PARTIAL;
+	int slabs = max_t(int, 1, nr_partial - MIN_PARTIAL);
+	struct page *l[MIN_PARTIAL];
+	int i;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
+	for (i = 0; i < slabs; i++) {
+		l[i] = page = allocate_slab(s,
+			flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+		if (!page)
+			break;
+		page->slab = s;
 
-	n = get_node(s, page_to_nid(page));
-	if (n)
-		atomic_long_inc(&n->nr_slabs);
-	page->slab = s;
-	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		SetSlabDebug(page);
+		page->flags |= 1 << PG_slab;
+		if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
+				SLAB_STORE_USER | SLAB_TRACE))
+			SetSlabDebug(page);
+
+		start = page_address(page);
+		page->end = start + 1;
+
+		if (unlikely(s->flags & SLAB_POISON))
+			memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+
+		last = start;
+		for_each_object(p, s, start) {
+			setup_object(s, page, last);
+			set_freepointer(s, last, p);
+			last = p;
+		}
+		setup_object(s, page, last);
+		set_freepointer(s, last, page->end);
 
-	start = page_address(page);
-	page->end = start + 1;
+		page->freelist = start;
+		page->inuse = 0;
+		stat(get_cpu_slab(s, raw_smp_processor_id()), ALLOC_SLAB);
+	}
 
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
 
-	last = start;
-	for_each_object(p, s, start) {
-		setup_object(s, page, last);
-		set_freepointer(s, last, p);
-		last = p;
-	}
-	setup_object(s, page, last);
-	set_freepointer(s, last, page->end);
+	n = get_node(s, page_to_nid(page));
+	if (n) {
+		int j;
+		unsigned long flags;
 
-	page->freelist = start;
-	page->inuse = 0;
-out:
+		atomic_long_add(i, &n->nr_slabs);
+		spin_lock_irqsave(&n->list_lock, flags);
+		for (j = 0; j < i - 1; j++)
+			__add_partial(n, l[j], 1);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
 	return page;
 }
 
@@ -1227,15 +1246,21 @@ static __always_inline int slab_trylock(
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache_node *n,
+static void __add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
+}
+
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
+{
+	spin_lock(&n->list_lock);
+	__add_partial(n, page, tail);
 	spin_unlock(&n->list_lock);
 }
 
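
The locking split above follows a common kernel idiom: add_partial() keeps taking n->list_lock itself, while the new lock-free __add_partial() assumes the caller already holds the lock, so new_slab() can push all of its surplus slabs onto the node's partial list under a single lock acquisition. Below is a minimal user-space sketch of that pattern; the pthread mutex and the list/item names are illustrative stand-ins, not SLUB code.

#include <pthread.h>
#include <stddef.h>

struct item {
	struct item *next;
};

struct partial_list {
	pthread_mutex_t lock;	/* plays the role of n->list_lock */
	struct item *head;
	int nr;			/* plays the role of n->nr_partial */
};

/* Lock-free variant: caller must already hold list->lock. */
static void __partial_add(struct partial_list *list, struct item *it)
{
	it->next = list->head;
	list->head = it;
	list->nr++;
}

/* Locking wrapper for callers that add a single item. */
static void partial_add(struct partial_list *list, struct item *it)
{
	pthread_mutex_lock(&list->lock);
	__partial_add(list, it);
	pthread_mutex_unlock(&list->lock);
}

/*
 * Batch insertion: one lock/unlock round trip for n items, which is
 * what new_slab() does above with the extra slabs it allocated.
 */
static void partial_add_batch(struct partial_list *list,
				struct item **items, int n)
{
	int i;

	pthread_mutex_lock(&list->lock);
	for (i = 0; i < n; i++)
		__partial_add(list, items[i]);
	pthread_mutex_unlock(&list->lock);
}

The patch has the same shape: add_partial() keeps its external behaviour for existing callers, and the __add_partial() prototype is hoisted near the top of mm/slub.c so new_slab() can call it before its definition.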