---
 include/linux/slub_def.h |    2 
 mm/slub.c                |  127 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 97 insertions(+), 32 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-04 20:07:15.601362536 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-04 20:30:47.338652367 -0800
@@ -50,6 +50,8 @@ struct kmem_cache_node {
 #ifdef CONFIG_SLUB_DEBUG
 	struct list_head full;
 #endif
+	int nr_free;		/* Number of slab pages on the free queue */
+	struct list_head freelist;	/* Queue of ready-to-use slab pages */
 };
 
 /*
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-04 20:11:03.638208980 -0800
+++ linux-2.6/mm/slub.c	2008-02-04 22:01:16.777036681 -0800
@@ -1106,45 +1106,94 @@ static void setup_object(struct kmem_cac
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page *page;
+	struct page *page = NULL;
 	struct kmem_cache_node *n;
-	void *start;
-	void *last;
-	void *p;
+	LIST_HEAD(list);
+	int nr = 0;
+	int limit = MIN_PARTIAL;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
+	n = get_node(s, node);
+	if (!n)
+		limit = 1;
 
-	n = get_node(s, page_to_nid(page));
-	if (n)
-		atomic_long_inc(&n->nr_slabs);
-	page->slab = s;
-	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		SetSlabDebug(page);
+	if (n && n->nr_free) {
+		unsigned long flags;
 
-	start = page_address(page);
-	page->end = start + 1;
+		printk(KERN_DEBUG "Getting slab from queue %s\n", s->name);
+		spin_lock_irqsave(&n->list_lock, flags);
+		if (n->nr_free) {
+			page = container_of(n->freelist.prev, struct page, lru);
+			list_del(&page->lru);
+			n->nr_free--;
+		}
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		if (page)
+			goto out;
+	}
 
-	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+	/* Prepare a batch of new pages for the per node free queue */
+	while (nr < limit) {
+		void *start;
+		void *last;
+		void *p;
 
-	last = start;
-	for_each_object(p, s, start) {
-		setup_object(s, page, last);
-		set_freepointer(s, last, p);
-		last = p;
-	}
-	setup_object(s, page, last);
-	set_freepointer(s, last, page->end);
+		printk(KERN_DEBUG "Extending free queue %s\n", s->name);
+		page = allocate_slab(s,
+			flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+		if (!page)
+			break;
 
-	page->freelist = start;
-	page->inuse = 0;
+		page->slab = s;
+		page->flags |= 1 << PG_slab;
+		if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
+				SLAB_STORE_USER | SLAB_TRACE))
+			SetSlabDebug(page);
+
+		start = page_address(page);
+		page->end = start + 1;
+
+		if (unlikely(s->flags & SLAB_POISON))
+			memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+
+		last = start;
+		for_each_object(p, s, start) {
+			setup_object(s, page, last);
+			set_freepointer(s, last, p);
+			last = p;
+		}
+		setup_object(s, page, last);
+		set_freepointer(s, last, page->end);
+
+		page->freelist = start;
+		page->inuse = 0;
+
+		/*
+		 * The last page of the batch is handed to the caller, and a
+		 * page that fell back to a foreign node is never queued.
+		 */
+		if (nr == limit - 1 || page_to_nid(page) != node)
+			break;
+
+		list_add(&page->lru, &list);
+		nr++;
+	}
+
+	if (!page && nr) {
+		/* Allocation failed: fall back to a page prepared earlier */
+		printk(KERN_DEBUG "Taking page off free queue %s\n", s->name);
+		page = container_of(list.next, struct page, lru);
+		list_del(&page->lru);
+		nr--;
+	}
+
+	if (n && nr) {
+		unsigned long flags;
+
+		/* Put the spare pages onto the per node free queue */
+		printk(KERN_DEBUG "Moving free queue %s\n", s->name);
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_splice(&list, &n->freelist);
+		n->nr_free += nr;
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		atomic_long_add(nr, &n->nr_slabs);
+	}
+
+	if (page) {
+		n = get_node(s, page_to_nid(page));
+		if (n)
+			atomic_long_inc(&n->nr_slabs);
+	}
 out:
 	return page;
 }
@@ -1195,11 +1244,24 @@ static void free_slab(struct kmem_cache
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+	struct page *p2;
+	unsigned long flags;
 
-	atomic_long_dec(&n->nr_slabs);
-	reset_page_mapcount(page);
-	__ClearPageSlab(page);
-	free_slab(s, page);
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_add(&page->lru, &n->freelist);
+	n->nr_free++;
+	if (n->nr_free >= MIN_PARTIAL) {
+		/* Queue full: return the queued pages to the page allocator */
+		printk(KERN_DEBUG "Draining freelist for %s\n", s->name);
+		list_for_each_entry_safe(page, p2, &n->freelist, lru) {
+			list_del(&page->lru);
+			reset_page_mapcount(page);
+			__ClearPageSlab(page);
+			free_slab(s, page);
+			n->nr_free--;
+			atomic_long_dec(&n->nr_slabs);
+		}
+	}
+	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -1997,6 +2059,7 @@ static void init_kmem_cache_node(struct
 #ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
 #endif
+	n->nr_free = 0;
+	INIT_LIST_HEAD(&n->freelist);
 }
 
 #ifdef CONFIG_SMP
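
For illustration, here is a minimal userspace sketch of the queueing
discipline the patch introduces: a per node queue of prebuilt slabs that
is refilled in batches when empty and drained completely once it reaches
a threshold. All names (mock_node, mock_new_slab, QUEUE_LIMIT, ...) are
hypothetical stand-ins, not kernel APIs; locking, NUMA fallback and
object setup are left out.

#include <stdio.h>
#include <stdlib.h>

#define QUEUE_LIMIT 5			/* stands in for MIN_PARTIAL */

struct mock_slab {
	struct mock_slab *next;
	int id;
};

struct mock_node {
	struct mock_slab *freelist;	/* queue of ready-to-use slabs */
	int nr_free;
};

static int next_id;

/* Stands in for allocate_slab() plus object setup in new_slab() */
static struct mock_slab *build_slab(void)
{
	struct mock_slab *s = malloc(sizeof(*s));

	if (s)
		s->id = next_id++;
	return s;
}

/* Mirrors new_slab(): take from the queue, else refill it in a batch */
static struct mock_slab *mock_new_slab(struct mock_node *n)
{
	struct mock_slab *s;
	int i;

	if (n->freelist) {
		s = n->freelist;
		n->freelist = s->next;
		n->nr_free--;
		return s;
	}
	/* Queue empty: build QUEUE_LIMIT - 1 spares, return one more */
	for (i = 0; i < QUEUE_LIMIT - 1; i++) {
		s = build_slab();
		if (!s)
			break;
		s->next = n->freelist;
		n->freelist = s;
		n->nr_free++;
	}
	s = build_slab();
	if (!s && n->freelist) {
		/* Mirror the fallback: reuse a spare if the build failed */
		s = n->freelist;
		n->freelist = s->next;
		n->nr_free--;
	}
	return s;
}

/* Mirrors discard_slab(): queue the slab, drain once over the limit */
static void mock_discard_slab(struct mock_node *n, struct mock_slab *s)
{
	s->next = n->freelist;
	n->freelist = s;
	n->nr_free++;
	if (n->nr_free >= QUEUE_LIMIT) {
		while (n->freelist) {
			s = n->freelist;
			n->freelist = s->next;
			free(s);
			n->nr_free--;
		}
	}
}

int main(void)
{
	struct mock_node node = { NULL, 0 };
	struct mock_slab *a, *b;

	a = mock_new_slab(&node);	/* batch refill: 4 queued, 1 returned */
	b = mock_new_slab(&node);	/* served from the queue, no refill */
	printf("queued after two allocations: %d\n", node.nr_free);
	mock_discard_slab(&node, a);
	mock_discard_slab(&node, b);	/* reaches the limit and drains */
	printf("queued after drain: %d\n", node.nr_free);
	return 0;
}

The design trades memory for allocation latency: refilling MIN_PARTIAL
slabs at a time amortizes calls into the page allocator and acquisitions
of the node's list_lock, while draining the whole queue once it reaches
the threshold bounds the memory parked on the free queue.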