Index: linux-2.6.20-rc1/include/linux/slub_def.h
===================================================================
--- linux-2.6.20-rc1.orig/include/linux/slub_def.h	2006-12-15 18:50:47.000000000 -0800
+++ linux-2.6.20-rc1/include/linux/slub_def.h	2006-12-15 18:51:51.000000000 -0800
@@ -31,9 +31,8 @@
  * Must be less than a cacheline for bootstrap to work.
  */
 struct node_slab {
-	spinlock_t list_lock;
-	struct list_head partial;
-	unsigned long nr_partial;
+	struct page *page;
+	atomic_long_t nr_partial;
 	atomic_long_t nr_slabs;		/* Total slabs used */
 } ____cacheline_aligned_in_smp;
Index: linux-2.6.20-rc1/mm/slub.c
===================================================================
--- linux-2.6.20-rc1.orig/mm/slub.c	2006-12-15 18:51:55.000000000 -0800
+++ linux-2.6.20-rc1/mm/slub.c	2006-12-15 19:13:44.000000000 -0800
@@ -183,37 +183,12 @@
 {
 	struct node_slab *n = NODE_INFO(s, page_to_nid(page));
+	struct page *old;
 
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void __always_inline remove_partial(struct kmem_cache *s,
-						struct page *page)
-{
-	struct node_slab *n = NODE_INFO(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
-	list_del(&page->lru);
-	n->nr_partial--;
-	spin_unlock(&n->list_lock);
-}
-
-/*
- * Lock page and remove it from the partial list
- *
- * Must hold list_lock
- */
-static __always_inline int lock_and_del_slab(struct node_slab *n,
-							struct page *page)
-{
-	if (slab_trylock(page)) {
-		list_del(&page->lru);
-		n->nr_partial--;
-		return 1;
-	}
-	return 0;
+	do {
+		old = n->page;
+		page->lru.next = (struct list_head *)old;
+	} while (cmpxchg(&n->page, old, page) != old);
+	atomic_long_inc(&n->nr_partial);
 }
 
 /*
@@ -223,21 +198,27 @@
 {
 	struct page *page;
+	struct page *newpage;
 
+redo:
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
 	 * just allocate an empty slab. If we mistakenly try to get a
 	 * partial slab then get_partials() will return NULL.
 	 */
-	if (!n->nr_partial)
+	if (!atomic_long_read(&n->nr_partial))
 		return NULL;
 
-	spin_lock(&n->list_lock);
-	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
-			goto out;
-	page = NULL;
-out:
-	spin_unlock(&n->list_lock);
+	page = n->page;
+	if (!page)
+		return NULL;
+
+	slab_lock(page);
+	newpage = (struct page *)page->lru.next;
+
+	if (cmpxchg(&n->page, page, newpage) != page) {
+		slab_unlock(page);
+		goto redo;
+	}
+	atomic_long_dec(&n->nr_partial);
 	return page;
 }
 
@@ -846,7 +827,7 @@
 	/*
 	 * Slab is empty.
 	 */
-	remove_partial(s, page);
+	/* remove_partial(s, page); too expensive */
 	slab_unlock(page);
 	discard_slab(s, page);
 out:
@@ -1037,8 +1018,8 @@
 #else
 	n = s->node;
 #endif
-	spin_lock_init(&n->list_lock);
-	INIT_LIST_HEAD(&n->partial);
+	atomic_long_set(&n->nr_partial, 0);
+	n->page = NULL;
 	if (page) {
 		putback_slab(s, page);
 		atomic_long_set(&n->nr_slabs, 1);
@@ -1282,17 +1263,18 @@
 static int free_list(struct node_slab *n)
 {
 	int slabs_inuse = 0;
-	unsigned long flags;
 	struct page *page, *h;
+	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, h, &n->partial, lru)
-		if (!page->inuse) {
-			list_del(&n->partial);
-			discard_slab(page->slab, page);
-		} else
-			slabs_inuse++;
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	local_irq_save(flags);
+	while (1) {
+		page = get_partial_node(n);
+		if (!page)
+			break;
+		discard_slab(page->slab, page);
+		slabs_inuse++;
+	}
+	local_irq_restore(flags);
 
 	return slabs_inuse;
 }
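
The patch effectively turns the per-node partial list into a singly linked stack whose
head (node_slab->page) is swapped with cmpxchg instead of being guarded by list_lock,
reusing page->lru.next as the link field. Below is a minimal userspace sketch of that
push/pop pattern, assuming C11 <stdatomic.h>; the names (struct slab, struct node,
node_push, node_pop) are illustrative only and are not SLUB identifiers.

/*
 * Sketch of the cmpxchg-based partial "stack". Not kernel code.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct slab {
	struct slab *next;	/* plays the role of page->lru.next in the patch */
	int id;
};

struct node {
	_Atomic(struct slab *) head;	/* plays the role of node_slab->page */
	atomic_long nr_partial;
};

/* Analogous to add_partial(): publish a slab at the head of the stack. */
static void node_push(struct node *n, struct slab *s)
{
	struct slab *old = atomic_load(&n->head);

	do {
		s->next = old;		/* link before publishing the new head */
	} while (!atomic_compare_exchange_weak(&n->head, &old, s));
	atomic_fetch_add(&n->nr_partial, 1);
}

/* Analogous to get_partial_node(): pop the head slab, NULL when empty. */
static struct slab *node_pop(struct node *n)
{
	struct slab *old = atomic_load(&n->head);

	do {
		if (!old)
			return NULL;
	} while (!atomic_compare_exchange_weak(&n->head, &old, old->next));
	atomic_fetch_sub(&n->nr_partial, 1);
	return old;
}

int main(void)
{
	struct node n;
	struct slab a = { .id = 1 }, b = { .id = 2 };
	struct slab *first, *second;

	atomic_init(&n.head, NULL);
	atomic_init(&n.nr_partial, 0);

	node_push(&n, &a);
	node_push(&n, &b);
	first = node_pop(&n);
	second = node_pop(&n);
	printf("popped %d then %d (LIFO order)\n", first->id, second->id);
	return 0;
}

The sketch only demonstrates the head-swap logic; the patch itself additionally takes
slab_lock() on the candidate page in get_partial_node() and retries if the head has
moved, which the sketch leaves out for brevity.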