---
 mm/slub.c | 32 --------------------------------
 1 file changed, 32 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-08-03 22:05:44.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-08-03 22:09:35.000000000 -0700
@@ -2463,13 +2463,8 @@ int kmem_cache_shrink(struct kmem_cache
 	struct kmem_cache_node *n;
 	struct page *page;
 	struct page *t;
-	struct list_head *slabs_by_inuse =
-		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
 	unsigned long flags;
 
-	if (!slabs_by_inuse)
-		return -ENOMEM;
-
 	flush_all(s);
 	for_each_online_node(node) {
 		n = get_node(s, node);
@@ -2477,17 +2472,8 @@ int kmem_cache_shrink(struct kmem_cache
 		if (!n->nr_partial)
 			continue;
 
-		for (i = 0; i < s->objects; i++)
-			INIT_LIST_HEAD(slabs_by_inuse + i);
-
 		spin_lock_irqsave(&n->list_lock, flags);
 
-		/*
-		 * Build lists indexed by the items in use in each slab.
-		 *
-		 * Note that concurrent frees may occur while we hold the
-		 * list_lock. page->inuse here is the upper limit.
-		 */
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
 			if (!page->inuse && slab_trylock(page)) {
 				/*
@@ -2499,28 +2485,10 @@ int kmem_cache_shrink(struct kmem_cache
 				n->nr_partial--;
 				slab_unlock(page);
 				discard_slab(s, page);
-			} else {
-				if (n->nr_partial > MAX_PARTIAL)
-					list_move(&page->lru,
-						slabs_by_inuse + page->inuse);
 			}
 		}
-
-		if (n->nr_partial <= MAX_PARTIAL)
-			goto out;
-
-		/*
-		 * Rebuild the partial list with the slabs filled up most
-		 * first and the least used slabs at the end.
-		 */
-		for (i = s->objects - 1; i >= 0; i--)
-			list_splice(slabs_by_inuse + i, n->partial.prev);
-
- out:
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
-
-	kfree(slabs_by_inuse);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
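
For readers who want to see what the deleted lines were doing, here is a minimal
userspace sketch of the same idea: bin the partial slabs into an array of lists
indexed by their inuse count, then splice the buckets back from fullest to
emptiest so the most-used slabs end up at the front of the partial list. The
struct fake_slab type, MAX_OBJECTS, and the simplified list helpers below are
illustrative stand-ins, not the kernel's implementations.

/*
 * Hypothetical, userspace-only sketch of the mechanism this patch removes.
 * MAX_OBJECTS stands in for s->objects, struct fake_slab for struct page.
 */
#include <stdio.h>

#define MAX_OBJECTS 8			/* stand-in for s->objects */

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Unlink @entry and append it to @head (same idea as the kernel helper). */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	list_add_tail(entry, head);
}

/* Kernel-style list_splice(): insert the entries of @list right after @head. */
static void list_splice(struct list_head *list, struct list_head *head)
{
	struct list_head *first = list->next, *last = list->prev;

	if (first == list)		/* nothing to splice */
		return;
	first->prev = head;
	last->next = head->next;
	head->next->prev = last;
	head->next = first;
}

struct fake_slab {			/* stand-in for struct page */
	struct list_head lru;		/* must stay first for the casts below */
	int inuse;			/* objects allocated from this slab */
};

int main(void)
{
	struct list_head partial, slabs_by_inuse[MAX_OBJECTS];
	struct fake_slab slabs[] = {
		{ .inuse = 3 }, { .inuse = 7 }, { .inuse = 1 },
		{ .inuse = 5 }, { .inuse = 2 },
	};
	struct list_head *pos, *tmp;
	int i;

	INIT_LIST_HEAD(&partial);
	for (i = 0; i < MAX_OBJECTS; i++)
		INIT_LIST_HEAD(&slabs_by_inuse[i]);
	for (i = 0; i < 5; i++)
		list_add_tail(&slabs[i].lru, &partial);

	/* Bin each partial slab by how many of its objects are in use. */
	for (pos = partial.next, tmp = pos->next; pos != &partial;
	     pos = tmp, tmp = pos->next) {
		struct fake_slab *s = (struct fake_slab *)pos;

		list_move_tail(&s->lru, &slabs_by_inuse[s->inuse]);
	}

	/* Rebuild the partial list: fullest slabs first, emptiest last. */
	for (i = MAX_OBJECTS - 1; i >= 0; i--)
		list_splice(&slabs_by_inuse[i], partial.prev);

	for (pos = partial.next; pos != &partial; pos = pos->next)
		printf("slab inuse=%d\n", ((struct fake_slab *)pos)->inuse);
	return 0;
}

Built with a plain cc, this should print the slabs in 7, 5, 3, 2, 1 order,
i.e. fullest first, which is the ordering the removed code imposed on the
per-node partial list when it exceeded MAX_PARTIAL.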