From d15b887b5d485c061881317db203d178e60532ec Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 9 Aug 2007 07:51:44 -0700
Subject: SLUB: Move count_partial()

Move the counting function for objects in partial slabs so that it is
placed before kmem_cache_shrink. We will need to use it to establish the
fragmentation ratio of per node slab lists.

Signed-off-by: Christoph Lameter
---
 mm/slub.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-17 13:35:55.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-17 13:56:45.000000000 -0700
@@ -2620,6 +2620,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3356,19 +3369,6 @@ static int list_locations(struct kmem_ca
 	return n;
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 enum slab_stat_type {
 	SL_FULL,
 	SL_PARTIAL,