From 210cdcc0d3aea0851db663ce721229ac0eca471c Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 9 Aug 2007 07:51:47 -0700
Subject: [PATCH] SLUB: Add KICKABLE to avoid repeated kick() attempts

Add a flag KICKABLE to be set on slabs with a defragmentation method.
Clear the flag if a kick action is not successful in reducing the
number of objects in a slab. The KICKABLE flag is set again when all
objects of the slab have been allocated and it is removed from the
partial lists.

Reviewed-by: Rik van Riel
Signed-off-by: Christoph Lameter
---
 mm/slub.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-01 15:55:00.798018771 -0800
+++ linux-2.6/mm/slub.c	2008-03-01 15:56:26.642422798 -0800
@@ -101,6 +101,7 @@
  */
 #define FROZEN (1 << PG_active)
+#define KICKABLE (1 << PG_dirty)
 
 #ifdef CONFIG_SLUB_DEBUG
 #define SLABDEBUG (1 << PG_error)
@@ -138,6 +139,21 @@ static inline void ClearSlabDebug(struct
 	page->flags &= ~SLABDEBUG;
 }
 
+static inline int SlabKickable(struct page *page)
+{
+	return page->flags & KICKABLE;
+}
+
+static inline void SetSlabKickable(struct page *page)
+{
+	page->flags |= KICKABLE;
+}
+
+static inline void ClearSlabKickable(struct page *page)
+{
+	page->flags &= ~KICKABLE;
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1110,6 +1126,8 @@ static struct page *new_slab(struct kmem
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
 			SLAB_STORE_USER | SLAB_TRACE))
 		SetSlabDebug(page);
+	if (s->kick)
+		SetSlabKickable(page);
 
 	start = page_address(page);
@@ -1185,6 +1203,8 @@ static void discard_slab(struct kmem_cac
 	atomic_long_dec(&n->nr_slabs);
 	atomic_long_sub(slab_objects(s, page), &n->total_objects);
 
+	reset_page_mapcount(page);
+	ClearSlabKickable(page);
 	__ClearPageSlab(page);
 	free_slab(s, page);
 }
@@ -1365,6 +1385,8 @@ static void unfreeze_slab(struct kmem_ca
 			stat(c, DEACTIVATE_FULL);
 			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 				add_full(n, page);
+			if (s->kick)
+				SetSlabKickable(page);
 		}
 		slab_unlock(page);
 	} else {
@@ -2783,7 +2805,7 @@ static int kmem_cache_vacate(struct page
 	s = page->slab;
 	objects = slab_objects(s, page);
 	map = scratch + max_defrag_slab_objects * sizeof(void **);
-	if (!page->inuse || !s->kick)
+	if (!page->inuse || !s->kick || !SlabKickable(page))
 		goto out;
 
 	/* Determine used objects */
@@ -2820,6 +2842,8 @@ out:
 	 * Check the result and unfreeze the slab
 	 */
 	leftover = page->inuse;
+	if (leftover)
+		ClearSlabKickable(page);
 	unfreeze_slab(s, page, leftover > 0);
 	local_irq_restore(flags);
 	return leftover;
@@ -2867,8 +2891,11 @@ static unsigned long __kmem_cache_shrink
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		if (page->inuse > slab_objects(s, page) / 4)
+
+		if (page->inuse > slab_objects(s, page) / 4 ||
+				(s->kick && !SlabKickable(page)))
 			continue;
+
 		if (!slab_trylock(page))
 			continue;
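
For illustration only, not part of the patch: a minimal userspace C sketch of
the KICKABLE lifecycle described in the changelog. struct toy_slab, vacate()
and stubborn_kick() below are hypothetical stand-ins for struct page,
kmem_cache_vacate() and a cache's kick() method, and the single-bit flags
word stands in for the PG_dirty alias.

/*
 * Toy model of the KICKABLE lifecycle: set when the slab is created for a
 * cache with a kick() method, tested before a vacate attempt, cleared when
 * a kick attempt leaves objects behind.
 */
#include <stdio.h>

#define KICKABLE (1u << 0)		/* stand-in for the PG_dirty alias */

struct toy_slab {
	unsigned int flags;
	int inuse;			/* objects still allocated in the slab */
};

/* Set on slab creation when the cache has a kick() method. */
static void toy_new_slab(struct toy_slab *slab, int has_kick)
{
	slab->flags = 0;
	if (has_kick)
		slab->flags |= KICKABLE;
}

/*
 * Vacate attempt: skip slabs without KICKABLE; clear the flag when objects
 * are left over, so the slab is not kicked again until it cycles through
 * the full state (where the unfreeze path would set it again).
 */
static int vacate(struct toy_slab *slab, int (*try_kick)(struct toy_slab *))
{
	if (!(slab->flags & KICKABLE))
		return slab->inuse;
	slab->inuse = try_kick(slab);
	if (slab->inuse)
		slab->flags &= ~KICKABLE;	/* kick failed: do not retry */
	return slab->inuse;
}

/* A kick() that cannot free the last object, to show the flag clearing. */
static int stubborn_kick(struct toy_slab *slab)
{
	return 1;			/* one object is unmovable */
}

int main(void)
{
	struct toy_slab slab = { .inuse = 4 };

	toy_new_slab(&slab, 1);
	vacate(&slab, stubborn_kick);
	printf("inuse=%d kickable=%d\n", slab.inuse,
	       !!(slab.flags & KICKABLE));	/* inuse=1 kickable=0 */
	vacate(&slab, stubborn_kick);		/* skipped: flag is clear */
	printf("inuse=%d kickable=%d\n", slab.inuse,
	       !!(slab.flags & KICKABLE));
	return 0;
}

The point of the flag shows in the second vacate() call: once a kick attempt
leaves objects behind, the slab is skipped rather than kicked repeatedly,
which is the behavior the patch adds to kmem_cache_vacate() and
__kmem_cache_shrink().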