[SLUB] Add SlabReclaimable() to avoid repeated reclaim attempts

Add a flag, SlabReclaimable(), that is set on slabs belonging to caches
that provide a method for defrag/reclaim. Clear the flag if a reclaim
action is not successful in reducing the number of objects in a slab.
The reclaim flag is set again once all objects in the slab have been
allocated.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   42 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-08-08 20:36:18.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-08-08 20:36:42.000000000 -0700
@@ -107,6 +107,8 @@
 #define SLABDEBUG 0
 #endif
 
+#define SLABRECLAIMABLE (1 << PG_dirty)
+
 static inline int SlabFrozen(struct page *page)
 {
 	return page->flags & FROZEN;
@@ -137,6 +139,21 @@ static inline void ClearSlabDebug(struct
 	page->flags &= ~SLABDEBUG;
 }
 
+static inline int SlabReclaimable(struct page *page)
+{
+	return page->flags & SLABRECLAIMABLE;
+}
+
+static inline void SetSlabReclaimable(struct page *page)
+{
+	page->flags |= SLABRECLAIMABLE;
+}
+
+static inline void ClearSlabReclaimable(struct page *page)
+{
+	page->flags &= ~SLABRECLAIMABLE;
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1126,6 +1143,8 @@ static struct page *new_slab(struct kmem
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
 			SLAB_STORE_USER | SLAB_TRACE))
 		SetSlabDebug(page);
+	if (s->kick)
+		SetSlabReclaimable(page);
 
 out:
 	if (flags & __GFP_WAIT)
@@ -1183,6 +1202,7 @@ static void discard_slab(struct kmem_cac
 	atomic_long_dec(&n->nr_slabs);
 	reset_page_mapcount(page);
 	__ClearPageSlab(page);
+	ClearSlabReclaimable(page);
 	free_slab(s, page);
 }
 
@@ -1356,8 +1376,12 @@ static void unfreeze_slab(struct kmem_ca
 
 		if (page->freelist)
 			add_partial(n, page, tail);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-			add_full(n, page);
+		else {
+			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+				add_full(n, page);
+			if (s->kick && !SlabReclaimable(page))
+				SetSlabReclaimable(page);
+		}
 		slab_unlock(page);
 
 	} else {
@@ -2511,7 +2535,7 @@ int kmem_cache_isolate_slab(struct page
 	struct kmem_cache *s;
 	int rc = -ENOENT;
 
-	if (!PageSlab(page) || SlabFrozen(page))
+	if (!PageSlab(page) || SlabFrozen(page) || !SlabReclaimable(page))
 		return rc;
 
 	/*
@@ -2581,7 +2605,7 @@ static int kmem_cache_vacate(struct page
 	struct kmem_cache *s;
 	unsigned long *map;
 	int leftover;
-	int objects;
+	int objects = -1;
 	void *private;
 	unsigned long flags;
 	int tail = 1;
@@ -2591,7 +2615,7 @@ static int kmem_cache_vacate(struct page
 	slab_lock(page);
 
 	s = page->slab;
-	map = scratch + s->objects * sizeof(void **);
+	map = scratch + max_defrag_slab_objects * sizeof(void **);
 	if (!page->inuse || !s->kick)
 		goto out;
 
@@ -2625,10 +2649,13 @@ static int kmem_cache_vacate(struct page
 	local_irq_save(flags);
 	slab_lock(page);
 	tail = 0;
-out:
+
 	/*
 	 * Check the result and unfreeze the slab
 	 */
+	if (page->inuse == objects)
+		ClearSlabReclaimable(page);
+out:
 	leftover = page->inuse;
 	unfreeze_slab(s, page, tail);
 	local_irq_restore(flags);
@@ -2683,6 +2710,9 @@ static unsigned long __kmem_cache_shrink
 		if (inuse > s->objects / 4)
 			continue;
 
+		if (s->kick && !SlabReclaimable(page))
+			continue;
+
 		if (!slab_trylock(page))
 			continue;
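
For reference, a minimal userspace sketch of the flag lifecycle the patch
introduces: set when a slab is created for a cache that has a kick()
method, cleared when a vacate pass fails to reduce page->inuse, and set
again when the slab becomes fully allocated. This is not kernel code; the
struct page, PG_dirty value and main() driver below are illustrative
stand-ins only.

/* gcc -std=c99 -o reclaim_flag_sketch reclaim_flag_sketch.c */
#include <stdio.h>

#define PG_dirty	4			/* stand-in bit number */
#define SLABRECLAIMABLE	(1UL << PG_dirty)

struct page {				/* stand-in, not the kernel's struct page */
	unsigned long flags;
	int inuse;			/* objects currently allocated in the slab */
};

static inline int SlabReclaimable(struct page *page)
{
	return page->flags & SLABRECLAIMABLE;
}

static inline void SetSlabReclaimable(struct page *page)
{
	page->flags |= SLABRECLAIMABLE;
}

static inline void ClearSlabReclaimable(struct page *page)
{
	page->flags &= ~SLABRECLAIMABLE;
}

int main(void)
{
	struct page slab = { .flags = 0, .inuse = 10 };
	int has_kick = 1;		/* cache provides a kick() method */

	/* new_slab(): mark the slab reclaimable if the cache can defrag */
	if (has_kick)
		SetSlabReclaimable(&slab);
	printf("after new_slab:      reclaimable=%d\n", !!SlabReclaimable(&slab));

	/* kmem_cache_vacate(): the reclaim pass freed nothing, so stop
	 * retrying this slab until it fills up again */
	int objects = slab.inuse;
	/* ... kick() calls for the in-use objects would run here ... */
	if (slab.inuse == objects)
		ClearSlabReclaimable(&slab);
	printf("after failed vacate: reclaimable=%d\n", !!SlabReclaimable(&slab));

	/* unfreeze_slab(): slab is fully allocated again, so it becomes
	 * a reclaim candidate once more */
	if (has_kick && !SlabReclaimable(&slab))
		SetSlabReclaimable(&slab);
	printf("after refill:        reclaimable=%d\n", !!SlabReclaimable(&slab));

	return 0;
}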