Index: linux-2.6.21-rc3/include/linux/slab.h
===================================================================
--- linux-2.6.21-rc3.orig/include/linux/slab.h	2007-03-07 22:17:25.000000000 -0800
+++ linux-2.6.21-rc3/include/linux/slab.h	2007-03-07 22:21:58.000000000 -0800
@@ -33,11 +33,14 @@
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+#define SLAB_DEFRAG		0x00400000UL	/* DTOR is function for object removal */
 
 /* Flags passed to a constructor functions */
 #define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
 #define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
 #define SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */
+#define SLAB_DTOR_DESTRUCTOR	0x001UL		/* DTOR called for object destruct */
+#define SLAB_DTOR_FREE		0x002UL		/* DTOR called for object removal */
 
 /*
  * struct kmem_cache related prototypes
Index: linux-2.6.21-rc3/mm/slub.c
===================================================================
--- linux-2.6.21-rc3.orig/mm/slub.c	2007-03-07 22:20:07.000000000 -0800
+++ linux-2.6.21-rc3/mm/slub.c	2007-03-08 10:17:05.000000000 -0800
@@ -597,7 +597,7 @@
 	void *start = page_address(page);
 	void *end = start + (pages << PAGE_SHIFT);
 	void *p;
-	int mode = 1;
+	int mode = SLAB_CTOR_CONSTRUCTOR;
 
 	if (!(flags & __GFP_WAIT))
 		mode |= SLAB_CTOR_ATOMIC;
@@ -680,7 +680,7 @@
 
 	for (p = start; p <= end - s->size; p += s->size) {
 		if (s->dtor)
-			s->dtor(p, s, 0);
+			s->dtor(p, s, SLAB_DTOR_DESTRUCTOR);
 		else
 			check_object(s, page, p, 0);
 	}
@@ -2181,12 +2181,68 @@
 #endif
 
 /***************************************************************
- *	Compatiblility definitions
+ *	Shrink Cache Support
 **************************************************************/
 
+static void attempt_to_free_objects(struct kmem_cache *s, struct page *page)
+{
+	/* on the sparsely populated attempt to free all objects */
+	dtor(s, SLAB_DTOR_FREE);
+}
+
+static void shrink_node(struct kmem_cache *s, struct kmem_cache_node *n)
+{
+	LIST_HEAD(list);
+	struct page *page;
+	struct page *page2;
+
+	/*
+	 * First loop to extract candidate slabs from the
+	 * partial lists.
+	 */
+	spin_lock_irq(&n->list_lock);
+	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+		if (page->inuse < s->objects / 3) {
+			if (slab_trylock(page)) {
+				SetPageActive(page);
+				list_move(&page->lru, &list);
+				slab_unlock(page);
+			}
+		}
+	}
+	spin_unlock_irq(&n->list_lock);
+
+	/* Do the actual freeing */
+	list_for_each_entry(page, &list, lru)
+		attempt_to_free_objects(s, page);
+
+	/* Return slabs to the partial list */
+	list_for_each_entry_safe(page, page2, &list, lru) {
+		list_del(&page->lru);
+		slab_lock(page);
+		ClearPageActive(page);
+		putback_slab(s, page);
+	}
+}
+
 int kmem_cache_shrink(struct kmem_cache *s)
 {
+	int node;
+
 	flush_all(s);
+	if (!(s->flags & SLAB_DEFRAG) || s->objects < 4)
+		return 0;
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = s->node[node];
+
+		if (node != numa_node_id()) {
+			if (n->nr_partial < 5)
+				inc some counter;
+			queue_delayed_work_on(first_cpu(node_to_cpumask(node)), shrink_node);
+		}
+	}
+	shrink_node(s, s->node[numa_node_id()]);
+	/* wait till complete; */
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
Index: linux-2.6.21-rc3/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc3.orig/include/linux/slub_def.h	2007-03-07 22:35:04.000000000 -0800
+++ linux-2.6.21-rc3/include/linux/slub_def.h	2007-03-07 23:31:49.000000000 -0800
@@ -15,6 +15,7 @@
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+	struct delayed_work work;
 };
 
 /*
@@ -36,6 +37,7 @@
 	const char *name;	/* Name (only for display!) */
 	char *aliases;		/* Slabs merged into this one */
 	struct list_head list;	/* List of slabs */
+	struct mutex nodework;	/* Per node work queued */
 #ifdef CONFIG_SMP
 	struct mutex flushing;
 	atomic_t cpu_slabs;	/*
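
For illustration only, and not part of the patch: a rough sketch of how a cache owner might opt into the proposed interface, by passing SLAB_DEFRAG to kmem_cache_create() and supplying a dtor that distinguishes the new SLAB_DTOR_FREE call (made from kmem_cache_shrink() for sparsely populated slabs) from the conventional SLAB_DTOR_DESTRUCTOR call. Everything named example_* below is an assumption invented for the sketch, and whether the dtor is supposed to free the object itself or merely drop outside references so it can be freed later is left open above.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
	int pinned;			/* non-zero while the object is in use */
	struct list_head lru;		/* hypothetical external reference */
};

static struct kmem_cache *example_cachep;

static void example_ctor(void *object, struct kmem_cache *s,
			 unsigned long flags)
{
	struct example_obj *obj = object;

	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		obj->pinned = 0;
		INIT_LIST_HEAD(&obj->lru);
	}
}

/* Hypothetical helper: detach the object from whatever pins it in memory */
static void example_try_release(struct example_obj *obj)
{
	if (!obj->pinned)
		list_del_init(&obj->lru);
}

static void example_dtor(void *object, struct kmem_cache *s,
			 unsigned long flags)
{
	struct example_obj *obj = object;

	if (flags & SLAB_DTOR_FREE)
		/* kmem_cache_shrink() would like this slab emptied */
		example_try_release(obj);

	/* SLAB_DTOR_DESTRUCTOR: slab teardown, nothing extra to do here */
}

static int __init example_init(void)
{
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_DEFRAG, example_ctor,
					   example_dtor);
	return example_cachep ? 0 : -ENOMEM;
}
module_init(example_init);

MODULE_LICENSE("GPL");

The SLAB_DEFRAG flag exists so that kmem_cache_shrink() can skip caches whose dtor cannot do this kind of eviction; only caches that opt in pay for the extra pass over their partial lists.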