--- fs/dcache.c | 1 + include/linux/slub_def.h | 2 ++ mm/slub.c | 36 +++++++++++++++++++++++++++++++++--- 3 files changed, 36 insertions(+), 3 deletions(-) Index: slub/fs/dcache.c =================================================================== --- slub.orig/fs/dcache.c 2007-05-21 18:18:22.000000000 -0700 +++ slub/fs/dcache.c 2007-05-21 18:18:45.000000000 -0700 @@ -524,6 +524,7 @@ static void prune_dcache(int count, stru dentry_stat.nr_unused++; } spin_unlock(&dcache_lock); + kmem_cache_shrink(dentry_cache); } /* Index: slub/include/linux/slub_def.h =================================================================== --- slub.orig/include/linux/slub_def.h 2007-05-21 18:19:26.000000000 -0700 +++ slub/include/linux/slub_def.h 2007-05-21 18:26:05.000000000 -0700 @@ -43,6 +43,8 @@ struct kmem_cache { void (*ctor)(void *, struct kmem_cache *, unsigned long); int inuse; /* Offset to metadata */ int align; /* Alignment */ + unsigned long shrink_next; /* Next kmem_cache_shrink */ + unsigned long shrink_partial; const char *name; /* Name (only for display!) 
*/ struct list_head list; /* List of slab caches */ struct kobject kobj; /* For sysfs */ Index: slub/mm/slub.c =================================================================== --- slub.orig/mm/slub.c 2007-05-21 18:18:48.000000000 -0700 +++ slub/mm/slub.c 2007-05-21 18:40:59.000000000 -0700 @@ -2334,14 +2334,39 @@ int kmem_cache_shrink(struct kmem_cache struct kmem_cache_node *n; struct page *page; struct page *t; - struct list_head *slabs_by_inuse = - kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL); + struct list_head *slabs_by_inuse; unsigned long flags; + unsigned long x; + unsigned long seconds = 10; + + if (time_before(jiffies, s->shrink_next)) + return 0; + + x = 0; + for_each_online_node(node) + x += get_node(s, node)->nr_partial; + + if (x <= MAX_PARTIAL) { + seconds = 60; + goto next_shrink; + } + + if (x < s->shrink_partial) { + /* Reduce by 2% for next check */ + s->shrink_partial = (s->shrink_partial * 98) / 100; + goto next_shrink; + } + + s->shrink_partial = x; + + slabs_by_inuse = + kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL); if (!slabs_by_inuse) return -ENOMEM; flush_all(s); + x = 0; for_each_online_node(node) { n = get_node(s, node); @@ -2389,10 +2414,15 @@ int kmem_cache_shrink(struct kmem_cache out: spin_unlock_irqrestore(&n->list_lock, flags); + x += n->nr_partial; } kfree(slabs_by_inuse); - return 0; + s->shrink_partial = x; + printk(KERN_INFO "SLUB shrink %s. %lu partial slabs.\n", s->name, x); +next_shrink: + s->shrink_next = jiffies + seconds * HZ; + return x == 0; } EXPORT_SYMBOL(kmem_cache_shrink);