Support targeted reclaim and slab defrag for dentry cache

This is an experimental patch for locking review only; I am not that
familiar with dentry cache locking.

We set up the dentry cache a bit differently using the new APIs and
define a get_reference() and a kick_object() function for it.

get_dentry_reference() works by incrementing the dentry refcount,
provided it is not already zero. If it is zero, the slab called us
while another processor is in the process of freeing the object. That
processor will finish the free as soon as we return from this call, so
we fail.

kick_dentry_object() is called after get_dentry_reference() has
succeeded and after the slab has dropped all of its own locks. We try
to reuse the existing dentry pruning logic here; I hope that is
correct.

Signed-off-by: Christoph Lameter

---
 fs/dcache.c |   51 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 10 deletions(-)

Index: slub/fs/dcache.c
===================================================================
--- slub.orig/fs/dcache.c	2007-05-09 11:29:13.000000000 -0700
+++ slub/fs/dcache.c	2007-05-09 11:40:31.000000000 -0700
@@ -2114,18 +2114,50 @@ static void __init dcache_init_early(voi
 		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
 }
 
+/*
+ * The slab is holding locks on the current slab. We can just
+ * take a reference.
+ */
+static int get_dentry_reference(struct kmem_cache *s, void *private)
+{
+	struct dentry *dentry = private;
+
+	return atomic_inc_not_zero(&dentry->d_count);
+}
+
+/*
+ * The slab has dropped all of its locks. Drop the refcount we
+ * obtained earlier and also get rid of the object.
+ */
+static void kick_dentry_object(struct kmem_cache *s, void *private)
+{
+	struct dentry *dentry = private;
+
+	spin_lock(&dentry->d_lock);
+	if (atomic_read(&dentry->d_count) > 1) {
+		/* Someone else took a reference; just drop ours. */
+		spin_unlock(&dentry->d_lock);
+		dput(dentry);
+		return;
+	}
+	spin_lock(&dcache_lock);
+	prune_one_dentry(dentry, 1);
+	spin_unlock(&dcache_lock);
+}
+
+static struct kmem_cache_ops dentry_kmem_cache_ops = {
+	.get_reference = get_dentry_reference,
+	.kick_object = kick_dentry_object,
+};
+
 static void __init dcache_init(unsigned long mempages)
 {
 	int loop;
 
-	/*
-	 * A constructor could be added for stable state like the lists,
-	 * but it is probably not worth it because of the cache nature
-	 * of the dcache.
-	 */
-	dentry_cache = KMEM_CACHE(dentry,
-		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
-
+	dentry_cache = KMEM_CACHE_OPS(dentry,
+		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD,
+		&dentry_kmem_cache_ops);
+
 	register_shrinker(&dcache_shrinker);
 
 	/* Hash may have been set up in dcache_init_early */
@@ -2173,8 +2205,7 @@ void __init vfs_caches_init(unsigned lon
 	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
-	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	filp_cachep = KMEM_CACHE(file, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	dcache_init(mempages);
 	inode_init(mempages);
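
A note for reviewers who have not followed the slab defrag series: the
two callbacks assume a two-phase protocol on the allocator side. Below
is a minimal sketch of that protocol as I understand it; the driver
function kmem_cache_defrag_slab() and the s->ops field layout are
illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only -- not part of this patch. It shows the
 * calling convention that get_dentry_reference() and
 * kick_dentry_object() rely on.
 */
static void kmem_cache_defrag_slab(struct kmem_cache *s,
					void **objects, int nr)
{
	int i;

	/*
	 * Phase 1: invoked with the slab locks still held. Pin each
	 * object by taking a reference. A refcount that is already
	 * zero means another CPU is freeing the object right now, so
	 * get_reference() fails and we skip that object.
	 */
	for (i = 0; i < nr; i++)
		if (!s->ops->get_reference(s, objects[i]))
			objects[i] = NULL;

	/* The allocator drops all of its slab locks here. */

	/*
	 * Phase 2: no slab locks held. Ask the subsystem to drop the
	 * reference taken above and dispose of the object, so that
	 * the underlying slab page can eventually be freed.
	 */
	for (i = 0; i < nr; i++)
		if (objects[i])
			s->ops->kick_object(s, objects[i]);
}

This is why get_dentry_reference() may fail and why kick_dentry_object()
must take dentry->d_lock itself: by the time it runs, the slab no longer
holds any locks on the object.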