Index: slub/fs/dcache.c
===================================================================
--- slub.orig/fs/dcache.c	2007-05-31 17:49:03.000000000 -0700
+++ slub/fs/dcache.c	2007-05-31 18:21:54.000000000 -0700
@@ -951,6 +951,7 @@ struct dentry *d_alloc(struct dentry * p
 	if (parent)
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
 	dentry_stat.nr_dentry++;
+	dentry->d_flags |= DCACHE_ENTRY_VALID;
 	spin_unlock(&dcache_lock);
 
 	return dentry;
@@ -2108,18 +2109,108 @@ static void __init dcache_init_early(voi
 		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
 }
 
+/*
+ * Remove a dcache entry either by reclaiming it or by moving it
+ * to a different memory location.
+ *
+ * Called with dcache_lock held.
+ *
+ * Returns 1 on success and 0 on failure.
+ *
+ * If we fail then we do not drop the reference count here; the
+ * caller drops it.
+ */
+static int move_or_reclaim_dentry(struct dentry *d)
+{
+	return 0;
+}
+
+/*
+ * The slab is holding off frees. Thus we can safely examine
+ * dentries without the danger of them being freed from under us.
+ */
+static void *get_dentries(struct kmem_cache *s, int nr, void **v)
+{
+	struct dentry *dentry;
+	int i;
+
+	spin_lock(&dcache_lock);
+
+	for (i = 0; i < nr; i++) {
+		dentry = v[i];
+		/*
+		 * If DCACHE_ENTRY_VALID is not set then the dentry
+		 * may already be in the process of being freed.
+		 */
+		if (dentry->d_flags & DCACHE_ENTRY_VALID)
+			dget_locked(dentry);
+		else
+			v[i] = NULL;
+	}
+
+	spin_unlock(&dcache_lock);
+	return 0;
+}
+
+/*
+ * Slab has dropped all the locks. Drop the refcount we obtained
+ * earlier and get rid of the object.
+ */
+static void kick_dentries(struct kmem_cache *s, int nr, void **v, void *private)
+{
+	struct dentry *dentry;
+	int abort = 0;
+	int i;
+
+	spin_lock(&dcache_lock);
+
+	for (i = 0; i < nr; i++) {
+		dentry = v[i];
+		if (!dentry)
+			continue;
+
+		if (abort)
+			goto abort_next;
+
+		if (move_or_reclaim_dentry(dentry))
+			/* Successfully moved object */
+			continue;
+
+		/*
+		 * Failed to free a dentry, so we will just drop
+		 * the refcount for the rest of the objects.
+		 * For that we do not need the dcache_lock.
+		 */
+		abort = 1;
+		spin_unlock(&dcache_lock);
+abort_next:
+		dput(dentry);
+	}
+
+	if (!abort) {
+		/* Success. We still hold the dcache_lock. Drop it. */
+		spin_unlock(&dcache_lock);
+		/*
+		 * Dentries are freed using RCU so we need to wait until
+		 * RCU operations are complete.
+		 */
+		synchronize_rcu();
+	}
+}
+
+static struct kmem_cache_ops dentry_kmem_cache_ops = {
+	.get = get_dentries,
+	.kick = kick_dentries,
+};
+
 static void __init dcache_init(unsigned long mempages)
 {
 	int loop;
 
-	/*
-	 * A constructor could be added for stable state like the lists,
-	 * but it is probably not worth it because of the cache nature
-	 * of the dcache.
-	 */
-	dentry_cache = KMEM_CACHE(dentry,
-		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
-
+	dentry_cache = KMEM_CACHE_OPS(dentry,
+		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD,
+		&dentry_kmem_cache_ops);
+
 	register_shrinker(&dcache_shrinker);
 
 	/* Hash may have been set up in dcache_init_early */
Index: slub/include/linux/dcache.h
===================================================================
--- slub.orig/include/linux/dcache.h	2007-05-31 17:49:03.000000000 -0700
+++ slub/include/linux/dcache.h	2007-05-31 18:18:35.000000000 -0700
@@ -177,6 +177,12 @@ d_iput:	no		no		no       yes
 
 #define DCACHE_INOTIFY_PARENT_WATCHED	0x0020 /* Parent inode is watched */
 
+#define DCACHE_ENTRY_VALID	0x0040
+	/*
+	 * This dentry is valid and not in the process of being created
+	 * or destroyed.
+	 */
+
 extern spinlock_t dcache_lock;
 
 /**
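
For context, a rough and hypothetical sketch of the calling convention the
get()/kick() pair above appears to assume. The driver function below is not
part of this patch and its name is made up; the hand-off of get()'s return
value as kick()'s "private" argument is inferred from the signatures. The
allocator gathers the objects of a sparsely used slab into v[], pins them via
get() while frees are held off, drops its own locks, and then asks kick() to
dispose of whatever was pinned.

/*
 * Hypothetical driver loop, NOT part of this patch: illustrates how
 * the dentry get/kick callbacks registered above are expected to be
 * invoked by the slab allocator during defragmentation.
 */
static void defrag_dentry_slab_sketch(struct kmem_cache *s, int nr, void **v)
{
	void *private;

	/* Pin each candidate dentry (invalid entries are NULLed out). */
	private = get_dentries(s, nr, v);

	/* ... the allocator drops its slab locks here ... */

	/* Reclaim or move what we can; drop the references otherwise. */
	kick_dentries(s, nr, v, private);
}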