--- fs/dcache.c | 16 ++++++++++------ fs/inode.c | 48 ++++++++++++++++++++++++++---------------------- mm/slub.c | 11 ++++++++--- 3 files changed, 44 insertions(+), 31 deletions(-) Index: slub/mm/slub.c =================================================================== --- slub.orig/mm/slub.c 2007-05-18 00:13:40.000000000 -0700 +++ slub/mm/slub.c 2007-05-18 00:50:28.000000000 -0700 @@ -2384,6 +2384,8 @@ static int __kmem_cache_vacate(struct km int objects; void *private; + printk(KERN_ERR "__kmem_cache_vacate(%s, %p, %tx) inuse=%d\n", + s->name, page, flags, page->inuse); if (!page->inuse) goto out; @@ -2431,6 +2433,7 @@ out: resequence_freelist(s, page); unfreeze_slab(s, page); local_irq_restore(flags); + printk(KERN_ERR "leftover=%d\n", leftover); return leftover; } @@ -2570,15 +2573,16 @@ int kmem_cache_shrink(struct kmem_cache slab_unlock(page); discard_slab(s, page); } else { - if (n->nr_partial > MAX_PARTIAL) + if (n->nr_partial > MIN_PARTIAL) list_move(&page->lru, slabs_by_inuse + page->inuse); } } - if (n->nr_partial <= MAX_PARTIAL) + if (n->nr_partial <= MIN_PARTIAL) goto out; + printk(KERN_ERR "rebuilding partial list for %s\n", s->name); /* * Rebuild the partial list with the slabs filled up most * first and the least used slabs at the end. 
@@ -2593,8 +2597,9 @@ int kmem_cache_shrink(struct kmem_cache if (!s->ops->get || !s->ops->kick) goto out; + printk(KERN_ERR "Defrag pass\n"); /* Take objects with just a few objects off the tail */ - while (n->nr_partial > MAX_PARTIAL) { + while (n->nr_partial > MIN_PARTIAL) { page = container_of(n->partial.prev, struct page, lru); /* Index: slub/fs/dcache.c =================================================================== --- slub.orig/fs/dcache.c 2007-05-18 00:13:40.000000000 -0700 +++ slub/fs/dcache.c 2007-05-18 00:13:42.000000000 -0700 @@ -2138,6 +2138,8 @@ static void *get_dentries(struct kmem_ca else { dget_locked(dentry); abort = atomic_read(&dentry->d_count) > 1; + if (abort) + printk(KERN_ERR "Abort due to refcount=%d\n", atomic_read(&dentry->d_count)); } } spin_unlock(&dcache_lock); @@ -2155,27 +2157,29 @@ static void kick_dentries(struct kmem_ca unsigned long abort = (unsigned long)private; int i; + printk(KERN_ERR "kick_dentry_object(%s, %d, %p, %p)\n", + s->name, nr, v, private); spin_lock(&dcache_lock); for (i = 0; i < nr; i++) { dentry = v[i]; if (!dentry) continue; - if (abort) { - dput(dentry); - spin_lock(&dcache_lock); - continue; - } + if (abort) + goto put_dentry; spin_lock(&dentry->d_lock); - if (atomic_read(&dentry->d_count) > 1) + if (atomic_read(&dentry->d_count) > 1) { /* * Reference count was increased. * We need to abandon the freeing of * objects. 
*/ abort = 1; + printk(KERN_ERR "Abort due to refcount=%d\n", atomic_read(&dentry->d_count)); spin_unlock(&dentry->d_lock); +put_dentry: + spin_unlock(&dcache_lock); dput(dentry); spin_lock(&dcache_lock); continue; Index: slub/fs/inode.c =================================================================== --- slub.orig/fs/inode.c 2007-05-18 00:13:41.000000000 -0700 +++ slub/fs/inode.c 2007-05-18 00:31:41.000000000 -0700 @@ -1387,7 +1387,7 @@ void *fs_get_inodes(struct kmem_cache *s int i; for (i = 0; i < nr; i++) - v[i] -= offset; + v[i] += offset; return get_inodes(s, nr, v); } @@ -1399,50 +1399,54 @@ void kick_inodes(struct kmem_cache *s, i int i; int abort = 0; LIST_HEAD(freeable); + struct super_block *sb; - /* Remove possible dentry references to the inodes */ for (i = 0; i < nr; i++) { inode = v[i]; if (!inode) continue; + if (inode_has_buffers(inode) || inode->i_data.nrpages) { + if (remove_inode_buffers(inode)) + invalidate_mapping_pages(&inode->i_data, + 0, -1); + } + + if (inode->i_state & I_DIRTY) + write_inode_now(inode, 1); + if (atomic_read(&inode->i_count) > 1) d_prune_aliases(inode); } + printk(KERN_ERR "kick_inodes(%s, %d, %p, %p)\n", + s->name, nr, v, private); mutex_lock(&iprune_mutex); - spin_lock(&inode_lock); for (i = 0; i < nr; i++) { inode = v[i]; if (!inode) continue; - if (abort || inode->i_state || atomic_read(&inode->i_count) > 1) { - /* Inode in use */ - iput(inode); - abort = 1; + sb = inode->i_sb; + iput(inode); + printk(KERN_ERR "item %d = %p refs=%d state=%x\n", + i, inode, atomic_read(&inode->i_count), inode->i_state); + + if (abort || !(sb->s_flags & MS_ACTIVE)) continue; - } - if (inode_has_buffers(inode) || inode->i_data.nrpages) { + spin_lock(&inode_lock); + if (!can_unuse(inode)) { + printk(KERN_ERR "Abort\n"); + abort = 1; spin_unlock(&inode_lock); - if (remove_inode_buffers(inode)) - invalidate_mapping_pages(&inode->i_data, - 0, -1); - iput(inode); - spin_lock(&inode_lock); - if (!can_unuse(inode)) { - abort = 1; - continue; 
- } - } else - iput(inode); - + continue; + } list_move(&inode->i_list, &freeable); inode->i_state |= I_FREEING; inodes_stat.nr_unused--; + spin_unlock(&inode_lock); } - spin_unlock(&inode_lock); dispose_list(&freeable); mutex_unlock(&iprune_mutex); }