Index: linux-2.6.17/mm/slab.c =================================================================== --- linux-2.6.17.orig/mm/slab.c 2006-06-19 19:27:34.229410762 -0700 +++ linux-2.6.17/mm/slab.c 2006-06-19 22:25:46.343155265 -0700 @@ -3782,6 +3782,7 @@ static int try_reclaim_one(kmem_cache_t if (unlikely(p == list)) { /* Empty list */ spin_unlock_irq(&l3->list_lock); + printk(KERN_ERR "List empty\n"); return -1; } @@ -3789,11 +3790,13 @@ static int try_reclaim_one(kmem_cache_t if (unlikely(slabp->marker == marker)) { /* We already did this one. Pass complete */ spin_unlock_irq(&l3->list_lock); + printk(KERN_ERR "Pass complete\n"); return -1; } - list_move(p, list); + list_move(&slabp->list, list); spin_unlock_irq(&l3->list_lock); + min_free = 0; if (slabp->inuse == cachep->num) { /* @@ -3808,6 +3811,7 @@ static int try_reclaim_one(kmem_cache_t min_free = 1; } + printk(KERN_ERR "Considering slab at %p %d/%d used. %d min_free.\n", cachep, slabp->inuse, cachep->num, min_free); /* * First pass over the slab: We free the first min_free objects * with refcnt one and establish the higest refcnt in the block. @@ -3828,6 +3832,7 @@ static int try_reclaim_one(kmem_cache_t max_refcnt = max(max_refcnt, refcnt); } + printk(KERN_ERR "max refcnt = %d. min_free=%d\n", max_refcnt, min_free); if (max_refcnt > 0 && slabp->inuse > 1) /* Cannot free the block */ @@ -3845,6 +3850,7 @@ static int try_reclaim_one(kmem_cache_t if (refcnt >= 0) cachep->dtor(objp, cachep, SLAB_DTOR_FREE); } + printk(KERN_ERR "Successfully freed block\n"); return 1; } @@ -3880,6 +3886,8 @@ long kmem_cache_reclaim(kmem_cache_t *ca int slabs_to_free = (pages + ((1 << cachep->gfporder) -1)) >> cachep->gfporder; + printk(KERN_INFO "kmem_cache_reclaim(%p, %ld)\n", cachep, pages); + /* * Push cached objects into the lists. That way more pages may * be freeable. 
@@ -3889,6 +3897,7 @@ long kmem_cache_reclaim(kmem_cache_t *ca /* First we reclaim from the freelists */ nr_freed = drain_freelist(cachep, l3, slabs_to_free); + printk(KERN_INFO "%d slabs freed by drain_freelist\n", nr_freed); /* * We can only do more in depth reclaim if we have * a destructor and the flags indicate that the refcnt @@ -3906,10 +3915,13 @@ long kmem_cache_reclaim(kmem_cache_t *ca if (atomic_read(&l3->reclaim) > 1) goto dec; + /* Try to free items from the full lists */ nr_freed += reclaim_scan(cachep, l3, slabs_to_free - nr_freed, &l3->slabs_full, marker); + printk(KERN_ERR "%d slabs free after full list reclaim\n",nr_freed); + if (nr_freed >= slabs_to_free) goto dec; @@ -3921,11 +3933,13 @@ long kmem_cache_reclaim(kmem_cache_t *ca * At this point we have freed all freeable slabs of the cache * and we have freed a mininum number of objects free for each slab */ + printk(KERN_ERR "%d slabs free after partial list reclaim\n", nr_freed); dec: atomic_dec(&l3->reclaim); /* Drop the large free lists that we may have build while scanning */ drain_freelist(cachep, l3, slabs_to_free); + printk(KERN_INFO "%d slabs freed by last drain_freelist\n", nr_freed); done: return nr_freed << cachep->gfporder; @@ -4147,8 +4161,12 @@ ssize_t slabinfo_write(struct file *file return -EINVAL; *tmp = '\0'; tmp++; - if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) - return -EINVAL; + if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) { + if (sscanf(tmp, " %d", &limit) != 1) + return -EINVAL; + batchcount = -123; + printk(KERN_ERR "Reclaim %d pages from %s\n", limit, kbuf); + } /* Find the cache in the chain of caches. 
*/ mutex_lock(&cache_chain_mutex); @@ -4160,6 +4178,9 @@ ssize_t slabinfo_write(struct file *file if (!strcmp(cachep->name, kbuf)) { if (limit < 1 || batchcount < 1 || batchcount > limit || shared < 0) { + if (batchcount == -123) + printk(KERN_ERR "kmem_cache_reclaim(%s,%d)->%ld\n", + kbuf, limit, kmem_cache_reclaim(cachep, limit)); res = 0; } else { res = do_tune_cpucache(cachep, limit, Index: linux-2.6.17/include/linux/fs.h =================================================================== --- linux-2.6.17.orig/include/linux/fs.h 2006-06-17 18:49:35.000000000 -0700 +++ linux-2.6.17/include/linux/fs.h 2006-06-19 20:42:34.974309355 -0700 @@ -476,13 +476,13 @@ static inline int mapping_writably_mappe #endif struct inode { + atomic_t i_count; + umode_t i_mode; struct hlist_node i_hash; struct list_head i_list; struct list_head i_sb_list; struct list_head i_dentry; unsigned long i_ino; - atomic_t i_count; - umode_t i_mode; unsigned int i_nlink; uid_t i_uid; gid_t i_gid; Index: linux-2.6.17/fs/inode.c =================================================================== --- linux-2.6.17.orig/fs/inode.c 2006-06-19 19:28:54.005732436 -0700 +++ linux-2.6.17/fs/inode.c 2006-06-19 20:31:40.214094714 -0700 @@ -1297,10 +1297,14 @@ void inode_dtor(void *p, kmem_cache_t *c if (!(flags & SLAB_DTOR_FREE)) return; + printk(KERN_ERR "Free inode object at %p\n", inode); + spin_lock(&inode_lock); /* Do not free a busy inode */ - if (inode->i_state || atomic_read(&inode->i_count)) + if (inode->i_state || atomic_read(&inode->i_count)) { + printk(KERN_ERR "inode busy\n"); goto unlock; + } if (inode_has_buffers(inode) || inode->i_data.nrpages) { __iget(inode); @@ -1309,8 +1313,10 @@ void inode_dtor(void *p, kmem_cache_t *c invalidate_inode_pages(&inode->i_data); iput(inode); spin_lock(&inode_lock); - if (atomic_read(&inode->i_count) || !can_unuse(inode)) + if (atomic_read(&inode->i_count) || !can_unuse(inode)) { + printk(KERN_ERR "Reread did not work\n"); goto unlock; + } } inode->i_state 
|= I_FREEING; @@ -1346,7 +1352,7 @@ void __init inode_init(unsigned long mem sizeof(struct inode), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| - SLAB_MEM_SPREAD), + SLAB_MEM_SPREAD|SLAB_RECLAIM), init_once, inode_dtor); set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);