From: Manfred Spraul

Maintenance work from Alexander Nyberg

With the patch applied,

        echo "size-4096 0 0 0" > /proc/slabinfo

walks the objects in the size-4096 slab, printing out the calling address
of whoever allocated that object.  It is for leak detection.

Signed-off-by: Andrew Morton
---

 mm/slab.c |   46 +++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 43 insertions(+), 3 deletions(-)

diff -puN mm/slab.c~slab-leak-detector mm/slab.c
--- devel/mm/slab.c~slab-leak-detector	2005-12-10 21:53:02.000000000 -0800
+++ devel-akpm/mm/slab.c	2005-12-10 21:53:02.000000000 -0800
@@ -199,7 +199,7 @@
  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
  */

-typedef unsigned int kmem_bufctl_t;
+typedef unsigned long kmem_bufctl_t;
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
@@ -2401,7 +2401,7 @@ static void check_slabp(kmem_cache_t * c
 	     i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
 	     i++) {
 		if ((i % 16) == 0)
-			printk("\n%03x:", i);
+			printk("\n%04lx:", i);
 		printk(" %02x", ((unsigned char *)slabp)[i]);
 	}
 	printk("\n");
@@ -2558,6 +2558,15 @@ static void *cache_alloc_debugcheck_afte
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+	{
+		int objnr;
+		struct slab *slabp;
+
+		slabp = page_get_slab(virt_to_page(objp));
+
+		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		slab_bufctl(slabp)[objnr] = (unsigned long)caller;
+	}
 	objp += obj_dbghead(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2708,7 +2717,7 @@ static void free_block(kmem_cache_t * ca
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);

-#if DEBUG
+#if 0	/* disabled, not compatible with leak detection */
 		/* Verify that the slab belongs to the intended node */
 		WARN_ON(slabp->nodeid != node);

@@ -3589,6 +3598,36 @@ struct seq_operations slabinfo_op = {
 	.show = s_show,
 };

+static void do_dump_slabp(kmem_cache_t *cachep)
+{
+#if DEBUG
+	struct list_head *q;
+	int node;
+
+	check_irq_on();
+	spin_lock_irq(&cachep->spinlock);
+	for_each_online_node(node) {
+		struct kmem_list3 *rl3 = cachep->nodelists[node];
+		spin_lock(&rl3->list_lock);
+
+		list_for_each(q, &rl3->slabs_full) {
+			int i;
+			struct slab *slabp = list_entry(q, struct slab, list);
+
+			for (i = 0; i < cachep->num; i++) {
+				unsigned long sym = slab_bufctl(slabp)[i];
+
+				printk("obj %p/%d: %p", slabp, i, (void *)sym);
+				print_symbol(" <%s>", sym);
+				printk("\n");
+			}
+		}
+		spin_unlock(&rl3->list_lock);
+	}
+	spin_unlock_irq(&cachep->spinlock);
+#endif
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -3628,6 +3667,7 @@ ssize_t slabinfo_write(struct file *file
 			if (limit < 1 || batchcount < 1 ||
 			    batchcount > limit || shared < 0) {
+				do_dump_slabp(cachep);
 				res = 0;
 			} else {
 				res = do_tune_cpucache(cachep, limit,
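
For readers who want a feel for the bookkeeping the patch adds, here is a
minimal user-space sketch of the same idea, assuming nothing beyond standard
C: one slot per object records the caller that allocated it, and a later
walk over the slab prints the recorded caller for every object.  The names
(struct toy_slab, toy_alloc, toy_dump, OBJ_SIZE) are made up for
illustration and are not part of the patch; the kernel version stores the
caller in slab_bufctl() and resolves it to a symbol with print_symbol().

/*
 * Illustrative sketch only -- not kernel code.  One "bufctl" slot per
 * object records the allocation caller, so a later walk over the slab
 * can report who allocated each object, much like do_dump_slabp().
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE	64
#define OBJS_PER_SLAB	8

struct toy_slab {
	char mem[OBJ_SIZE * OBJS_PER_SLAB];	/* object storage (s_mem) */
	unsigned long bufctl[OBJS_PER_SLAB];	/* caller per object */
};

/* Hand out object objnr and remember who asked for it. */
static void *toy_alloc(struct toy_slab *s, int objnr, void *caller)
{
	s->bufctl[objnr] = (unsigned long)caller;
	return s->mem + (size_t)objnr * OBJ_SIZE;
}

/* Walk the slab and print the recorded caller for each object. */
static void toy_dump(struct toy_slab *s)
{
	int i;

	for (i = 0; i < OBJS_PER_SLAB; i++)
		printf("obj %p/%d: caller %#lx\n",
		       (void *)s, i, s->bufctl[i]);
}

int main(void)
{
	struct toy_slab *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	toy_alloc(s, 0, __builtin_return_address(0));
	toy_alloc(s, 3, __builtin_return_address(0));
	toy_dump(s);
	free(s);
	return 0;
}

Built with gcc, this prints one line per object, which is essentially what
do_dump_slabp() emits to the kernel log when a cache name with tunables
that fail validation (e.g. "size-4096 0 0 0") is written to /proc/slabinfo.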