--- mm/slub.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) Index: linux-2.6/mm/slub.c =================================================================== --- linux-2.6.orig/mm/slub.c 2008-03-07 00:22:55.115938033 -0800 +++ linux-2.6/mm/slub.c 2008-03-07 00:29:27.513739441 -0800 @@ -1689,11 +1689,14 @@ static __always_inline void *slab_alloc( object = c->base + offset(old); if (unlikely(is_end(object) || !node_match(c, node))) { object = __slab_alloc(s, gfpflags, node, addr, c); + printk(KERN_CRIT "slow alloc %s object=%p\n", s->name, object); break; } stat(c, ALLOC_FASTPATH); - /* No need to increment the MSB counter here, because only - * object free will lead to counter re-use. */ + /* + * No need to increment the MSB counter here, because only + * object free will lead to counter re-use. + */ new = make_version(sequence(old), object[c->offset]); result = cmpxchg_local(&c->freelist, old, new); #ifdef CONFIG_DEBUG_VM @@ -1708,6 +1711,8 @@ static __always_inline void *slab_alloc( */ WARN_ON(sequence(result) - sequence(old) > HALF_MASK); #endif + printk(KERN_CRIT "alloc %s old=%p new=%p result=%p object=%p\n", + s->name, old, new, result, object); } while (result != old); #else unsigned long flags; @@ -1844,6 +1849,8 @@ static __always_inline void slab_free(st #ifdef SLUB_FASTPATH void *old, *new, *result; + printk(KERN_CRIT "slab_free %s object=%p\n", s->name, object); + c = get_cpu_slab(s, raw_smp_processor_id()); debug_check_no_locks_freed(object, s->objsize); do { @@ -1861,6 +1868,7 @@ static __always_inline void slab_free(st * since the freelist pointers are unique per slab. 
*/ if (unlikely(page != c->page || c->node < 0)) { + printk(KERN_CRIT "slow free\n"); __slab_free(s, page, x, addr, c->offset); break; } @@ -1880,6 +1888,8 @@ static __always_inline void slab_free(st */ WARN_ON(sequence(result) - sequence(old) > HALF_MASK); #endif + printk(KERN_CRIT "slab_free %s old=%p new=%p result=%p object=%p\n", + s->name, old, new, result, object); } while (result != old); #else unsigned long flags; @@ -2927,6 +2937,7 @@ int kmem_cache_shrink(struct kmem_cache kmalloc(sizeof(struct list_head) * s->max_objects, GFP_KERNEL); unsigned long flags; + printk(KERN_CRIT "kmem_cache_shrink %s\n", s->name); if (!slabs_by_inuse) return -ENOMEM;