---
 mm/slub.c |   76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 74 insertions(+), 2 deletions(-)

Index: linux-2.6.22-rc6-mm1/mm/slub.c
===================================================================
--- linux-2.6.22-rc6-mm1.orig/mm/slub.c	2007-07-09 15:48:41.000000000 -0700
+++ linux-2.6.22-rc6-mm1/mm/slub.c	2007-07-09 16:43:54.000000000 -0700
@@ -1469,6 +1469,7 @@ static void *__slab_alloc(struct kmem_ca
 	struct page *new;
 	unsigned long flags;
 
+	printk("__slab_alloc(%s, %x, %d, %p)\n", s->name, gfpflags, node, addr);
 	local_irq_save(flags);
 	if (!c->page)
 		goto new_slab;
@@ -1498,6 +1499,7 @@ out:
 	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, c->objsize);
 
+	printk("__slab_alloc_end(%s)\n", s->name);
 	return object;
 
 another_slab:
@@ -1568,20 +1570,35 @@ static void __always_inline *slab_alloc(
 	void **object;
 	struct kmem_cache_cpu *c;
 
+	printk("slab_alloc(%s, %x, %d, %p)\n",
+		s->name, gfpflags, node, addr);
 	preempt_disable();
 	c = get_cpu_slab(s, smp_processor_id());
 redo:
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
+slow:
 		return __slab_alloc(s, gfpflags, node, addr, c);
 
-	if (cmpxchg_local(&c->freelist, object, object[c->offset]) != object)
+	if (cmpxchg_local(&c->freelist, object, object[c->offset]) != object) {
+		printk("slab_alloc(%s) redo\n", s->name);
 		goto redo;
+	}
+
+	if (!check_valid_pointer(s, virt_to_head_page(object), object)) {
+		printk("slab_alloc inv pointer(%s, %x, %d, %p) object=%p freelist=%p objsize=%d %p-%p\n",
+			s->name, gfpflags, node, addr, object, c->freelist, c->objsize,
+			page_address(virt_to_head_page(object)),
+			page_address(virt_to_head_page(object)) + (PAGE_SIZE << s->order));
+		WARN_ON(1);
+		goto slow;
+	}
 
 	preempt_enable();
 
 	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, c->objsize);
 
+	printk("slab_alloc_end(%s)\n", s->name);
 	return object;
 }
@@ -1614,6 +1631,8 @@ static void __slab_free(struct kmem_cach
 	void **object = (void *)x;
 	unsigned long flags;
 
+	printk("__slab_free(%s, %p, %p, %p, %d)\n",
+		s->name, page, x, addr, offset);
 	local_irq_save(flags);
 	slab_lock(page);
 
@@ -1642,6 +1661,8 @@ out_unlock:
 	slab_unlock(page);
 	local_irq_restore(flags);
 	preempt_enable();
+	printk("__slab_free out(%s, %p, %p, %p, %d)\n",
+		s->name, page, x, addr, offset);
 	return;
 
 slab_empty:
@@ -1655,6 +1676,9 @@ slab_empty:
 	local_irq_restore(flags);
 	preempt_enable();
 	discard_slab(s, page);
+	printk("__slab_free out(%s, %p, %p, %p, %d)\n",
+		s->name, page, x, addr, offset);
+
 	return;
 
 debug:
@@ -1663,6 +1687,26 @@ debug:
 		goto checks_ok;
 }
 
+static int on_fl(struct kmem_cache *s, struct page *page, void **x,
+		void **search, const char *str)
+{
+	int rc = 0;
+
+	for ( ; x ; x = get_freepointer(s, x)) {
+		if (x == search)
+			rc = 1;
+
+		if (!check_valid_pointer(s, page, x)) {
+			printk(KERN_ERR "Invalid pointer %s %s %p %p-%p\n",
+				s->name, str, x,
+				page_address(page),
+				page_address(page) + (PAGE_SIZE << s->order));
+			return rc;
+		}
+	}
+	return rc;
+}
+
 /*
  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  * can perform fastpath freeing without additional function calls.
@@ -1680,19 +1724,47 @@ static void __always_inline slab_free(st
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 	void **freelist;
+	char *str;
+
+	printk("slab_free(%s, %p, %p, %p)\n",
+		s->name, page, x, addr);
 
 	preempt_disable();
 	c = get_cpu_slab(s, smp_processor_id());
+
+	if (!check_valid_pointer(s, page, object)) {
+		str = "invalid pointer";
+msg:
+		printk("slab_free(%s, %s, %p, %p, %p) freelist=%p objsize=%d\n",
+			str, s->name, page, x, addr, c->freelist, c->objsize);
+		WARN_ON(1);
+		preempt_enable();
+		return;
+	}
+	if (on_fl(s, page, page->freelist, x, "page freelist")) {
+		str = "page freelist corrupt";
+		goto msg;
+	}
+	if (on_fl(s, page, c->freelist, x, "cpu_freelist")) {
+		str = "cpu freelist corrupt";
+		goto msg;
+	}
+
 redo:
 	freelist = c->freelist;
 	if (unlikely(page != c->page || !freelist))
 		return __slab_free(s, page, x, addr, c->offset);
 
 	object[c->offset] = freelist;
-	if (cmpxchg_local(&c->freelist, freelist, object) != object)
+	if (cmpxchg_local(&c->freelist, freelist, object) != freelist) {
+		printk("slab_free(%s) redo\n", s->name);
 		goto redo;
+	}
 
 	preempt_enable();
+
+	printk("slab_free end (%s, %p, %p, %p)\n",
+		s->name, page, x, addr);
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
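
As an aside, for anyone following the traces: what on_fl() and the printks above are probing is the usual lockless-freelist invariant. Below is a minimal user-space sketch of the same cmpxchg-driven fastpath (illustration only, not kernel code: fl_pop/fl_push are invented names, C11 atomics stand in for cmpxchg_local, and there is no per-cpu structure). Note in particular that the success test compares the returned old value against the expected old value (the saved freelist head), not against the new value; getting that backwards makes every successful exchange look like a failure, and the retry then links the object to itself, which is exactly the kind of cycle the freelist walk above would trip over.

/*
 * User-space model of the SLUB cmpxchg_local freelist fastpath.
 * A sketch under the assumptions stated above, not the kernel
 * implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

/* Free objects store the next-free pointer at offset 0, like
 * object[c->offset] in slub.c. */
static _Atomic(void *) freelist;

static void *fl_pop(void)			/* models slab_alloc() */
{
	void *object, *next;

	do {
		object = atomic_load(&freelist);
		if (!object)
			return NULL;		/* kernel: fall back to __slab_alloc() */
		next = *(void **)object;
		/* A failed exchange reloads the head and retries:
		 * the "slab_alloc(%s) redo" case in the trace. */
	} while (!atomic_compare_exchange_weak(&freelist, &object, next));

	return object;
}

static void fl_push(void *object)		/* models slab_free() */
{
	void *head;

	do {
		head = atomic_load(&freelist);
		*(void **)object = head;	/* object[c->offset] = freelist */
		/*
		 * Success means the old value equalled the expected one
		 * (head).  Testing the result against the new value
		 * (object) instead would treat success as failure; the
		 * retry would then store object into its own free pointer
		 * and the list gains a self-cycle -- the corruption that
		 * on_fl() hunts for.
		 */
	} while (!atomic_compare_exchange_weak(&freelist, &head, object));
}

int main(void)
{
	void *slots[2];				/* two fake objects */

	fl_push(&slots[0]);
	fl_push(&slots[1]);
	printf("%p %p\n", fl_pop(), fl_pop());	/* LIFO: &slots[1], &slots[0] */
	return 0;
}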