---
 mm/slub.c |   19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

Index: linux-2.6.22-rc6-mm1/mm/slub.c
===================================================================
--- linux-2.6.22-rc6-mm1.orig/mm/slub.c	2007-07-07 17:19:51.000000000 -0700
+++ linux-2.6.22-rc6-mm1/mm/slub.c	2007-07-07 17:45:57.000000000 -0700
@@ -1559,6 +1559,11 @@ static void __always_inline *slab_alloc(
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	if (c->objsize != s->objsize)
+		printk(KERN_ERR "slab_alloc(%s, %d) kmem objsize=%d cpu objsize=%d\n",
+			s->name, node, s->objsize, c->objsize);
+	WARN_ON(c->page && c->freelist && virt_to_head_page(c->freelist) != c->page);
+	WARN_ON(c->page && page_to_nid(c->page) != c->node);
 	if (unlikely(!c->page || !c->freelist ||
 					!node_match(c, node)))
 
@@ -1668,6 +1673,11 @@ static void __always_inline slab_free(st
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	if (c->objsize != s->objsize)
+		printk(KERN_ERR "slab_free(%s, %p) kmem objsize=%d cpu objsize=%d\n",
+			s->name, x, s->objsize, c->objsize);
+	WARN_ON(c->page && c->freelist && virt_to_head_page(c->freelist) != c->page);
+	WARN_ON(c->page && page_to_nid(c->page) != c->node);
 	if (likely(page == c->page && c->freelist)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -3174,12 +3184,21 @@ struct kmem_cache *kmem_cache_create(con
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, ctor, ops);
 	if (s) {
+		int cpu;
+
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
+
+		/*
+		 * And then we need to update the object size in the
+		 * per cpu structures
+		 */
+		for_each_online_cpu(cpu)
+			get_cpu_slab(s, cpu)->objsize = s->objsize;
 		s->inuse = max_t(int, s->inuse,
 				ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);