---
 mm/slub.c |  133 +++++++++++++++++++++++++++++---------------------------------
 1 file changed, 63 insertions(+), 70 deletions(-)

Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-08 22:09:03.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-08 23:11:29.000000000 -0700
@@ -246,9 +246,6 @@ static void set_track(struct kmem_cache
 		memset(p, 0, sizeof(struct track));
 }
 
-#define set_tracking(__s, __o, __a) set_track(__s, __o, __a, \
-				__builtin_return_address(0))
-
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	if (s->flags & SLAB_STORE_USER) {
@@ -1133,8 +1130,8 @@ static void flush_all(struct kmem_cache
  * Fastpath is not possible if we need to get a new slab or have
  * debugging enabled (which means all slabs are marked with PageError)
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node)
+static void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1215,20 +1212,20 @@ debug:
 	if (!alloc_object_checks(s, page, object))
 		goto another_slab;
 	if (s->flags & SLAB_STORE_USER)
-		set_tracking(s, object, TRACK_ALLOC);
+		set_track(s, object, TRACK_ALLOC, addr);
 	goto have_object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1239,7 +1236,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  *
  * No special cachelines need to be read
  */
-static void slab_free(struct kmem_cache *s, struct page *page, void *x)
+static void slab_free(struct kmem_cache *s, struct page *page,
+					void *x, void *addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1294,6 +1292,8 @@ debug:
 	if (free_object_checks(s, page, x)) {
 		if (!PageActive(page) && !page->freelist)
 			remove_full(s, page);
+		if (s->flags & SLAB_STORE_USER)
+			set_track(s, x, TRACK_FREE, addr);
 		goto checks_ok;
 	}
 	goto out_unlock;
@@ -1307,9 +1307,7 @@ void kmem_cache_free(struct kmem_cache *
 
 	page = compound_head(page);
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, x, TRACK_FREE);
-	slab_free(s, page, x);
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -1412,7 +1410,7 @@ static unsigned long calculate_alignment
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
-	memset(n, 0, sizeof(struct kmem_cache_node));
+	n->nr_partial = 0;
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
@@ -1447,41 +1445,51 @@ static int init_kmem_cache_nodes(struct
 	for_each_online_node(node) {
 		struct kmem_cache_node *n;
 
-		if (local_node == node)
+		if (local_node == node) {
 			n = &s->local_node;
-		else
-		if (slab_state == DOWN) {
+			s->node[node] = n;
+			init_kmem_cache_node(n);
+		} else
+		if (slab_state == DOWN) {
+			struct page *page;
+			int nnode;
+
 			/*
 			 * No kmalloc_node yet so do it by hand.
 			 * We know that this is the first slab on the
 			 * node for this slabcache. There are no concurrent
 			 * accesses possible.
 			 */
-			struct page *page;
-
 			BUG_ON(s->size < sizeof(struct kmem_cache_node));
 			page = new_slab(kmalloc_caches, gfpflags, node);
 			/* new_slab() disables interupts */
 			local_irq_enable();
 
 			BUG_ON(!page);
+			nnode = page_to_nid(page);
 			n = page->freelist;
+			BUG_ON(!n);
 			page->freelist = get_freepointer(kmalloc_caches, n);
 			page->inuse++;
-		} else
+			s->node[node] = n;
+			if (node != nnode)
+				printk(KERN_ERR "Wrong during alloc node %d instead of %d\n",
+					nnode, node);
+			init_kmem_cache_node(n);
+			atomic_long_inc(&n->nr_slabs);
+			add_partial(kmalloc_caches, page);
+		} else {
 			n = kmem_cache_alloc_node(kmalloc_caches,
 							gfpflags, node);
 
-		if (!n) {
-			free_kmem_cache_nodes(s);
-			return 0;
-		}
-
-		s->node[node] = n;
-		init_kmem_cache_node(n);
+			if (!n) {
+				free_kmem_cache_nodes(s);
+				return 0;
+			}
 
-		if (slab_state == DOWN)
-			atomic_long_inc(&n->nr_slabs);
+			s->node[node] = n;
+			init_kmem_cache_node(n);
+		}
 	}
 #else
 	init_kmem_cache_node(&s->local_node);
@@ -1868,7 +1876,7 @@ void *__kmalloc(size_t size, gfp_t flags
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc(s, flags);
+		return slab_alloc(s, flags, -1, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -1879,7 +1887,7 @@ void *__kmalloc_node(size_t size, gfp_t
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc_node(s, flags, node);
+		return slab_alloc(s, flags, node, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -1925,12 +1933,9 @@ void kfree(const void *x)
 		return;
 
 	page = compound_head(virt_to_page(x));
-
 	s = page->slab;
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, (void *)x, TRACK_FREE);
-	slab_free(s, page, (void *)x);
+	slab_free(s, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2130,7 +2135,7 @@ void *kmem_cache_zalloc(struct kmem_cach
 {
 	void *x;
 
-	x = kmem_cache_alloc(s, flags);
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2281,34 +2286,22 @@ __initcall(cpucache_init);
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc(s, gfpflags);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, -1, caller);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
					int node, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc_node(s, gfpflags, node);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #ifdef CONFIG_SYSFS
@@ -2345,34 +2338,46 @@ static void validate_slab_slab(struct km
 		validate_slab(s, page);
 		slab_unlock(page);
 	} else
-		printk(KERN_INFO "Skipped busy slab %p\n", page);
+		printk(KERN_INFO "SLUB: %s Skipped busy slab %p\n",
+			s->name, page);
+
+	if (!PageError(page))
+		printk(KERN_ERR "SLUB: %s PageError not set on slab %p\n",
+			s->name, page);
 }
 
 static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	int count = 0;
+	unsigned long count = 0;
 	struct page *page;
 	unsigned long flags;
 
 	spin_lock_irqsave(&n->list_lock, flags);
+
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page);
 		count++;
 	}
-	printk("partial=%d\n",count);
+	if (count != n->nr_partial)
+		printk("SLUB: %s Partial slabs counted=%ld counter=%ld\n",
+			s->name, count, n->nr_partial);
+
 	list_for_each_entry(page, &n->full, lru) {
 		validate_slab_slab(s, page);
 		count++;
 	}
-	printk("partial+full=%d\n",count);
+	if (count != atomic_long_read(&n->nr_slabs))
+		printk("SLUB: %s Slabs counted=%ld counter=%ld\n",
+			s->name, count, atomic_long_read(&n->nr_slabs));
+
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
-static void validate_slab_cache(struct kmem_cache *s)
+static unsigned long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
-	int count = 0;
+	unsigned long count = 0;
 
 	flush_all(s);
 	for_each_online_node(node) {
@@ -2380,8 +2385,7 @@ static void validate_slab_cache(struct k
 
 		count += validate_slab_node(s, n);
 	}
-	printk(KERN_INFO "SLUB: Validated %d slabs in '%s'\n",
-		count, s->name);
+	return count;
 }
 
 /*
@@ -2438,7 +2442,7 @@ static int add_location(struct loc_track
 	struct location *l;
 	void *caddr;
 
-	start = 0;
+	start = -1;
 	end = t->count;
 
 	for(;;) {
@@ -2516,26 +2520,15 @@ static int list_locations(struct kmem_ca
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long flags;
 		struct page *page;
-		unsigned long slab_count = 0;
 
 		if (!atomic_read(&n->nr_slabs))
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru) {
+		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc);
-			slab_count++;
-		}
-		if (slab_count != n->nr_partial)
-			printk(KERN_ERR "partial mismatch nr_partial=%ld actual=%ld\n",
-				n->nr_partial, slab_count);
-
-		list_for_each_entry(page, &n->full, lru) {
+
+		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc);
-			slab_count++;
-		}
-		if (slab_count != atomic_read(&n->nr_slabs))
-			printk(KERN_ERR "counting mismatch counter=%ld actual=%ld\n",
-				atomic_read(&n->nr_slabs), slab_count);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}