---
 include/linux/slub_def.h |    1 +
 mm/slub.c                |   38 ++++++++++++++++++++++++++++++++++----
 2 files changed, 35 insertions(+), 4 deletions(-)

Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-07 22:16:51.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-08 18:12:23.000000000 -0700
@@ -277,7 +277,7 @@ static void print_track(const char *s, s
 	} else
 #endif
 		printk(KERN_ERR "%s: 0x%p", s, t->addr);
-	printk(" jiffies since=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+	printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_trailer(struct kmem_cache *s, u8 *p)
@@ -572,6 +572,28 @@ static int on_freelist(struct kmem_cache
 	return 0;
 }
 
+/*
+ * Tracking of fully allocated slabs for debugging
+ */
+static void add_full(struct kmem_cache *s, struct page *page)
+{
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+	spin_unlock(&n->list_lock);
+}
+
+static void remove_full(struct kmem_cache *s,
+					struct page *page)
+{
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+	spin_lock(&n->list_lock);
+	list_del(&page->lru);
+	spin_unlock(&n->list_lock);
+}
+
 static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 							void *object)
 {
@@ -862,7 +884,7 @@ static void add_partial(struct kmem_cach
 
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
+	list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
@@ -990,6 +1012,10 @@ static void putback_slab(struct kmem_cac
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
+		else
+			if (PageError(page))
+				/* If debugging is on track full slabs */
+				add_full(s, page);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1252,7 +1278,7 @@ out_unlock:
 
 slab_empty:
 	if (prior)
 		/*
-		 * Partially used slab that is on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(s, page);
 
@@ -1262,8 +1288,11 @@ slab_empty:
 	return;
 
 debug:
-	if (free_object_checks(s, page, x))
+	if (free_object_checks(s, page, x)) {
+		if (!PageActive(page) && !page->freelist)
+			remove_full(s, page);
 		goto checks_ok;
+	}
 	goto out_unlock;
 }
 
@@ -1384,6 +1413,7 @@ static void init_kmem_cache_node(struct
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+	INIT_LIST_HEAD(&n->full);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
Index: linux-2.6.21-rc5-mm4/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc5-mm4.orig/include/linux/slub_def.h	2007-04-07 22:16:39.000000000 -0700
+++ linux-2.6.21-rc5-mm4/include/linux/slub_def.h	2007-04-07 22:16:55.000000000 -0700
@@ -16,6 +16,7 @@ struct kmem_cache_node {
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
+	struct list_head full;
 };
 
 /*
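
Not part of the patch above: for readers unfamiliar with the list handling, the following is a minimal, stand-alone user-space sketch of the same pattern, i.e. a per-node lock protecting a partial and a full list, with add_full()/remove_full() linking a fully allocated slab onto and off the full list. The types and helpers here (struct node, struct fake_page, the simplified list functions, pthread mutexes) are illustrative stand-ins only; the kernel code uses struct kmem_cache_node, struct page, <linux/list.h> and spin_lock().

/*
 * Illustrative analogue of the full-slab tracking added by the patch.
 * Hypothetical names; not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Per-node state: one lock protects both the partial and the full list. */
struct node {
	pthread_mutex_t list_lock;
	struct list_head partial;
	struct list_head full;
};

/* Stand-in for struct page; lru is the link used for either list. */
struct fake_page {
	struct list_head lru;
	int inuse;
	void *freelist;
};

/* Mirror of add_full(): link a slab with no free objects onto n->full. */
static void add_full(struct node *n, struct fake_page *page)
{
	pthread_mutex_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	pthread_mutex_unlock(&n->list_lock);
}

/* Mirror of remove_full(): unlink the slab once it gains free objects. */
static void remove_full(struct node *n, struct fake_page *page)
{
	pthread_mutex_lock(&n->list_lock);
	list_del(&page->lru);
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node n;
	struct fake_page page = { .inuse = 1, .freelist = NULL };

	pthread_mutex_init(&n.list_lock, NULL);
	list_init(&n.partial);
	list_init(&n.full);

	/* Slab is fully allocated (no freelist): track it on the full list. */
	if (page.inuse && !page.freelist)
		add_full(&n, &page);

	/* An object was freed: take the slab off the full list again. */
	remove_full(&n, &page);

	printf("full list empty again: %d\n", n.full.next == &n.full);
	return 0;
}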