Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-06 11:29:21.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-06 11:32:48.000000000 -0700
@@ -2390,7 +2390,7 @@ static int validate_slab(struct kmem_cac
 	return 1;
 }
 
-static int validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page)
 {
 	if (slab_trylock(page)) {
 		validate_slab(s, page);
@@ -2411,7 +2411,7 @@ static int validate_slab_node(struct kme
 		count++;
 	}
 	list_for_each_entry(page, &n->full, lru) {
-		validate_slab_slab(s, page);{
+		validate_slab_slab(s, page);
 		count++;
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -2434,6 +2434,152 @@ static void validate_slab_cache(struct k
 		count, s->name);
 }
 
+/*
+ * Generate lists of locations where slabcache objects are allocated
+ * and freed.
+ */
+
+struct location {
+	unsigned long count;
+	void *addr;
+};
+
+struct loc_track {
+	unsigned long max;
+	unsigned long count;
+	struct location *loc;
+};
+
+static int alloc_loc_track(struct loc_track *l, unsigned long max)
+{
+	struct location *t;
+	int order;
+
+	/* Start from a sane minimum if no capacity has been set up yet */
+	if (!max)
+		max = PAGE_SIZE / sizeof(struct location);
+
+	order = get_order(sizeof(struct location) * max);
+
+	/* May be called under list_lock with irqs off, so do not sleep */
+	t = (void *)__get_free_pages(GFP_ATOMIC, order);
+	if (!t)
+		return 0;
+
+	/* Carry over the locations recorded so far and drop the old buffer */
+	if (l->count) {
+		memcpy(t, l->loc, sizeof(struct location) * l->count);
+		free_pages((unsigned long)l->loc,
+			get_order(sizeof(struct location) * l->max));
+	}
+	l->max = max;
+	l->loc = t;
+	return 1;
+}
+
+static int add_location(struct loc_track *t, struct kmem_cache *s,
+		void *addr)
+{
+	long start, end, pos;
+	struct location *l;
+	void *caddr;
+
+	start = -1;
+	end = t->count;
+
+	for (;;) {
+		pos = start + (end - start + 1) / 2;
+
+		/*
+		 * There is nothing at "end". If we end up there
+		 * then we need to insert before end.
+		 */
+		if (pos == end)
+			break;
+
+		caddr = t->loc[pos].addr;
+		if (addr == caddr) {
+			/* Found. Increment number */
+			t->loc[pos].count++;
+			return 1;
+		}
+		if (addr < caddr)
+			end = pos;
+		else
+			start = pos;
+	}
+
+	/*
+	 * Not found. Insert new tracking element
+	 */
+	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
+		return 0;
+
+	l = t->loc + pos;
+	memmove(l + 1, l, ((t->loc + t->count) - l) * sizeof(struct location));
+	t->count++;
+	l->count = 1;
+	l->addr = addr;
+	return 1;
+}
+
+static void process_slab(struct loc_track *t, struct kmem_cache *s,
+		struct page *page, int alloc)
+{
+	void *addr = page_address(page);
+	unsigned long map[BITS_TO_LONGS(s->objects)];
+	void *p;
+
+	/* Mark all objects that are currently on the freelist */
+	bitmap_zero(map, s->objects);
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit((p - addr) / s->size, map);
+
+	/* Record the tracked caller of every object still in use */
+	for (p = addr; p < addr + s->objects * s->size; p += s->size)
+		if (!test_bit((p - addr) / s->size, map))
+			add_location(t, s, get_track(s, p, alloc)->addr);
+}
+
+static int list_locations(struct kmem_cache *s, char *buf, int alloc)
+{
+	int n = 0;
+	unsigned long i;
+	struct loc_track t;
+	int node;
+
+	t.count = 0;
+	t.max = 0;
+	t.loc = NULL;
+
+	/* Push back cpu slabs */
+	flush_all(s);
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long flags;
+		struct page *page;
+
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(page, &n->partial, lru)
+			process_slab(&t, s, page, alloc);
+		list_for_each_entry(page, &n->full, lru)
+			process_slab(&t, s, page, alloc);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
+
+	for (i = 0; i < t.count; i++) {
+		if (n > PAGE_SIZE - 100)
+			break;
+		n += sprintf(buf + n, "%lu ", t.loc[i].count);
+		n += sprint_symbol(buf + n, (unsigned long)t.loc[i].addr);
+		n += sprintf(buf + n, "\n");
+	}
+	free_pages((unsigned long)t.loc,
+		get_order(sizeof(struct location) * t.max));
+	return n;
+}
+
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2778,6 +2924,18 @@ static ssize_t validate_store(struct kme
 }
 SLAB_ATTR(validate);
 
+static ssize_t list_alloc_locations_show(struct kmem_cache *s, char *buf)
+{
+	return list_locations(s, buf, 1);
+}
+SLAB_ATTR_RO(list_alloc_locations);
+
+static ssize_t list_free_locations_show(struct kmem_cache *s, char *buf)
+{
+	return list_locations(s, buf, 0);
+}
+SLAB_ATTR_RO(list_free_locations);
+
 #ifdef CONFIG_NUMA
 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
@@ -2818,6 +2976,8 @@ static struct attribute * slab_attrs[] =
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
+	&list_alloc_locations_attr.attr,
+	&list_free_locations_attr.attr,
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
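
For illustration only, and not part of the patch: the sketch below is a self-contained userspace version of the loc_track bookkeeping used by list_locations() above, so the sorted-array-plus-binary-search dedup of caller addresses can be compiled and exercised outside the kernel. The demo_* names are made up, and realloc() stands in for the kernel page allocator.

/* demo_loc_track.c -- userspace sketch of the loc_track scheme */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_location {
	unsigned long count;
	void *addr;
};

struct demo_track {
	unsigned long max;
	unsigned long count;
	struct demo_location *loc;
};

/*
 * Same scheme as add_location(): keep the array sorted by caller address,
 * binary search for the caller, bump its count on a hit, insert on a miss.
 */
static int demo_add_location(struct demo_track *t, void *addr)
{
	long start = -1, end = t->count, pos;
	struct demo_location *l;

	for (;;) {
		pos = start + (end - start + 1) / 2;
		if (pos == end)
			break;		/* nothing at "end": insert before it */

		if (addr == t->loc[pos].addr) {
			t->loc[pos].count++;
			return 1;
		}
		if (addr < t->loc[pos].addr)
			end = pos;
		else
			start = pos;
	}

	/* Not found: grow by doubling if needed, then shift and insert at pos */
	if (t->count >= t->max) {
		unsigned long max = t->max ? 2 * t->max : 16;

		l = realloc(t->loc, max * sizeof(*l));
		if (!l)
			return 0;
		t->loc = l;
		t->max = max;
	}
	l = t->loc + pos;
	memmove(l + 1, l, (t->count - pos) * sizeof(*l));
	t->count++;
	l->count = 1;
	l->addr = addr;
	return 1;
}

int main(void)
{
	/* Fake "caller addresses"; duplicates get merged into counts */
	void *callers[] = { (void *)0x300, (void *)0x100, (void *)0x300,
			    (void *)0x200, (void *)0x100, (void *)0x300 };
	struct demo_track t = { 0, 0, NULL };
	unsigned long i;

	for (i = 0; i < sizeof(callers) / sizeof(callers[0]); i++)
		demo_add_location(&t, callers[i]);

	for (i = 0; i < t.count; i++)	/* expect counts 2, 1, 3 in address order */
		printf("%lu %p\n", t.loc[i].count, t.loc[i].addr);

	free(t.loc);
	return 0;
}

Building it with "gcc -Wall demo_loc_track.c" and running the binary shows the merged per-caller counts in address order, which is the same shape of output the new list_alloc_locations and list_free_locations sysfs files produce (count followed by the resolved symbol).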