---
 include/linux/slub_def.h |   19 +++++++++++-
 mm/slub.c                |   73 +++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 82 insertions(+), 10 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-09 15:16:51.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-09 15:58:56.000000000 -0700
@@ -11,12 +11,29 @@
 #include
 #include
 
+/*
+ * Debugging information that only exists if SLAB_STORE_USER is enabled
+ * on a slab.
+ */
+struct kmem_cache_debug {
+	struct list_head full;
+	unsigned long freed_objects;	/* Objects that were freed so far */
+	unsigned long min_life;		/* Shortest object lifetime */
+	unsigned long max_life;		/* Longest object lifetime */
+	unsigned long long sum_life;
+	unsigned long allocs;
+	unsigned long frees;
+	unsigned long alloc_list;	/* Allocs requiring partial lists */
+	unsigned long free_list;	/* Frees requiring partial lists */
+};
+
+
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
-	struct list_head full;
+	struct kmem_cache_debug *debug;
 };
 
 /*
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-09 15:16:51.000000000 -0700
+++ slub/mm/slub.c	2007-05-09 16:01:57.000000000 -0700
@@ -731,8 +731,11 @@ static int on_freelist(struct kmem_cache
  */
 static void add_full(struct kmem_cache_node *n, struct page *page)
 {
+	/* Can occur during bootstrap */
+	if (!n->debug)
+		return;
 	spin_lock(&n->list_lock);
-	list_add(&page->lru, &n->full);
+	list_add(&page->lru, &n->debug->full);
 	spin_unlock(&n->list_lock);
 }
 
@@ -915,6 +918,21 @@ static void kmem_cache_open_debug_check(
 			strlen(slub_debug_slabs)) == 0))
 				s->flags |= slub_debug;
 }
+
+static int alloc_kmem_cache_debug(struct kmem_cache *s, gfp_t gfpflags)
+{
+	int node;
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		n->debug =
+			kzalloc(sizeof(struct kmem_cache_debug), gfpflags);
+		if (!n->debug)
+			return -ENOMEM;
+	}
+	return 0;
+}
 #else
 
 static inline int alloc_object_checks(struct kmem_cache *s,
@@ -938,7 +956,11 @@ static inline void set_track(struct kmem
 			enum track_item alloc, void *addr) {}
 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
 #define slub_debug 0
+
+static inline int alloc_kmem_cache_debug(struct kmem_cache *s,
+						gfp_t gfpflags) { return 0; }
 #endif
+
 /*
  * Slab allocation and freeing
  */
@@ -1556,8 +1578,19 @@ debug:
 		goto out_unlock;
 	if (!PageActive(page) && !page->freelist)
 		remove_full(s, page);
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, x, TRACK_FREE, addr);
+	if (s->flags & SLAB_STORE_USER) {
+		struct kmem_cache_debug *d =
+			get_node(s, page_to_nid(page))->debug;
+		unsigned long lifetime = jiffies -
+			get_track(s, x, TRACK_ALLOC)->when;
+
+		set_track(s, x, TRACK_FREE, addr);
+		d->sum_life += lifetime;
+		if (lifetime < d->min_life)
+			d->min_life = lifetime;
+		if (lifetime > d->max_life)
+			d->max_life = lifetime;
+	}
 	trace(s, page, object, 0);
 	init_object(s, object, 0);
 	goto checks_ok;
@@ -1771,7 +1804,8 @@ static void init_kmem_cache_node(struct
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
-	INIT_LIST_HEAD(&n->full);
+	n->debug = NULL;
+//	INIT_LIST_HEAD(&n->full);
 }
 
 #ifdef CONFIG_NUMA
@@ -2002,8 +2036,16 @@ static int kmem_cache_open(struct kmem_c
 	s->defrag_ratio = 100;
 #endif
 
-	if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
-		return 1;
+	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
+		goto error;
+
+	if (slab_state >= UP && s->flags & SLAB_STORE_USER) {
+		if (alloc_kmem_cache_debug(s, gfpflags & ~SLUB_DMA)) {
+			kmem_cache_close(s);
+			goto error;
+		}
+	}
+	return 1;
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
@@ -2074,6 +2116,8 @@ static int free_list(struct kmem_cache *
 	} else
 		slabs_inuse++;
 	spin_unlock_irqrestore(&n->list_lock, flags);
+	kfree(n->debug);
+	n->debug = NULL;
 	return slabs_inuse;
 }
 
@@ -2458,9 +2502,20 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		kmalloc_caches[i]. name =
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches + i;
+
+		if (s->flags & SLAB_STORE_USER)
+			BUG_ON(alloc_kmem_cache_debug(s, GFP_KERNEL));
+		s->name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+	}
+	for (i = 1; i <= 2; i++) {
+		struct kmem_cache *s = kmalloc_caches + i;
+
+		if (s->flags & SLAB_STORE_USER)
+			BUG_ON(alloc_kmem_cache_debug(s, GFP_KERNEL));
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
@@ -2719,7 +2774,7 @@ static int validate_slab_node(struct kme
 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
 
-	list_for_each_entry(page, &n->full, lru) {
+	list_for_each_entry(page, &n->debug->full, lru) {
 		validate_slab_slab(s, page);
 		count++;
 	}
@@ -2972,7 +3027,7 @@ static int list_locations(struct kmem_ca
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc);
-		list_for_each_entry(page, &n->full, lru)
+		list_for_each_entry(page, &n->debug->full, lru)
 			process_slab(&t, s, page, alloc);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
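
For context, a small standalone userspace sketch (not part of the patch) of the lifetime accounting done in the SLAB_STORE_USER free path above. The field names mirror kmem_cache_debug; struct debug_stats, account_free() and the timestamps are illustrative stand-ins for the jiffies values recorded by set_track(). The guard on the first sample is an addition in the sketch, since a kzalloc'ed minimum of 0 would otherwise never be updated.

/*
 * Sketch of per-node object lifetime statistics (min/max/sum), modeled
 * on the kmem_cache_debug fields introduced by this patch.
 */
#include <stdio.h>

struct debug_stats {
	unsigned long freed_objects;	/* number of frees accounted */
	unsigned long min_life;		/* shortest observed lifetime */
	unsigned long max_life;		/* longest observed lifetime */
	unsigned long long sum_life;	/* total, for computing an average */
};

/* Account one free: lifetime = free timestamp - allocation timestamp */
static void account_free(struct debug_stats *d,
			 unsigned long alloc_time, unsigned long free_time)
{
	unsigned long lifetime = free_time - alloc_time;

	/* First sample initializes the minimum (it starts zeroed) */
	if (!d->freed_objects || lifetime < d->min_life)
		d->min_life = lifetime;
	if (lifetime > d->max_life)
		d->max_life = lifetime;
	d->sum_life += lifetime;
	d->freed_objects++;
}

int main(void)
{
	struct debug_stats d = { 0 };

	/* Three hypothetical alloc/free timestamp pairs */
	account_free(&d, 100, 150);
	account_free(&d, 120, 130);
	account_free(&d, 140, 400);

	printf("min=%lu max=%lu avg=%llu\n",
	       d.min_life, d.max_life, d.sum_life / d.freed_objects);
	return 0;
}

Compiled with any C compiler this prints min=10 max=260 avg=106, which is the kind of summary a reporting interface could derive from the sum_life / freed_objects counters.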