Index: linux-2.6.21-rc6/mm/slub.c
===================================================================
--- linux-2.6.21-rc6.orig/mm/slub.c	2007-04-11 14:20:14.000000000 -0700
+++ linux-2.6.21-rc6/mm/slub.c	2007-04-11 14:25:27.000000000 -0700
@@ -181,6 +181,18 @@ static inline struct kmem_cache_node *ge
 #endif
 }
 
+#ifndef CONFIG_SLUB_DEBUG
+
+static inline int debug_slab(struct page *page)
+{
+	return 0;
+}
+
+#else
+static inline int debug_slab(struct page *page)
+{
+	return PageError(page);
+}
 /*
  * Object debugging
  */
@@ -718,6 +730,7 @@ fail:
 	dump_stack();
 	return 0;
 }
+#endif
 
 /*
  * Slab allocation and freeing
@@ -752,7 +765,7 @@ static struct page *allocate_slab(struct
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
-	if (PageError(page)) {
+	if (debug_slab(page)) {
 		init_object(s, object, 0);
 		init_tracking(s, object);
 	}
@@ -825,7 +838,7 @@ static void __free_slab(struct kmem_cach
 {
 	int pages = 1 << s->order;
 
-	if (unlikely(PageError(page) || s->dtor)) {
+	if (unlikely(debug_slab(page) || s->dtor)) {
 		void *start = page_address(page);
 		void *end = start + (pages << PAGE_SHIFT);
 		void *p;
@@ -1042,7 +1055,7 @@ static void putback_slab(struct kmem_cac
 	if (page->inuse) {
 		if (page->freelist)
 			add_partial(s, page);
-		else if (PageError(page))
+		else if (debug_slab(page))
 			add_full(s, page);
 		slab_unlock(page);
 	} else {
@@ -1179,7 +1192,7 @@ redo:
 	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(PageError(page)))
+	if (unlikely(debug_slab(page)))
 		goto debug;
 
 have_object:
@@ -1274,7 +1287,7 @@ static void slab_free(struct kmem_cache
 	local_irq_save(flags);
 	slab_lock(page);
 
-	if (unlikely(PageError(page)))
+	if (unlikely(debug_slab(page)))
 		goto debug;
 checks_ok:
 	prior = object[page->offset] = page->freelist;
@@ -1568,6 +1581,9 @@ static int calculate_sizes(struct kmem_c
 	unsigned long size = s->objsize;
 	unsigned long align = s->align;
 
+#ifndef CONFIG_SLUB_DEBUG
+	BUG_ON(flags & DEFAULT_DEBUG_FLAGS);
+#else
 	/*
 	 * Determine if we can poison the object itself. If the user of
 	 * the slab may touch the object after free or before allocation
@@ -1577,6 +1593,7 @@ static int calculate_sizes(struct kmem_c
 			!s->ctor && !s->dtor)
 		flags |= __OBJECT_POISON;
 	else
+#endif
 		flags &= ~__OBJECT_POISON;
 
 	/*
@@ -1615,13 +1632,14 @@ static int calculate_sizes(struct kmem_c
 		s->offset = size;
 		size += sizeof(void *);
 	}
-
+#ifdef CONFIG_SLUB_DEBUG
 	if (flags & SLAB_STORE_USER)
 		/*
 		 * Need to store information about allocs and frees after
 		 * the object.
 		 */
 		size += 2 * sizeof(struct track);
+#endif
 
 	/*
 	 * Determine the alignment based on various parameters that the
@@ -2419,6 +2437,7 @@ void *__kmalloc_node_track_caller(size_t
 
 #ifdef CONFIG_SYSFS
 
+#ifdef CONFIG_SLUB_DEBUG
 static int validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;
@@ -2454,7 +2473,7 @@ static void validate_slab_slab(struct km
 		printk(KERN_INFO "SLUB: %s Skipped busy slab %p\n",
 			s->name, page);
 
-	if (!PageError(page))
+	if (!debug_slab(page))
 		printk(KERN_ERR "SLUB: %s PageError not set on slab %p\n",
 			s->name, page);
 }
@@ -2667,6 +2686,7 @@ static int list_locations(struct kmem_ca
 		n += sprintf(buf, "No data\n");
 	return n;
 }
+#endif
 
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
@@ -2879,6 +2899,7 @@ static ssize_t objects_show(struct kmem_
 }
 SLAB_ATTR_RO(objects);
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
 }
@@ -2908,6 +2929,7 @@ static ssize_t trace_store(struct kmem_c
 	return length;
 }
 SLAB_ATTR(trace);
+#endif
 
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
@@ -2945,6 +2967,7 @@ static ssize_t destroy_by_rcu_show(struc
 }
 SLAB_ATTR_RO(destroy_by_rcu);
+#ifdef CONFIG_SLUB_DEBUG
 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
 }
@@ -3033,6 +3056,7 @@ static ssize_t free_calls_show(struct km
 	return list_locations(s, buf, TRACK_FREE);
 }
 SLAB_ATTR_RO(free_calls);
+#endif
 
 #ifdef CONFIG_NUMA
 static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
@@ -3065,17 +3089,21 @@ static struct attribute * slab_attrs[] =
 	&dtor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&sanity_checks_attr.attr,
 	&trace_attr.attr,
+#endif
 	&hwcache_align_attr.attr,
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
 	&red_zone_attr.attr,
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
 	&alloc_calls_attr.attr,
 	&free_calls_attr.attr,
+#endif
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif