From 71f4d9b15d2c108b0c6f6cad47b13c4b98be2dfb Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 26 Mar 2008 12:46:37 -0700
Subject: [PATCH] Slub: No need for slab counters if !SLUB_DEBUG

Counters are used mainly for showing data through the sysfs API. If
that API is not compiled in then there is no point in keeping track of
this data. Disable the counters for the number of slabs and the total
number of objects if !SLUB_DEBUG.

This also affects SLABINFO support: it now must depend on SLUB_DEBUG
(which is on by default).

Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |    4 ++--
 init/Kconfig             |    2 +-
 mm/slub.c                |   13 ++++++++++++-
 3 files changed, 15 insertions(+), 4 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-03-31 16:29:09.359169711 -0700
+++ linux-2.6/include/linux/slub_def.h	2008-03-31 16:30:30.576668925 -0700
@@ -46,10 +46,10 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	atomic_long_t nr_slabs;
-	atomic_long_t total_objects;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
 	struct list_head full;
 #endif
 };

Index: linux-2.6/init/Kconfig
===================================================================
--- linux-2.6.orig/init/Kconfig	2008-03-31 16:27:54.826666917 -0700
+++ linux-2.6/init/Kconfig	2008-03-31 16:30:30.576668925 -0700
@@ -763,7 +763,7 @@ endmenu		# General setup
 config SLABINFO
 	bool
 	depends on PROC_FS
-	depends on SLAB || SLUB
+	depends on SLAB || (SLUB && SLUB_DEBUG)
 	default y
 
 config RT_MUTEXES

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-31 16:30:27.049171258 -0700
+++ linux-2.6/mm/slub.c	2008-03-31 16:30:30.576668925 -0700
@@ -1137,10 +1137,12 @@ static struct page *new_slab(struct kmem
 		goto out;
 
 	n = get_node(s, page_to_nid(page));
+#ifdef CONFIG_SLUB_DEBUG
 	if (n) {
 		atomic_long_inc(&n->nr_slabs);
 		atomic_long_add(page->objects, &n->total_objects);
 	}
+#endif
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1215,10 +1217,12 @@ static void free_slab(struct kmem_cache
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
 	atomic_long_dec(&n->nr_slabs);
 	atomic_long_sub(page->objects, &n->total_objects);
+#endif
 	free_slab(s, page);
 }
 
@@ -1933,10 +1937,11 @@ static void init_kmem_cache_cpu(struct k
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+	atomic_long_set(&n->nr_slabs, 0);
+	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2105,7 +2110,9 @@ static struct kmem_cache_node *early_kme
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
+#ifdef CONFIG_SLUB_DEBUG
 	atomic_long_inc(&n->nr_slabs);
+#endif
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2412,8 +2419,10 @@ static inline int kmem_cache_close(struc
 		struct kmem_cache_node *n = get_node(s, node);
 		n->nr_partial -= free_list(s, n, &n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 		if (atomic_long_read(&n->nr_slabs))
 			return 1;
+#endif
 	}
 	free_kmem_cache_nodes(s);
 	return 0;
 }
@@ -2863,6 +2872,7 @@ static void slab_mem_offline_callback(vo
 	list_for_each_entry(s, &slab_caches, list) {
 		n = get_node(s, offline_node);
 		if (n) {
+#ifdef CONFIG_SLUB_DEBUG
 			/*
 			 * if n->nr_slabs > 0, slabs still exist on the node
 			 * that is going down. We were unable to free them,
@@ -2870,6 +2880,7 @@ static void slab_mem_offline_callback(vo
 			 * callback. So, we must fail.
 			 */
 			BUG_ON(atomic_long_read(&n->nr_slabs));
+#endif
 			s->node[offline_node] = NULL;
 			kmem_cache_free(kmalloc_caches, n);
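
For reference, below is a stand-alone user-space sketch of the pattern the
patch applies. It is not part of the patch and not kernel code: the macro
CONFIG_DEBUG_COUNTERS, struct node_stats, and the stats_* helpers are made-up
names for illustration. The idea shown is the same: statistics fields and
their updates are compiled in only when the reporting interface is configured,
and inline helpers keep the #ifdef in one place instead of at every call site.

        /*
         * Illustration only -- not kernel code. CONFIG_DEBUG_COUNTERS and the
         * helpers below are invented names demonstrating the pattern: keep
         * counters and their updates only when a debug/reporting option is set.
         */
        #include <stdio.h>

        #define CONFIG_DEBUG_COUNTERS 1 /* remove to compile the counters away */

        struct node_stats {
                unsigned long nr_partial;       /* always needed */
        #ifdef CONFIG_DEBUG_COUNTERS
                long nr_slabs;                  /* only reported via a stats interface */
                long total_objects;
        #endif
        };

        /* Helpers keep the #ifdef in one place rather than at every call site. */
        static inline void stats_add_slab(struct node_stats *n, long objects)
        {
        #ifdef CONFIG_DEBUG_COUNTERS
                n->nr_slabs++;
                n->total_objects += objects;
        #else
                (void)n;
                (void)objects;
        #endif
        }

        static inline long stats_slab_count(const struct node_stats *n)
        {
        #ifdef CONFIG_DEBUG_COUNTERS
                return n->nr_slabs;
        #else
                (void)n;
                return 0;
        #endif
        }

        int main(void)
        {
                struct node_stats n = { .nr_partial = 0 };

                stats_add_slab(&n, 32);
                stats_add_slab(&n, 32);
                printf("slabs tracked: %ld\n", stats_slab_count(&n));
                return 0;
        }

Building this with the #define removed drops the counter fields and turns both
helpers into no-ops, which is the effect the patch gets for nr_slabs and
total_objects when CONFIG_SLUB_DEBUG is not set.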