SLUB: Reduce memory footprint

This reduces memory footprint by

1. Inlining small functions.

2. For the !CONFIG_SLUB_DEBUG case: Removal of useless fields and
   functions.

Signed-off-by: Christoph Lameter

---
 include/linux/slub_def.h |    4 ++++
 mm/slub.c                |   29 ++++++++++++++++++-----------
 2 files changed, 22 insertions(+), 11 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-23 22:34:17.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-23 22:36:23.000000000 -0700
@@ -17,7 +17,9 @@ struct kmem_cache_node {
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
 	struct list_head full;
+#endif
 };
 
 /*
@@ -45,7 +47,9 @@ struct kmem_cache {
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
 	struct kobject kobj;	/* For sysfs */
+#endif
 
 #ifdef CONFIG_NUMA
 	int defrag_ratio;

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-23 22:34:17.000000000 -0700
+++ slub/mm/slub.c	2007-05-23 22:36:23.000000000 -0700
@@ -254,9 +254,9 @@ static int sysfs_slab_add(struct kmem_ca
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
 #else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
 #endif
 
 /********************************************************************
@@ -1011,7 +1011,7 @@ static struct page *allocate_slab(struct
 	return page;
 }
 
-static void setup_object(struct kmem_cache *s, struct page *page,
+static inline void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
@@ -1346,7 +1346,7 @@ static void deactivate_slab(struct kmem_
 	unfreeze_slab(s, page);
 }
 
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
 {
 	slab_lock(page);
 	deactivate_slab(s, page, cpu);
@@ -1356,7 +1356,7 @@ static void flush_slab(struct kmem_cache
 /*
  * Flush cpu slab.
  * Called from IPI handler with interrupts disabled.
  */
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct page *page = s->cpu_slab[cpu];
@@ -1764,7 +1764,7 @@ static inline int calculate_order(int si
 /*
  * Figure out what the alignment of the objects will be.
  */
-static unsigned long calculate_alignment(unsigned long flags,
+static inline unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size)
 {
 	/*
@@ -1786,13 +1786,15 @@ static unsigned long calculate_alignment
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static inline void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -1877,11 +1879,11 @@ static int init_kmem_cache_nodes(struct
 	return 1;
 }
 #else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
+static inline void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 }
 
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static inline int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 	init_kmem_cache_node(&s->local_node);
 	return 1;
@@ -2278,8 +2280,9 @@ size_t ksize(const void *object)
 
 	BUG_ON(!page);
 	s = page->slab;
-	BUG_ON(!s);
 
+#ifdef CONFIG_SLUB_DEBUG
+	BUG_ON(!s);
 	/*
 	 * Debugging requires use of the padding between object
 	 * and whatever may come after it.
@@ -2295,6 +2298,8 @@ size_t ksize(const void *object)
 	 */
 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
 		return s->inuse;
+#endif
+
 	/*
 	 * Else we can use all the padding etc for the allocation
 	 */
@@ -2475,10 +2480,12 @@ void __init kmem_cache_init(void)
 
 	slab_state = UP;
 
+#ifdef CONFIG_SLUB_DEBUG
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+#endif
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
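
For readers unfamiliar with the pattern, here is a standalone sketch of why
both techniques shrink the build. It is illustrative only, not part of the
patch, and the names (SLAB_DEBUG, cache_node, debug_check) are made up rather
than the kernel's: the #ifdef removes debug-only struct fields, and an empty
static inline stub lets the compiler drop every call site, so neither code nor
data for the debug path reaches the binary when debugging is configured off.

/* sketch.c - standalone illustration of the two techniques.
 * Build once plain and once with -DSLAB_DEBUG, then compare
 * the reported struct size and the generated code:
 *
 *   gcc -O2 sketch.c && ./a.out
 *   gcc -O2 -DSLAB_DEBUG sketch.c && ./a.out
 */
#include <stdio.h>

struct cache_node {
	unsigned long nr_partial;
#ifdef SLAB_DEBUG
	unsigned long nr_full;		/* only the debug code uses this */
#endif
};

#ifdef SLAB_DEBUG
static void debug_check(struct cache_node *n)
{
	n->nr_full = 0;			/* stand-in for real consistency checks */
}
#else
/*
 * Empty static inline stub: the compiler folds each call site away,
 * so neither a function body nor a call instruction is emitted.
 */
static inline void debug_check(struct cache_node *n) {}
#endif

int main(void)
{
	struct cache_node n = { .nr_partial = 0 };

	debug_check(&n);	/* compiles to nothing without SLAB_DEBUG */
	printf("sizeof(struct cache_node) = %zu\n", sizeof(n));
	return 0;
}

Without -DSLAB_DEBUG the struct is one word smaller and debug_check() leaves
no trace in the object file. That is the same saving the patch applies to
struct kmem_cache_node, struct kmem_cache, and the sysfs stubs above.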