---
 include/linux/slub_def.h |    6 +++-
 mm/slub.c                |   59 ++++++++++++++++++++++++++++++++---------------
 2 files changed, 46 insertions(+), 19 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-06-03 23:00:55.000000000 -0700
+++ slub/mm/slub.c	2007-06-03 23:11:39.000000000 -0700
@@ -183,7 +183,11 @@ static inline void ClearSlabDebug(struct
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
+#ifdef CONFIG_SLUB_DEBUG
 #define MIN_PARTIAL 2
+#else
+#define MIN_PARTIAL 0
+#endif
 
 /*
  * Maximum number of desirable partial slabs.
@@ -254,9 +258,9 @@ static int sysfs_slab_add(struct kmem_ca
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
 #else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
 #endif
 
 /********************************************************************
@@ -1065,7 +1069,7 @@ static struct page *allocate_slab(struct
 	return page;
 }
 
-static void setup_object(struct kmem_cache *s, struct page *page,
+static inline void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
@@ -1400,7 +1404,7 @@ static void deactivate_slab(struct kmem_
 	unfreeze_slab(s, page);
 }
 
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
 {
 	slab_lock(page);
 	deactivate_slab(s, page, cpu);
@@ -1410,7 +1414,7 @@ static void flush_slab(struct kmem_cache
  * Flush cpu slab.
  * Called from IPI handler with interrupts disabled.
  */
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct page *page = s->cpu_slab[cpu];
 
@@ -1818,7 +1822,7 @@ static inline int calculate_order(int si
 /*
  * Figure out what the alignment of the objects will be.
  */
-static unsigned long calculate_alignment(unsigned long flags,
+static inline unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size)
 {
 	/*
@@ -1840,13 +1844,15 @@ static unsigned long calculate_alignment
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static inline void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	atomic_long_set(&n->nr_slabs, 0);
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
 	INIT_LIST_HEAD(&n->full);
+#endif
 }
 
 #ifdef CONFIG_NUMA
@@ -1934,11 +1940,11 @@ static int init_kmem_cache_nodes(struct
 	return 1;
 }
 #else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
+static inline void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 }
 
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static inline int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 	init_kmem_cache_node(&s->local_node);
 	return 1;
@@ -2062,7 +2068,9 @@ static int kmem_cache_open(struct kmem_c
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
+#ifdef CONFIG_SLUB_DEBUG
 	s->name = name;
+#endif
 	s->ctor = ctor;
 	s->objsize = size;
 	s->flags = flags;
@@ -2082,7 +2090,7 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, s->order,
+			name, (unsigned long)size, s->size, s->order,
 			s->offset, flags);
 	return 0;
 }
@@ -2124,7 +2132,11 @@ EXPORT_SYMBOL(kmem_cache_size);
 
 const char *kmem_cache_name(struct kmem_cache *s)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	return s->name;
+#else
+	return NULL;
+#endif
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
@@ -2277,12 +2289,10 @@ static struct kmem_cache *get_slab(size_
 
 	if (likely(s))
 		return s;
-
-	if (index >= KMALLOC_P1_5_CACHES) {
+	if (index >= KMALLOC_P1_5_CACHES)
 		realsize = 1 << (index - KMALLOC_P1_5_CACHES +
 				KMALLOC_P2_SHIFT + 1);
-		printk(KERN_ERR "index =%d realsize =%ld\n", index, realsize);
-	} else
+	else
 		realsize = p1_5(index);
 
 	/*
@@ -2302,6 +2312,7 @@ void *__kmalloc(size_t size, gfp_t flags
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
+
 	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2313,6 +2324,7 @@ void *__kmalloc_node(size_t size, gfp_t
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
+
 	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -2325,8 +2337,9 @@ size_t ksize(const void *object)
 
 	BUG_ON(!page);
 	s = page->slab;
-	BUG_ON(!s);
 
+#ifdef CONFIG_SLUB_DEBUG
+	BUG_ON(!s);
 	/*
 	 * Debugging requires use of the padding between object
 	 * and whatever may come after it.
@@ -2334,6 +2347,7 @@ size_t ksize(const void *object)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->objsize;
+#endif
 
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
@@ -2342,6 +2356,7 @@ size_t ksize(const void *object)
 
 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
 		return s->inuse;
+
 	/*
 	 * Else we can use all the padding etc for the allocation
 	 */
@@ -2382,6 +2397,7 @@ EXPORT_SYMBOL(kfree);
  */
 int kmem_cache_shrink(struct kmem_cache *s)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	int node;
 	int i;
 	struct kmem_cache_node *n;
@@ -2445,6 +2461,9 @@ int kmem_cache_shrink(struct kmem_cache
 	}
 
 	kfree(slabs_by_inuse);
+#else
+	flush_all(s);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
@@ -3732,17 +3751,20 @@ static int sysfs_slab_alias(struct kmem_
 	return 0;
 }
+#endif
 
 static int __init slab_sysfs_init(void)
 {
 	struct list_head *h;
 	int err;
 
+#ifdef CONFIG_SLUB_DEBUG
 	err = subsystem_register(&slab_subsys);
 	if (err) {
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
+#endif
 
 	slab_state = SYSFS;
 
 	list_for_each(h, &slab_caches) {
@@ -3751,8 +3773,10 @@ static int __init slab_sysfs_init(void)
 
 		err = sysfs_slab_add(s);
 		BUG_ON(err);
+		kmem_cache_shrink(s);
 	}
 
+#ifdef CONFIG_SLUB_DEBUG
 	while (alias_list) {
 		struct saved_alias *al = alias_list;
 
@@ -3763,8 +3787,7 @@ static int __init slab_sysfs_init(void)
 	}
 
 	resiliency_test();
+#endif
 	return 0;
 }
-
 __initcall(slab_sysfs_init);
-#endif

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-06-03 22:43:07.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-06-03 23:10:47.000000000 -0700
@@ -17,7 +17,9 @@ struct kmem_cache_node {
 	unsigned long nr_partial;
 	atomic_long_t nr_slabs;
 	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
 	struct list_head full;
+#endif
 };
 
 /*
@@ -43,9 +45,11 @@ struct kmem_cache {
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
-	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
+	const char *name;	/* Name (only for display!) */
 	struct kobject kobj;	/* For sysfs */
+#endif
 
 #ifdef CONFIG_NUMA
 	int defrag_ratio;
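Every hunk above is one of the same two moves: data that only debugging needs (the full list, name, kobj) is compiled out when CONFIG_SLUB_DEBUG is unset, and the callers that remain are satisfied by static inline no-op stubs, so no #ifdefs leak into the call sites and the compiler drops the calls entirely. A minimal, self-contained userspace sketch of that pattern follows; the names (MYCACHE_DEBUG, struct mycache, debug_track) are illustrative only, not kernel interfaces:

#include <stdio.h>
#include <stddef.h>

struct mycache {
	size_t objsize;
#ifdef MYCACHE_DEBUG
	const char *name;	/* debug-only field, like s->name above */
#endif
};

#ifdef MYCACHE_DEBUG
static void debug_track(struct mycache *c)
{
	fprintf(stderr, "tracking %s\n", c->name);
}
#else
/* Inline no-op stub: callers need no #ifdef and the call compiles away. */
static inline void debug_track(struct mycache *c) { (void)c; }
#endif

int main(void)
{
	struct mycache c = { .objsize = 64 };

#ifdef MYCACHE_DEBUG
	c.name = "test-cache";
#endif
	debug_track(&c);	/* vanishes without MYCACHE_DEBUG */
	printf("objsize=%zu\n", c.objsize);
	return 0;
}

Built with gcc -DMYCACHE_DEBUG the field exists and the tracking runs; built without, both the field and the call disappear, which is what the #ifdef'd name/kobj/full fields and the sysfs_slab_*() stubs buy for SLUB here.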