---
 include/linux/slab.h     |    4 ++--
 include/linux/slub_def.h |    2 ++
 mm/slub.c                |   35 ++++++++++++++++++++++-------------
 3 files changed, 26 insertions(+), 15 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-23 19:07:52.000000000 -0700
+++ slub/mm/slub.c	2007-05-23 19:33:34.000000000 -0700
@@ -258,9 +258,9 @@ static int sysfs_slab_add(struct kmem_ca
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
 #else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
 #endif
 
 /********************************************************************
@@ -1015,7 +1015,7 @@ static struct page *allocate_slab(struct
 	return page;
 }
 
-static void setup_object(struct kmem_cache *s, struct page *page,
+static inline void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
@@ -1350,7 +1350,7 @@ static void deactivate_slab(struct kmem_
 	unfreeze_slab(s, page);
 }
 
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
 {
 	slab_lock(page);
 	deactivate_slab(s, page, cpu);
@@ -1360,7 +1360,7 @@ static void flush_slab(struct kmem_cache
  * Flush cpu slab.
  * Called from IPI handler with interrupts disabled.
  */
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct page *page = s->cpu_slab[cpu];
 
@@ -1494,7 +1494,7 @@ debug:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
@@ -1599,7 +1599,7 @@ debug:
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
-static void __always_inline slab_free(struct kmem_cache *s,
+static void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
@@ -1768,7 +1768,7 @@ static inline int calculate_order(int si
 /*
  * Figure out what the alignment of the objects will be.
  */
-static unsigned long calculate_alignment(unsigned long flags,
+static inline unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size)
 {
 	/*
@@ -1790,7 +1790,7 @@ static unsigned long calculate_alignment
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static inline void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	atomic_long_set(&n->nr_slabs, 0);
@@ -1883,11 +1883,11 @@ static int init_kmem_cache_nodes(struct
 	return 1;
 }
 #else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
+static inline void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 }
 
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static inline int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 	init_kmem_cache_node(&s->local_node);
 	return 1;
@@ -2284,8 +2284,9 @@ size_t ksize(const void *object)
 
 	BUG_ON(!page);
 	s = page->slab;
-	BUG_ON(!s);
 
+#ifdef CONFIG_SLUB_DEBUG
+	BUG_ON(!s);
 	/*
 	 * Debugging requires use of the padding between object
 	 * and whatever may come after it.
@@ -2301,6 +2302,8 @@ size_t ksize(const void *object)
 
 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
 		return s->inuse;
+#endif
+
 	/*
 	 * Else we can use all the padding etc for the allocation
 	 */
@@ -2335,6 +2338,7 @@ EXPORT_SYMBOL(kfree);
  */
 int kmem_cache_shrink(struct kmem_cache *s)
 {
+#ifdef CONFIG_SLUB_DEBUG
 	int node;
 	int i;
 	struct kmem_cache_node *n;
@@ -2398,6 +2402,9 @@ int kmem_cache_shrink(struct kmem_cache
 	}
 
 	kfree(slabs_by_inuse);
+#else
+	flush_all(s);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
@@ -2481,10 +2488,12 @@ void __init kmem_cache_init(void)
 
 	slab_state = UP;
 
+#ifdef CONFIG_SLUB_DEBUG
 	/* Provide the correct kmalloc names now that the caches are up */
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+#endif
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-05-23 19:14:43.000000000 -0700
+++ slub/include/linux/slab.h	2007-05-23 19:15:26.000000000 -0700
@@ -82,8 +82,8 @@ static inline void *kmem_cache_alloc_nod
  * to do various tricks to work around compiler limitations in order to
  * ensure proper constant folding.
  */
-#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
-				(MAX_ORDER + PAGE_SHIFT) : 25)
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 18 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 18)
 
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-23 19:14:32.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-23 19:31:10.000000000 -0700
@@ -47,7 +47,9 @@ struct kmem_cache {
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
 	struct kobject kobj;	/* For sysfs */
+#endif
 
 #ifdef CONFIG_NUMA
 	int defrag_ratio;