From 394d8ee8f59b6b6130a4082963d634bfde7150f0 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Feb 2008 23:45:24 -0800
Subject: [PATCH] slub: Remove slub_nomerge

No one has used that option for a long time and AFAICT it's currently
utterly useless.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 mm/slub.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-28 17:43:32.947512714 -0800
+++ linux-2.6/mm/slub.c	2008-02-28 17:43:40.287548078 -0800
@@ -1738,12 +1738,6 @@ static int slub_max_order = DEFAULT_MAX_
 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
 
 /*
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
-static int slub_nomerge;
-
-/*
  * Calculate the order of allocation given an slab object size.
  *
  * The order of allocation has significant impact on performance and other
@@ -2423,14 +2417,6 @@ static int __init setup_slub_min_objects
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-static int __init setup_slub_nomerge(char *str)
-{
-	slub_nomerge = 1;
-	return 1;
-}
-
-__setup("slub_nomerge", setup_slub_nomerge);
-
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 		const char *name, int size, gfp_t gfp_flags)
 {
@@ -2957,7 +2943,7 @@ void __init kmem_cache_init(void)
  */
static int slab_unmergeable(struct kmem_cache *s)
{
-	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
+	if (s->flags & SLUB_NEVER_MERGE)
 		return 1;
 
 	if ((s->flags & __PAGE_ALLOC_FALLBACK))
@@ -2981,7 +2967,7 @@ static struct kmem_cache *find_mergeable
 {
 	struct kmem_cache *s;
 
-	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
+	if (flags & SLUB_NEVER_MERGE)
 		return NULL;
 
 	if (ctor)