From 4f18b4f675c27f7e4c74c98e38062c855578372c Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Feb 2008 15:22:22 -0800
Subject: [PATCH] slub: Drop fallback to page allocator method

Since there is now an in-slub method of falling back to an order 0
slab, we no longer need the fallback to kmalloc_large().

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 mm/slub.c | 36 ++----------------------------------
 1 file changed, 2 insertions(+), 34 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-28 18:45:26.633424434 -0800
+++ linux-2.6/mm/slub.c	2008-02-28 18:46:02.549632237 -0800
@@ -204,8 +204,6 @@ static inline void ClearSlabDebug(struct
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1569,20 +1567,6 @@ new_slab:
 		goto load_freelist;
 	}
 
-	/*
-	 * No memory available.
-	 *
-	 * If the slab uses higher order allocs but the object is
-	 * smaller than a page size then we can fallback in emergencies
-	 * to the page allocator via kmalloc_large. The page allocator may
-	 * have failed to obtain a higher order page and we can try to
-	 * allocate a single page if the object fits into a single page.
-	 * That is only possible if certain conditions are met that are being
-	 * checked when a slab is created.
-	 */
-	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
-		return kmalloc_large(s->objsize, gfpflags);
-
 	return NULL;
 debug:
 	object = c->page->freelist;
@@ -2255,20 +2239,7 @@ static int calculate_sizes(struct kmem_c
 	size = ALIGN(size, align);
 	s->size = size;
 
-	if ((flags & __KMALLOC_CACHE) &&
-			PAGE_SIZE / size < slub_min_objects) {
-		/*
-		 * Kmalloc cache that would not have enough objects in
-		 * an order 0 page. Kmalloc slabs can fallback to
-		 * page allocator order 0 allocs so take a reasonably large
-		 * order that will allows us a good number of objects.
-		 */
-		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
-		s->flags |= __PAGE_ALLOC_FALLBACK;
-		s->allocflags |= __GFP_NOWARN;
-	} else
-		s->order = calculate_order(size);
-
+	s->order = calculate_order(size);
 	if (s->order < 0)
 		return 0;
 
@@ -2478,7 +2449,7 @@ static struct kmem_cache *create_kmalloc
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags | __KMALLOC_CACHE, NULL))
+			flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3022,9 +2993,6 @@ static int slab_unmergeable(struct kmem_
 	if (s->flags & SLUB_NEVER_MERGE)
 		return 1;
 
-	if ((s->flags & __PAGE_ALLOC_FALLBACK))
-		return 1;
-
 	if (s->ctor)
 		return 1;
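
For readers outside the thread: below is a minimal sketch, assuming the
companion "fall back to minimal order" change this patch depends on, of the
in-slub fallback that makes __PAGE_ALLOC_FALLBACK and the kmalloc_large()
escape hatch unnecessary. The function and parameter names
(alloc_slab_page_sketch, preferred_order, min_order) are hypothetical and
do not appear in slub; only alloc_pages() and the gfp flags are real.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only, NOT the code added by the companion patch.
 * Shape of the in-slub order fallback: try the cache's preferred
 * (possibly higher) order first, then degrade to the minimal order
 * that still fits at least one object.
 */
static struct page *alloc_slab_page_sketch(gfp_t flags, int preferred_order,
						int min_order)
{
	struct page *page;

	/*
	 * Higher orders are opportunistic: do not warn and do not
	 * retry hard if the page allocator cannot satisfy them.
	 */
	page = alloc_pages(flags | __GFP_NOWARN | __GFP_NORETRY,
							preferred_order);
	if (page)
		return page;

	/*
	 * Fall back to the minimal order. An order 0 page is far more
	 * likely to be available under memory pressure, so there is no
	 * longer any need to escape to the page allocator via
	 * kmalloc_large().
	 */
	return alloc_pages(flags, min_order);
}

The point is that the retry now happens inside the slab allocator itself:
every object still lives on a slab page, so kfree() never has to tell
page-allocator objects apart from slab objects, which is also why the
__KMALLOC_CACHE marker and the unmergeable special case can go.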