From 4b1fe45943c0b828955a423bfab243dd04eaae5a Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Feb 2008 15:22:22 -0800
Subject: [PATCH] slub: Fallback to minimal order during slab page allocation

If any higher order allocation fails, then fall back to the smallest
order necessary to contain at least one object.

Add a new field min_objects that will contain the number of objects
for the smallest possible order of an allocation.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |  2 +
 mm/slub.c                | 49 +++++++++++++++++++++++++++++++++--------------
 2 files changed, 37 insertions(+), 14 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h    2008-03-16 23:32:26.607919977 -0700
+++ linux-2.6/include/linux/slub_def.h 2008-03-16 23:44:11.087862397 -0700
@@ -29,6 +29,7 @@ enum stat_item {
         DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
         DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
         DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+        ORDER_FALLBACK,         /* Number of times fallback was necessary */
         NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -73,6 +74,7 @@ struct kmem_cache {
         /* Allocation and freeing of slabs */
         int max_objects;        /* Number of objects in a slab of maximum size */
         int objects;            /* Number of objects in a slab of current size */
+        int min_objects;        /* Number of objects in a slab of minimum size */
         gfp_t allocflags;       /* gfp flags to use on each alloc */
         int refcount;           /* Refcount for slab cache destroy */
         void (*ctor)(struct kmem_cache *, void *);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c   2008-03-16 23:40:38.486585292 -0700
+++ linux-2.6/mm/slub.c        2008-03-16 23:44:11.087862397 -0700
@@ -664,7 +664,7 @@ static int slab_pad_check(struct kmem_ca
                 return 1;
 
         start = page_address(page);
-        end = start + (PAGE_SIZE << s->order);
+        end = start + (PAGE_SIZE << compound_order(page));
         length = page->objects * s->size;
         remainder = end - (start + length);
         if (!remainder)
@@ -1040,6 +1040,15 @@ static inline unsigned long kmem_cache_f
 }
 #define slub_debug 0
 #endif
+
+static inline struct page *alloc_slab_page(gfp_t flags, int node, int order)
+{
+        if (node == -1)
+                return alloc_pages(flags, order);
+        else
+                return alloc_pages_node(node, flags, order);
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1050,15 +1059,24 @@ static struct page *allocate_slab(struct
 
         flags |= s->allocflags;
 
-        if (node == -1)
-                page = alloc_pages(flags, s->order);
-        else
-                page = alloc_pages_node(node, flags, s->order);
-
-        if (!page)
-                return NULL;
+        page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY,
+                                        node, s->order);
+        if (unlikely(!page)) {
+                /*
+                 * Allocation may have failed due to fragmentation.
+                 * Try a lower order alloc if possible
+                 */
+                page = alloc_slab_page(flags, node, get_order(s->size));
+                if (page) {
+                        pages = 1 << compound_order(page);
+                        stat(get_cpu_slab(s, raw_smp_processor_id()),
+                                ORDER_FALLBACK);
+                        page->objects = s->min_objects;
+                } else
+                        return NULL;
+        } else
+                page->objects = s->objects;
 
-        page->objects = s->objects;
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1104,7 +1122,8 @@ static struct page *new_slab(struct kmem
         start = page_address(page);
 
         if (unlikely(s->flags & SLAB_POISON))
-                memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+                memset(start, POISON_INUSE,
+                        PAGE_SIZE << compound_order(page));
 
         last = start;
         for_each_object(p, s, start, page->objects) {
@@ -1123,7 +1142,7 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-        int pages = 1 << s->order;
+        int order = compound_order(page);
 
         if (unlikely(SlabDebug(page))) {
                 void *p;
@@ -1138,9 +1157,9 @@ static void __free_slab(struct kmem_cach
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-                -pages);
+                - (1 << order));
 
-        __free_pages(page, s->order);
+        __free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -2272,6 +2291,7 @@ static int calculate_sizes(struct kmem_c
          * Determine the number of objects per slab
          */
         s->objects = (PAGE_SIZE << s->order) / size;
+        s->min_objects = (PAGE_SIZE << get_order(size)) / size;
         if (s->objects > 65535) {
                 s->objects = 65535;
                 printk(KERN_WARNING "slab %s: >65535 objects. "
@@ -4060,7 +4080,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_e
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4113,6 +4133,7 @@ static struct attribute *slab_attrs[] =
         &deactivate_to_head_attr.attr,
         &deactivate_to_tail_attr.attr,
         &deactivate_remote_frees_attr.attr,
+        &order_fallback_attr.attr,
 #endif
         NULL
 };
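
For reference, below is a minimal user-space sketch of the size arithmetic behind
the new min_objects field, mirroring the line
s->min_objects = (PAGE_SIZE << get_order(size)) / size; added to calculate_sizes().
The PAGE_SHIFT value and the simplified get_order() here are illustrative
assumptions, not the kernel implementation.

/*
 * Minimal user-space sketch (not kernel code): get_order(size) is the
 * smallest page order whose span holds at least one object, and
 * min_objects is how many objects fit in a slab of that minimal order.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)
{
        int order = 0;

        /* Smallest order such that (PAGE_SIZE << order) >= size */
        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 64, 192, 4096, 5000 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long size = sizes[i];
                int min_order = get_order(size);
                /* Same formula as calculate_sizes() in the patch */
                unsigned long min_objects = (PAGE_SIZE << min_order) / size;

                printf("size %5lu: min order %d, min_objects %lu\n",
                       size, min_order, min_objects);
        }
        return 0;
}

With a 4 KiB page, a 5000-byte object needs order 1 and yields min_objects = 1,
which is the minimal slab the fallback path in allocate_slab() settles for when
the preferred higher-order allocation fails.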