From 89825d43ba05ff1ef3f969118e0dcc2ab9b49279 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Feb 2008 15:22:22 -0800
Subject: [PATCH] slub: Fallback to order 0 during slab page allocation

If any higher order allocation fails then fall back to the smallest
order necessary to contain at least one object.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |    2 +
 mm/slub.c                |   69 +++++++++++++++++++++++++++++++++++------------
 2 files changed, 54 insertions(+), 17 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-29 05:40:53.350262368 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-29 05:40:58.314284789 -0800
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -72,6 +73,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int max_objects;	/* Number of objects in a slab of maximum size */
+	int min_objects;	/* Number of objects in an order 0 sized slab */
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-29 05:40:53.354262389 -0800
+++ linux-2.6/mm/slub.c	2008-02-29 05:52:16.661319745 -0800
@@ -294,7 +294,9 @@ static inline struct kmem_cache_cpu *get
 /* Determine the maximum number of objects that a slab page can hold */
 static inline unsigned long slab_objects(struct kmem_cache *s, struct page *page)
 {
-	return s->max_objects;
+	if (PageCompound(page))
+		return page[1].inuse;
+	return s->min_objects;
 }
 
 /* Verify that a pointer has an address that is valid within a slab page */
@@ -671,7 +673,7 @@ static int slab_pad_check(struct kmem_ca
 		return 1;
 
 	start = page_address(page);
-	end = start + (PAGE_SIZE << s->order);
+	end = start + (PAGE_SIZE << compound_order(page));
 	length = slab_objects(s, page) * s->size;
 	remainder = end - (start + length);
 	if (!remainder)
@@ -1037,6 +1039,15 @@ static inline unsigned long kmem_cache_f
 }
 #define slub_debug 0
 #endif
+
+static inline struct page *alloc_slab_page(gfp_t flags, int node, int order)
+{
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1047,14 +1058,21 @@ static struct page *allocate_slab(struct
 
 	flags |= s->allocflags;
 
-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
-
-	if (!page)
-		return NULL;
-
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY,
+							node, s->order);
+	if (unlikely(!page)) {
+		/*
+		 * Allocation may have failed due to fragmentation.
+		 * Try a lower order alloc if possible
+		 */
+		page = alloc_slab_page(flags, node, get_order(s->size));
+		if (page) {
+			pages = 1 << compound_order(page);
+			stat(get_cpu_slab(s, raw_smp_processor_id()),
+							ORDER_FALLBACK);
+		} else
+			return NULL;
+	}
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1078,6 +1096,8 @@ static struct page *new_slab(struct kmem
 	void *start;
 	void *last;
 	void *p;
+	unsigned long objects;
+	int order;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
@@ -1086,6 +1106,15 @@ static struct page *new_slab(struct kmem
 	if (!page)
 		goto out;
 
+	order = compound_order(page);
+	if (order == s->order)
+		objects = s->max_objects;
+	else
+		objects = s->min_objects;
+
+	if (order)
+		page[1].inuse = objects;
+
 	n = get_node(s, page_to_nid(page));
 	if (n) {
 		atomic_long_inc(&n->nr_slabs);
@@ -1100,10 +1129,10 @@ static struct page *new_slab(struct kmem
 	start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
 	last = start;
-	for_each_object(p, s, start, slab_objects(s, page)) {
+	for_each_object(p, s, start, objects) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
 		last = p;
@@ -1119,7 +1148,7 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
+	int order = compound_order(page);
 
 	if (unlikely(SlabDebug(page))) {
 		void *p;
@@ -1131,12 +1160,17 @@ static void __free_slab(struct kmem_cach
 		ClearSlabDebug(page);
 	}
 
+	reset_page_mapcount(page);
+	if (order)
+		/* mapcount of page + 1 was used for the object count */
+		reset_page_mapcount(page + 1);
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		-pages);
+		- (1 << order));
 
-	__free_pages(page, s->order);
+	__free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -1166,7 +1200,6 @@ static void discard_slab(struct kmem_cac
 
 	atomic_long_dec(&n->nr_slabs);
 	atomic_long_sub(slab_objects(s, page), &n->total_objects);
-	reset_page_mapcount(page);
 	__ClearPageSlab(page);
 	free_slab(s, page);
 }
@@ -2255,6 +2288,7 @@ static int calculate_sizes(struct kmem_c
 	 * Determine the number of objects per slab
 	 */
 	s->max_objects = (PAGE_SIZE << s->order) / size;
+	s->min_objects = (PAGE_SIZE << get_order(size)) / size;
 
 	return !!s->max_objects;
 
@@ -4029,7 +4063,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_e
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4081,6 +4115,7 @@ static struct attribute *slab_attrs[] =
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
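
For reference, a rough worked example of the order/object arithmetic in the
calculate_sizes() hunk above. This is a minimal userspace sketch, not part of
the patch and not kernel code; PAGE_SIZE, the example object sizes and the
order-3 slab order are assumptions chosen purely for illustration, and
min_order() stands in for the kernel's get_order():

/* Illustrative userspace sketch only -- not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumed page size */

/* Smallest order such that (PAGE_SIZE << order) >= size, like get_order() */
static int min_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 192, 1024, 4096, 8192 };
	int slab_order = 3;		/* assumed value of s->order */
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];
		int fallback = min_order(size);

		printf("size %5lu: order %d holds %3lu objects, "
		       "fallback order %d holds %2lu\n",
		       size, slab_order,
		       (PAGE_SIZE << slab_order) / size,
		       fallback,
		       (PAGE_SIZE << fallback) / size);
	}
	return 0;
}

For a 192 byte object this prints 170 objects at order 3 and 21 at the order 0
fallback, the same relationship calculate_sizes() stores in s->max_objects and
s->min_objects.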