From 4b1fe45943c0b828955a423bfab243dd04eaae5a Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 15 Feb 2008 15:22:22 -0800
Subject: [PATCH] slub: Fallback to minimal order during slab page allocation

If any higher order allocation fails then fall back to the smallest order
necessary to contain at least one object.

Reviewed-by: Pekka Enberg
Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |    4 +-
 mm/slub.c                |   71 +++++++++++++++++++++++++++++++++++++----------
 2 files changed, 60 insertions(+), 15 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-03-14 01:25:33.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2008-03-14 01:28:12.000000000 -0700
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -71,7 +72,8 @@ struct kmem_cache {
 	struct kmem_cache_node local_node;
 
 	/* Allocation and freeing of slabs */
-	int max_objects;	/* Number of objects in a slab of maximum size */
+	int max_objects;	/* Objects in a slab of maximum size */
+	int min_objects;	/* Objects in the smallest possible slab */
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-03-14 01:25:33.000000000 -0700
+++ linux-2.6/mm/slub.c	2008-03-14 01:45:22.000000000 -0700
@@ -98,9 +98,13 @@
  * PageError		Slab requires special handling due to debug
  *			options set. This moves slab handling out of
  *			the fast path and disables lockless freelists.
+ *
+ * PageSwapCache	Allocation of a slab of the ideal size was not
+ *			possible. The slab has the minimum size possible.
  */
 
 #define FROZEN (1 << PG_active)
+#define SLABSMALL (1 << PG_swapcache)
 
 #ifdef CONFIG_SLUB_DEBUG
 #define SLABDEBUG (1 << PG_error)
@@ -108,6 +112,7 @@
 #define SLABDEBUG 0
 #endif
 
+
 static inline int SlabFrozen(struct page *page)
 {
 	return page->flags & FROZEN;
@@ -138,6 +143,21 @@ static inline void ClearSlabDebug(struct
 	page->flags &= ~SLABDEBUG;
 }
 
+static inline int SlabSmall(struct page *page)
+{
+	return page->flags & SLABSMALL;
+}
+
+static inline void SetSlabSmall(struct page *page)
+{
+	page->flags |= SLABSMALL;
+}
+
+static inline void ClearSlabSmall(struct page *page)
+{
+	page->flags &= ~SLABSMALL;
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -294,6 +314,8 @@ static inline struct kmem_cache_cpu *get
 /* Determine the maximum number of objects that a slab page can hold */
 static inline unsigned long slab_objects(struct kmem_cache *s, struct page *page)
 {
+	if (unlikely(SlabSmall(page)))
+		return s->min_objects;
 	return s->max_objects;
 }
 
@@ -671,7 +693,7 @@ static int slab_pad_check(struct kmem_ca
 		return 1;
 
 	start = page_address(page);
-	end = start + (PAGE_SIZE << s->order);
+	end = start + (PAGE_SIZE << compound_order(page));
 	length = slab_objects(s, page) * s->size;
 	remainder = end - (start + length);
 	if (!remainder)
@@ -1037,6 +1059,15 @@ static inline unsigned long kmem_cache_f
 }
 #define slub_debug 0
 #endif
+
+static inline struct page *alloc_slab_page(gfp_t flags, int node, int order)
+{
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1047,14 +1078,22 @@ static struct page *allocate_slab(struct
 
 	flags |= s->allocflags;
 
-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
-
-	if (!page)
-		return NULL;
-
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY,
+					node, s->order);
+	if (unlikely(!page)) {
+		/*
+		 * Allocation may have failed due to fragmentation.
+		 * Try a lower order alloc if possible
+		 */
+		page = alloc_slab_page(flags, node, get_order(s->size));
+		if (page) {
+			pages = 1 << compound_order(page);
+			SetSlabSmall(page);
+			stat(get_cpu_slab(s, raw_smp_processor_id()),
+						ORDER_FALLBACK);
+		} else
+			return NULL;
+	}
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1100,7 +1139,8 @@ static struct page *new_slab(struct kmem
 	start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+		memset(start, POISON_INUSE,
+			PAGE_SIZE << compound_order(page));
 
 	last = start;
 	for_each_object(p, s, start, slab_objects(s, page)) {
@@ -1119,7 +1159,7 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
+	int order = compound_order(page);
 
 	if (unlikely(SlabDebug(page))) {
 		void *p;
@@ -1134,9 +1174,9 @@ static void __free_slab(struct kmem_cach
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		-pages);
+		- (1 << order));
 
-	__free_pages(page, s->order);
+	__free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -1167,6 +1207,7 @@ static void discard_slab(struct kmem_cac
 	atomic_long_dec(&n->nr_slabs);
 	atomic_long_sub(slab_objects(s, page), &n->total_objects);
 	reset_page_mapcount(page);
+	ClearSlabSmall(page);
 	__ClearPageSlab(page);
 	free_slab(s, page);
 }
@@ -2268,6 +2309,7 @@ static int calculate_sizes(struct kmem_c
 	 * Determine the number of objects per slab
 	 */
 	s->max_objects = (PAGE_SIZE << s->order) / size;
+	s->min_objects = (PAGE_SIZE << get_order(size)) / size;
 
 	return !!s->max_objects;
 
@@ -4052,7 +4094,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_e
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4104,6 +4146,7 @@ static struct attribute *slab_attrs[] =
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
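
For readers who want to see the fallback behaviour in isolation, the sketch
below mimics the logic this patch adds to allocate_slab() as a plain
user-space C program. It is only an illustration under assumed names:
PAGE_SIZE is fixed at 4096, try_alloc_order() and min_order() are
hypothetical stand-ins for alloc_slab_page() with s->order and for
get_order(s->size), and "fragmentation" is simulated by refusing any order
above 1. None of this is kernel code or a kernel interface.

/*
 * User-space illustration of the order fallback. Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Smallest order whose pages can hold at least one object of `size` */
static int min_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Stand-in allocator: pretend higher orders are unavailable */
static void *try_alloc_order(int order)
{
	if (order > 1)		/* simulate memory fragmentation */
		return NULL;
	return malloc(PAGE_SIZE << order);
}

int main(void)
{
	unsigned long object_size = 700;	/* example object size */
	int ideal_order = 3;			/* what s->order might be */
	int order = ideal_order;
	void *slab = try_alloc_order(order);

	if (!slab) {
		/* Fallback: smallest order that still fits one object */
		order = min_order(object_size);
		slab = try_alloc_order(order);
	}

	if (!slab)
		return 1;

	printf("order %d slab holds %lu objects of size %lu\n",
	       order, (PAGE_SIZE << order) / object_size, object_size);
	free(slab);
	return 0;
}

With object_size = 700 the ideal order-3 attempt fails, the fallback slab is
a single order-0 page holding five objects, and five is exactly what the
patch's s->min_objects computation in calculate_sizes() would record for
such a cache; SlabSmall() lets slab_objects() report that smaller count for
the fallback page.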