From: Christoph Lameter
Subject: slub: Fallback to order 0 during slab page allocation

If a higher order allocation fails then fall back to an order 0
allocation, provided the object is smaller than PAGE_SIZE.

Signed-off-by: Christoph Lameter

---
 include/linux/slub_def.h |    2 +
 mm/slub.c                |   56 ++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 48 insertions(+), 10 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-15 13:58:28.705371769 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-15 13:59:48.918454617 -0800
@@ -29,6 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Allocation that fell back to order 0 */
 	NR_SLUB_STAT_ITEMS };

 struct kmem_cache_cpu {
@@ -72,6 +73,7 @@ struct kmem_cache {

 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in a slab of maximum size */
+	int objects0;		/* Number of objects in an order 0 size slab */
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-15 13:58:28.721371150 -0800
+++ linux-2.6/mm/slub.c	2008-02-15 14:55:15.739774042 -0800
@@ -317,7 +317,9 @@ static void *slab_address(struct page *p
 static inline unsigned long slab_objects(struct kmem_cache *s,
 					struct page *page)
 {
-	return s->objects;
+	if (PageCompound(page))
+		return page[1].inuse;
+	return s->objects0;
 }

 static inline int check_valid_pointer(struct kmem_cache *s,
@@ -1079,6 +1081,15 @@ static inline unsigned long kmem_cache_f
 }
 #define slub_debug 0
 #endif
+
+static inline struct page *alloc_slab_page(gfp_t flags, int node, int order)
+{
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1089,14 +1100,23 @@ static struct page *allocate_slab(struct

 	flags |= s->allocflags;

-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
-
-	if (!page)
-		return NULL;
+	page = alloc_slab_page(flags, node, s->order);
+	if (unlikely(!page)) {
+		/*
+		 * Allocation may have failed due to fragmentation.
+		 * Try an order 0 alloc.
+		 */
+		if (s->size <= PAGE_SIZE) {
+			pages = 1;
+			page = alloc_slab_page(flags, node, 0);
+		}
+		if (!page)
+			return NULL;
+		else
+			stat(get_cpu_slab(s, raw_smp_processor_id()),
+				ORDER_FALLBACK);
+	}

 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1120,6 +1140,9 @@ static struct page *new_slab(struct kmem
 	void *start;
 	void *last;
 	void *p;
+	unsigned long size;
+	unsigned long objects;
+	int order;

 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
@@ -1128,6 +1151,13 @@ static struct page *new_slab(struct kmem
 	if (!page)
 		goto out;

+	order = compound_order(page);
+	size = PAGE_SIZE << order;
+	objects = size / s->size;
+
+	if (order)
+		page[1].inuse = objects;
+
 	n = get_node(s, page_to_nid(page));
 	if (n) {
 		atomic_long_inc(&n->nr_slabs);
@@ -1143,10 +1173,10 @@ static struct page *new_slab(struct kmem
 	page->end = start + 1;

 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+		memset(start, POISON_INUSE, size);

 	last = start;
-	for_each_object(p, s, start, slab_objects(s, page)) {
+	for_each_object(p, s, start, objects) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
 		last = p;
@@ -1211,6 +1241,9 @@ static void discard_slab(struct kmem_cac
 	atomic_long_dec(&n->nr_slabs);
 	atomic_long_sub(slab_objects(s, page), &n->total_objects);
 	reset_page_mapcount(page);
+	if (PageCompound(page))
+		/* mapcount of page + 1 was used for the object count */
+		reset_page_mapcount(page + 1);
 	__ClearPageSlab(page);
 	free_slab(s, page);
 }
@@ -2381,6 +2414,7 @@ static int calculate_sizes(struct kmem_c
 	 * Determine the number of objects per slab
 	 */
 	s->objects = (PAGE_SIZE << s->order) / size;
+	s->objects0 = PAGE_SIZE / size;

 	return !!s->objects;

@@ -4153,6 +4187,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_e
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif

@@ -4205,6 +4240,7 @@ static struct attribute *slab_attrs[] =
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
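
For readers who want to see the fallback logic in isolation, below is a minimal
userspace sketch of the same pattern. It is not part of the patch:
alloc_slab_mem(), the object_size parameter and the order_fallback counter are
illustrative stand-ins for allocate_slab(), s->size and the ORDER_FALLBACK
statistic, and plain malloc() stands in for the page allocator.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static unsigned long order_fallback;	/* analogue of the ORDER_FALLBACK stat */

/* Try a higher order "slab" first; fall back to a single page on failure. */
static void *alloc_slab_mem(unsigned int order, size_t object_size,
			    size_t *alloc_size)
{
	void *p = malloc(PAGE_SIZE << order);

	if (!p && object_size <= PAGE_SIZE) {
		/* Higher order failed (e.g. fragmentation): retry at order 0 */
		p = malloc(PAGE_SIZE);
		if (p) {
			*alloc_size = PAGE_SIZE;
			order_fallback++;
			return p;
		}
	}
	if (p)
		*alloc_size = PAGE_SIZE << order;
	return p;
}

int main(void)
{
	size_t size;
	void *slab = alloc_slab_mem(3, 256, &size);

	if (slab) {
		/* Object count is derived from whatever size was obtained,
		 * mirroring objects = size / s->size in new_slab(). */
		printf("slab of %zu bytes, %zu objects, %lu fallbacks\n",
		       size, size / 256, order_fallback);
		free(slab);
	}
	return 0;
}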