---
 include/linux/slub_def.h |   72 +--
 mm/slub.c                | 1011 ++++++++++++++++++++++++++++++-----------------
 2 files changed, 703 insertions(+), 380 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2007-10-15 14:05:59.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h	2007-10-15 14:08:09.000000000 -0700
@@ -11,6 +11,15 @@
 #include
 #include
 
+struct kmem_cache_cpu {
+	void **freelist;
+	int objects;
+	int refill;
+	int max;
+	unsigned int offset;
+	unsigned int objsize;
+};
+
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
@@ -54,7 +63,11 @@ struct kmem_cache {
 	int defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-	struct page *cpu_slab[NR_CPUS];
+#ifdef CONFIG_SMP
+	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
+#else
+	struct kmem_cache_cpu cpu_slab;
+#endif
 };
 
 /*
@@ -72,7 +85,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
  *
@@ -83,9 +96,6 @@ static __always_inline int kmalloc_index
 	if (!size)
 		return 0;
 
-	if (size > KMALLOC_MAX_SIZE)
-		return -1;
-
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
@@ -102,6 +112,10 @@ static __always_inline int kmalloc_index
 	if (size <= 512) return 9;
 	if (size <= 1024) return 10;
 	if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
 	if (size <= 4 * 1024) return 12;
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
@@ -109,13 +123,9 @@ static __always_inline int kmalloc_index
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
+	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
 	if (size <= 2 * 1024 * 1024) return 21;
-	if (size <= 4 * 1024 * 1024) return 22;
-	if (size <= 8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
 	return -1;
 
 /*
@@ -140,19 +150,6 @@ static __always_inline struct kmem_cache
 	if (index == 0)
 		return NULL;
 
-	/*
-	 * This function only gets expanded if __builtin_constant_p(size), so
-	 * testing it here shouldn't be needed. But some versions of gcc need
-	 * help.
-	 */
-	if (__builtin_constant_p(size) && index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
 	return &kmalloc_caches[index];
 }
 
@@ -168,15 +165,21 @@ void *__kmalloc(size_t size, gfp_t flags
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size)) {
+		if (size > PAGE_SIZE / 2)
+			return (void *)__get_free_pages(flags | __GFP_COMP,
+				get_order(size));
 
-		if (!s)
-			return ZERO_SIZE_PTR;
+		if (!(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
+			if (!s)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc(s, flags);
+		}
+	}
+	return __kmalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
@@ -185,15 +188,16 @@ void *kmem_cache_alloc_node(struct kmem_
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size) &&
+		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
 }
 #endif
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-15 14:05:59.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-15 14:08:09.000000000 -0700
@@ -90,7 +90,7 @@
  * One use of this flag is to mark slabs that are
  * used for allocations. Then such a slab becomes a cpu
  * slab. The cpu slab may be equipped with an additional
- * lockless_freelist that allows lockless access to
+ * freelist that allows lockless access to
  * free objects in addition to the regular freelist
  * that requires the slab lock.
  *
@@ -140,11 +140,6 @@ static inline void ClearSlabDebug(struct
 /*
  * Issues still to be resolved:
  *
- * - The per cpu array is updated for each new slab and and is a remote
- * cacheline for most nodes. This could become a bouncing cacheline given
- * enough frequent updates. There are 16 pointers in a cacheline, so at
- * max 16 cpus could compete for the cacheline which may be okay.
- *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
@@ -153,25 +148,6 @@ static inline void ClearSlabDebug(struct
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-#if PAGE_SHIFT <= 12
-
-/*
- * Small page size. Make sure that we do not fragment memory
- */
-#define DEFAULT_MAX_ORDER 1
-#define DEFAULT_MIN_OBJECTS 4
-
-#else
-
-/*
- * Large page machines are customarily able to handle larger
- * page orders.
- */
-#define DEFAULT_MAX_ORDER 2
-#define DEFAULT_MIN_OBJECTS 8
-
-#endif
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
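
A note on the data structure before the rest of the mm/slub.c changes: struct kmem_cache_cpu above replaces the per cpu slab page pointer with a per cpu queue of objects. Free objects are chained through a pointer stored "offset" bytes into each object, "objects" counts how many are queued, and "refill"/"max" are the low and high watermarks that the later hunks use to decide when to pull objects in from slabs and when to push them back; constant-sized kmalloc() calls above PAGE_SIZE/2 now bypass the slab layer and go straight to the page allocator. The stand-alone sketch below models only that queue layout and the drain-on-max idea; the names are invented for illustration and it uses plain malloc() instead of slab pages, so it is an approximation of the mechanism, not code from the patch.

/* toy_queue.c -- illustrative model only, not part of the patch */
#include <stdio.h>
#include <stdlib.h>

struct toy_cpu_queue {
	void **freelist;	/* first free object, NULL when empty */
	int objects;		/* objects currently queued */
	int max;		/* high watermark: drain when reached */
	unsigned int offset;	/* byte offset of the link inside an object */
};

/* The link pointer lives inside the free object itself, at c->offset. */
static void **link_of(struct toy_cpu_queue *c, void *object)
{
	return (void **)((char *)object + c->offset);
}

/* Fast-path alloc: pop the head of the per cpu list. */
static void *toy_alloc(struct toy_cpu_queue *c)
{
	void *object = c->freelist;

	if (!object)
		return NULL;	/* the real slow path would refill here */
	c->freelist = *link_of(c, object);
	c->objects--;
	return object;
}

/* Fast-path free: push the object; the real code drains objects back to
 * their slabs once c->objects reaches c->max, here we only report it. */
static void toy_free(struct toy_cpu_queue *c, void *object)
{
	*link_of(c, object) = c->freelist;
	c->freelist = object;
	c->objects++;
	if (c->objects >= c->max)
		printf("would drain back toward the refill watermark here\n");
}

int main(void)
{
	struct toy_cpu_queue c = { NULL, 0, 4, 0 };
	int i;

	for (i = 0; i < 5; i++)
		toy_free(&c, malloc(32));
	while (c.freelist)
		free(toy_alloc(&c));
	return 0;
}

In the patch itself there is one such structure per cpu and per cache (see get_cpu_slab() further down), so slab_alloc()/slab_free() run with interrupts disabled but without taking the slab lock; only the drain and refill slow paths touch struct page and the per node partial lists.
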
@@ -205,11 +181,6 @@ static inline void ClearSlabDebug(struct #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif -/* - * The page->inuse field is 16 bit thus we have this limitation - */ -#define MAX_OBJECTS_PER_SLAB 65535 - /* Internal SLUB flags */ #define __OBJECT_POISON 0x80000000 /* Poison object */ #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ @@ -277,6 +248,15 @@ static inline struct kmem_cache_node *ge #endif } +static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) +{ +#ifdef CONFIG_SMP + return s->cpu_slab[cpu]; +#else + return &s->cpu_slab; +#endif +} + static inline int check_valid_pointer(struct kmem_cache *s, struct page *page, const void *object) { @@ -729,11 +709,6 @@ static int check_slab(struct kmem_cache slab_err(s, page, "Not a valid slab page"); return 0; } - if (page->offset * sizeof(void *) != s->offset) { - slab_err(s, page, "Corrupted offset %lu", - (unsigned long)(page->offset * sizeof(void *))); - return 0; - } if (page->inuse > s->objects) { slab_err(s, page, "inuse %u > max %u", s->name, page->inuse, s->objects); @@ -872,15 +847,15 @@ bad: slab_fix(s, "Marking all objects used"); page->inuse = s->objects; page->freelist = NULL; - /* Fix up fields that may be corrupted */ - page->offset = s->offset / sizeof(void *); } return 0; } -static int free_debug_processing(struct kmem_cache *s, struct page *page, +static int free_debug_processing(struct kmem_cache *s, void *object, void *addr) { + struct page *page = virt_to_head_page(object); + if (!check_slab(s, page)) goto fail; @@ -1023,10 +998,10 @@ static inline void setup_object_debug(st struct page *page, void *object) {} static inline int alloc_debug_processing(struct kmem_cache *s, - struct page *page, void *object, void *addr) { return 0; } + void *object, void *addr) { return 0; } static inline int free_debug_processing(struct kmem_cache *s, - struct page *page, void *object, void *addr) { return 0; } + void *object, void *addr) { return 0; } static inline int slab_pad_check(struct kmem_cache *s, struct page *page) { return 1; } @@ -1055,6 +1030,9 @@ static struct page *allocate_slab(struct if (s->flags & SLAB_CACHE_DMA) flags |= SLUB_DMA; + if (s->flags & SLAB_RECLAIM_ACCOUNT) + flags |= __GFP_RECLAIMABLE; + if (node == -1) page = alloc_pages(flags, s->order); else @@ -1088,19 +1066,16 @@ static struct page *new_slab(struct kmem void *last; void *p; - BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK)); + BUG_ON(flags & GFP_SLAB_BUG_MASK); - if (flags & __GFP_WAIT) - local_irq_enable(); - - page = allocate_slab(s, flags & GFP_LEVEL_MASK, node); + page = allocate_slab(s, + flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); if (!page) goto out; n = get_node(s, page_to_nid(page)); if (n) atomic_long_inc(&n->nr_slabs); - page->offset = s->offset / sizeof(void *); page->slab = s; page->flags |= 1 << PG_slab; if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | @@ -1123,11 +1098,8 @@ static struct page *new_slab(struct kmem set_freepointer(s, last, NULL); page->freelist = start; - page->lockless_freelist = NULL; page->inuse = 0; out: - if (flags & __GFP_WAIT) - local_irq_disable(); return page; } @@ -1149,7 +1121,6 @@ static void __free_slab(struct kmem_cach NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, - pages); - page->mapping = NULL; __free_pages(page, s->order); } @@ -1178,6 +1149,8 @@ static void discard_slab(struct kmem_cac { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + 
WARN_ON(!atomic_long_read(&n->nr_slabs)); + atomic_long_dec(&n->nr_slabs); reset_page_mapcount(page); __ClearPageSlab(page); @@ -1208,19 +1181,15 @@ static __always_inline int slab_trylock( /* * Management of partially allocated slabs */ -static void add_partial_tail(struct kmem_cache_node *n, struct page *page) +static void add_partial(struct kmem_cache_node *n, + struct page *page, int tail) { spin_lock(&n->list_lock); n->nr_partial++; - list_add_tail(&page->lru, &n->partial); - spin_unlock(&n->list_lock); -} - -static void add_partial(struct kmem_cache_node *n, struct page *page) -{ - spin_lock(&n->list_lock); - n->nr_partial++; - list_add(&page->lru, &n->partial); + if (tail) + list_add_tail(&page->lru, &n->partial); + else + list_add(&page->lru, &n->partial); spin_unlock(&n->list_lock); } @@ -1348,7 +1317,7 @@ static struct page *get_partial(struct k * * On exit the slab lock will have been dropped. */ -static void unfreeze_slab(struct kmem_cache *s, struct page *page) +static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); @@ -1356,7 +1325,7 @@ static void unfreeze_slab(struct kmem_ca if (page->inuse) { if (page->freelist) - add_partial(n, page); + add_partial(n, page, tail); else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER)) add_full(n, page); slab_unlock(page); @@ -1371,7 +1340,7 @@ static void unfreeze_slab(struct kmem_ca * partial list stays small. kmem_cache_shrink can * reclaim empty slabs from the partial list. */ - add_partial_tail(n, page); + add_partial(n, page, 1); slab_unlock(page); } else { slab_unlock(page); @@ -1381,35 +1350,101 @@ static void unfreeze_slab(struct kmem_ca } /* - * Remove the cpu slab + * Object count was reduced. Unlock page and make sure that the page is on the + * right list */ -static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu) +static void putback_slab(struct kmem_cache *s, struct page *page, void *prior) { + if (unlikely(SlabFrozen(page))) + goto out_unlock; + + if (unlikely(!page->inuse)) { + /* Page is now empty */ + if (prior) + /* + * Slab still on the partial list. + */ + remove_partial(s, page); + + slab_unlock(page); + discard_slab(s, page); + return; + } + /* - * Merge cpu freelist into freelist. Typically we get here - * because both freelists are empty. So this is unlikely - * to occur. + * Objects left in the slab. If it + * was not on the partial list before + * then add it. */ - while (unlikely(page->lockless_freelist)) { - void **object; + if (unlikely(!prior)) + add_partial(get_node(s, page_to_nid(page)), page, 0); + +out_unlock: + slab_unlock(page); +} + +/* + * Free a list of objects and attempt to minimize the lock overhead + * while doing so. 
+ * + * Interrupts are disabled + */ +static void free_objects(struct kmem_cache *s, struct kmem_cache_cpu *c, + void **list) +{ + struct page *page = NULL; + struct page *npage; + void **object; + void **prior = NULL; + + while (list) { + object = list; + list = object[c->offset]; - /* Retrieve object from cpu_freelist */ - object = page->lockless_freelist; - page->lockless_freelist = page->lockless_freelist[page->offset]; + npage = virt_to_head_page(object); + if (npage != page) { + if (page) + putback_slab(s, page, prior); + /* finish off the old one !*/ + page = npage; + slab_lock(page); + prior = page->freelist; + } - /* And put onto the regular freelist */ - object[page->offset] = page->freelist; + object[c->offset] = page->freelist; page->freelist = object; page->inuse--; } - s->cpu_slab[cpu] = NULL; - unfreeze_slab(s, page); + + if (page) + putback_slab(s, page, prior); } -static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu) +/* + * Reduce number of objects in a cpu structure + */ +static void drain(struct kmem_cache *s, struct kmem_cache_cpu *c, int objects) { - slab_lock(page); - deactivate_slab(s, page, cpu); + void **free; + + if (c->objects <= objects) + return; + + c->objects = objects; + free = c->freelist; + if (!objects) + c->freelist = NULL; + else { + /* Keep the first hot objects */ + void **x = free; + + while (--objects > 0) + x = x[c->offset]; + + free = x[c->offset]; + x[c->offset] = NULL; + } + free_objects(s, c, free); } /* @@ -1418,18 +1453,17 @@ static inline void flush_slab(struct kme */ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { - struct page *page = s->cpu_slab[cpu]; + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); - if (likely(page)) - flush_slab(s, page, cpu); + if (likely(c && c->freelist)) + drain(s, c, 0); } static void flush_cpu_slab(void *d) { struct kmem_cache *s = d; - int cpu = smp_processor_id(); - __flush_cpu_slab(s, cpu); + __flush_cpu_slab(s, smp_processor_id()); } static void flush_all(struct kmem_cache *s) @@ -1446,6 +1480,134 @@ static void flush_all(struct kmem_cache } /* + * Check if the objects in a per cpu structure fit numa + * locality expectations. + */ +static inline int node_match(void *object, int node) +{ +#ifdef CONFIG_NUMA + if (node != -1 && page_to_nid(virt_to_page(object)) != node) + return 0; +#endif + return 1; +} + +/* + * Find a new slab. + * + * Find new slab may reenable interrupts. Thus there is no guarantee that + * we are on the same cpu as before when this function returns. + */ +static struct page *find_new_slab(struct kmem_cache *s, gfp_t flags, int node) +{ + struct page *page; + + page = get_partial(s, flags, node); + if (page) + return page; + + if (flags & __GFP_WAIT) + local_irq_enable(); + + page = new_slab(s, flags, node); + + if (flags & __GFP_WAIT) + local_irq_disable(); + + if (page) + slab_lock(page); + return page; +} + +/* + * Drain objects that are not on the specified node until we find a fitting + * one. + */ +void numa_drain(struct kmem_cache *s, struct kmem_cache_cpu *c, int node) +{ +#ifdef CONFIG_NUMA + if (node >= 0) { + /* NUMA case. 
Throw the wrong node objects away */ + void **free = NULL; + + while (c->freelist) { + void **object = c->freelist; + + if (page_to_nid(virt_to_page(object)) == node) + break; + + c->freelist = object[c->offset]; + object[c->offset] = free; + free = object; + c->objects--; + } + if (free) + free_objects(s, c, free); + } +#endif +} + +static struct kmem_cache_cpu * fillup(struct kmem_cache *s, + struct kmem_cache_cpu *c, gfp_t gfpflags, int node, int refill) +{ + struct page *page; + + while (c->objects < refill) { + int new_objects; + void **new_freelist; + + page = find_new_slab(s, gfpflags, node); + + /* + * We may have switched cpus in find_new_slab. We merge the + * new objects into whatever processor per cpu list we are on + * right now. The bad side effects are limited to getting too + * many objects onto a per cpu list. + */ + c = get_cpu_slab(s, smp_processor_id()); + + if (!page) + return c; + + /* Extract objects and dispose of the slab */ + new_objects = s->objects - page->inuse; + new_freelist = page->freelist; + page->freelist = NULL; + page->inuse = s->objects; + unfreeze_slab(s, page, 1); + + /* + * We may have to merge two linked lists. + * Find the smaller one. + */ + if (unlikely(c->objects > new_objects)) { + void **temp = c->freelist; + + c->freelist = new_freelist; + new_freelist = temp; + } + + /* + * If there are objects already in the per cpu list + * then we need to find the end of that list and + * attach the new objects to the end of that list. + */ + if (unlikely(c->objects)) { + void **p = c->freelist; + + while (p[c->offset]) + p = p[c->offset]; + + p[c->offset] = new_freelist; + } else + c->freelist = new_freelist; + + c->objects += new_objects; + } + return c; +} + +/* * Slow path. The lockless freelist is empty or we need to perform * debugging duties. * @@ -1462,81 +1624,56 @@ static void flush_all(struct kmem_cache * And if we were unable to get a new slab from the partial slab lists then * we need to allocate a new slab. This is slowest path since we may sleep. */ -static void *__slab_alloc(struct kmem_cache *s, - gfp_t gfpflags, int node, void *addr, struct page *page) +static void *__slab_alloc(struct kmem_cache *s, struct kmem_cache_cpu *c, + gfp_t gfpflags, int node, void *addr) { + int refill = c->refill; void **object; - int cpu = smp_processor_id(); - if (!page) - goto new_slab; + /* + * If we got here in a NUMA system then the first object on the + * freelist does not have the right node. + * + * Simple approach: Dump all objects from the freelist that + * do not fit the requirements. If any are left then use that. + */ + if (unlikely(node >= 0 && c->objects)) { + numa_drain(s, c, node); + if (c->objects) + goto out; + refill = 1; + } - slab_lock(page); - if (unlikely(node != -1 && page_to_nid(page) != node)) - goto another_slab; -load_freelist: - object = page->freelist; - if (unlikely(!object)) - goto another_slab; - if (unlikely(SlabDebug(page))) - goto debug; - - object = page->freelist; - page->lockless_freelist = object[page->offset]; - page->inuse = s->objects; - page->freelist = NULL; - slab_unlock(page); - return object; + BUG_ON(!c->objects && c->freelist); + BUG_ON(!c->freelist && c->objects); -another_slab: - deactivate_slab(s, page, cpu); + /* + * Check for debug mode. If we are in a debug mode then the slab + * must be processed one object at a time. 
+ */ + if (unlikely(c->max < 0)) { + struct page * page; -new_slab: - page = get_partial(s, gfpflags, node); - if (page) { - s->cpu_slab[cpu] = page; - goto load_freelist; - } + do { + page = find_new_slab(s, gfpflags, node); + if (!page) + return NULL; + object = page->freelist; + } while (!alloc_debug_processing(s, page, object, addr)); - page = new_slab(s, gfpflags, node); - if (page) { - cpu = smp_processor_id(); - if (s->cpu_slab[cpu]) { - /* - * Someone else populated the cpu_slab while we - * enabled interrupts, or we have gotten scheduled - * on another cpu. The page may not be on the - * requested node even if __GFP_THISNODE was - * specified. So we need to recheck. - */ - if (node == -1 || - page_to_nid(s->cpu_slab[cpu]) == node) { - /* - * Current cpuslab is acceptable and we - * want the current one since its cache hot - */ - discard_slab(s, page); - page = s->cpu_slab[cpu]; - slab_lock(page); - goto load_freelist; - } - /* New slab does not fit our expectations */ - flush_slab(s, s->cpu_slab[cpu], cpu); - } - slab_lock(page); - SetSlabFrozen(page); - s->cpu_slab[cpu] = page; - goto load_freelist; + page->inuse++; + page->freelist = object[c->offset]; + unfreeze_slab(s, page, 0); + return object; } - return NULL; -debug: - object = page->freelist; - if (!alloc_debug_processing(s, page, object, addr)) - goto another_slab; - page->inuse++; - page->freelist = object[page->offset]; - slab_unlock(page); + c = fillup(s, c, gfpflags, node, refill); +out: + object = c->freelist; + if (object) { + c->freelist = c->freelist[c->offset]; + c->objects--; + } return object; } @@ -1553,25 +1690,24 @@ debug: static void __always_inline *slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, void *addr) { - struct page *page; void **object; unsigned long flags; + struct kmem_cache_cpu *c; local_irq_save(flags); - page = s->cpu_slab[smp_processor_id()]; - if (unlikely(!page || !page->lockless_freelist || - (node != -1 && page_to_nid(page) != node))) - - object = __slab_alloc(s, gfpflags, node, addr, page); - + c = get_cpu_slab(s, smp_processor_id()); + if (unlikely(!c->freelist || !node_match(c->freelist, node))) + object = __slab_alloc(s, c, gfpflags, node, addr); else { - object = page->lockless_freelist; - page->lockless_freelist = object[page->offset]; + + object = c->freelist; + c->freelist = object[c->offset]; + c->objects--; } local_irq_restore(flags); - if (unlikely((gfpflags & __GFP_ZERO) && object)) - memset(object, 0, s->objsize); + if (unlikely(gfpflags & __GFP_ZERO) && object) + memset(object, 0, c->objsize); return object; } @@ -1591,61 +1727,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); #endif /* - * Slow patch handling. This may still be called frequently since objects - * have a longer lifetime than the cpu slabs in most processing loads. - * - * So we still attempt to reduce cache line usage. Just take the slab - * lock and free the item. If there is no additional partial page - * handling required then we can return immediately. + * Slow path of __slab_free. Implements debugging and draining. 
*/ -static void __slab_free(struct kmem_cache *s, struct page *page, - void *x, void *addr) +static void __slab_free(struct kmem_cache *s, struct kmem_cache_cpu *c, + void *addr) { - void *prior; - void **object = (void *)x; - - slab_lock(page); - - if (unlikely(SlabDebug(page))) - goto debug; -checks_ok: - prior = object[page->offset] = page->freelist; - page->freelist = object; - page->inuse--; - - if (unlikely(SlabFrozen(page))) - goto out_unlock; - - if (unlikely(!page->inuse)) - goto slab_empty; - - /* - * Objects left in the slab. If it - * was not on the partial list before - * then add it. - */ - if (unlikely(!prior)) - add_partial(get_node(s, page_to_nid(page)), page); - -out_unlock: - slab_unlock(page); - return; - -slab_empty: - if (prior) - /* - * Slab still on the partial list. - */ - remove_partial(s, page); - - slab_unlock(page); - discard_slab(s, page); - return; - -debug: - if (!free_debug_processing(s, page, x, addr)) - goto out_unlock; - goto checks_ok; + if (unlikely(c->max < 0)) { + if (!free_debug_processing(s, c->freelist, addr)) + return; + drain(s, c, 0); + } else + drain(s, c, c->refill); } /* @@ -1659,31 +1751,28 @@ debug: * If fastpath is not possible then fall back to __slab_free where we deal * with all sorts of special processing. */ -static void __always_inline slab_free(struct kmem_cache *s, - struct page *page, void *x, void *addr) +static void __always_inline slab_free(struct kmem_cache *s, void *x, + void *addr) { void **object = (void *)x; unsigned long flags; + struct kmem_cache_cpu *c; local_irq_save(flags); debug_check_no_locks_freed(object, s->objsize); - if (likely(page == s->cpu_slab[smp_processor_id()] && - !SlabDebug(page))) { - object[page->offset] = page->lockless_freelist; - page->lockless_freelist = object; - } else - __slab_free(s, page, x, addr); + c = get_cpu_slab(s, smp_processor_id()); + object[c->offset] = c->freelist; + c->freelist = object; + c->objects++; + if (c->objects >= c->max) + __slab_free(s, c, addr); local_irq_restore(flags); } void kmem_cache_free(struct kmem_cache *s, void *x) { - struct page *page; - - page = virt_to_head_page(x); - - slab_free(s, page, x, __builtin_return_address(0)); + slab_free(s, x, __builtin_return_address(0)); } EXPORT_SYMBOL(kmem_cache_free); @@ -1718,8 +1807,9 @@ static struct page *get_object_page(cons * take the list_lock. */ static int slub_min_order; -static int slub_max_order = DEFAULT_MAX_ORDER; -static int slub_min_objects = DEFAULT_MIN_OBJECTS; +static int slub_max_order; +static int slub_min_objects = 4; +static int user_override; /* * Merge control. If this is set then no merging of slab caches will occur. @@ -1759,14 +1849,6 @@ static inline int slab_order(int size, i int rem; int min_order = slub_min_order; - /* - * If we would create too many object per slab then reduce - * the slab order even if it goes below slub_min_order. 
- */ - while (min_order > 0 && - (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size) - min_order--; - for (order = max(min_order, fls(min_objects * size - 1) - PAGE_SHIFT); order <= max_order; order++) { @@ -1781,9 +1863,6 @@ static inline int slab_order(int size, i if (rem <= slab_size / fract_leftover) break; - /* If the next size is too high then exit now */ - if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size) - break; } return order; @@ -1858,6 +1937,25 @@ static unsigned long calculate_alignment return ALIGN(align, sizeof(void *)); } +static void init_kmem_cache_cpu(struct kmem_cache *s, + struct kmem_cache_cpu *c) +{ + c->freelist = NULL; + c->objects = 0; + c->offset = s->offset / sizeof(void *); + c->objsize = s->objsize; + if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | + SLAB_POISON | SLAB_STORE_USER | SLAB_TRACE)) { + c->refill = 1; + c->max = -1; + } else { + c->refill = max(s->objects / 2, + max(slub_min_objects * 2, 10)); + c->max = max(c->refill * 2, + max(4 * s->objects, 20)); + } +} + static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; @@ -1869,6 +1967,125 @@ static void init_kmem_cache_node(struct #endif } +#ifdef CONFIG_SMP +/* + * Per cpu array for per cpu structures. + * + * The per cpu array places all kmem_cache_cpu structures from one processor + * close together meaning that it becomes possible that multiple per cpu + * structures are contained in one cacheline. This may be particularly + * beneficial for the kmalloc caches. + * + * A desktop system typically has around 60-80 slabs. With 100 here we are + * likely able to get per cpu structures for all caches from the array defined + * here. We must be able to cover all kmalloc caches during bootstrap. + * + * If the per cpu array is exhausted then fall back to kmalloc + * of individual cachelines. No sharing is possible then. + */ +#define NR_KMEM_CACHE_CPU 100 + +static DEFINE_PER_CPU(struct kmem_cache_cpu, + kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; + +static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); + +static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, + int cpu, gfp_t flags) +{ + struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); + + if (c) + per_cpu(kmem_cache_cpu_free, cpu) = + (void *)c->freelist; + else { + /* Table overflow: So allocate ourselves */ + c = kmalloc_node( + ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), + flags, cpu_to_node(cpu)); + if (!c) + return NULL; + } + + init_kmem_cache_cpu(s, c); + return c; +} + +static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) +{ + if (c < per_cpu(kmem_cache_cpu, cpu) || + c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { + kfree(c); + return; + } + c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); + per_cpu(kmem_cache_cpu_free, cpu) = c; +} + +static void free_kmem_cache_cpus(struct kmem_cache *s) +{ + int cpu; + + for_each_online_cpu(cpu) { + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); + + if (c) { + s->cpu_slab[cpu] = NULL; + free_kmem_cache_cpu(c, cpu); + } + } +} + +static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) +{ + int cpu; + + for_each_online_cpu(cpu) { + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); + + if (c) + continue; + + c = alloc_kmem_cache_cpu(s, cpu, flags); + if (!c) { + free_kmem_cache_cpus(s); + return 0; + } + s->cpu_slab[cpu] = c; + } + return 1; +} + +/* + * Initialize the per cpu array. 
+ */ +static void init_alloc_cpu_cpu(int cpu) +{ + int i; + + for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) + free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); +} + +static void __init init_alloc_cpu(void) +{ + int cpu; + + for_each_online_cpu(cpu) + init_alloc_cpu_cpu(cpu); + } + +#else +static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} +static inline void init_alloc_cpu(void) {} + +static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) +{ + init_kmem_cache_cpu(s, &s->cpu_slab); + return 1; +} +#endif + #ifdef CONFIG_NUMA /* * No kmalloc_node yet so do it by hand. We know that this is the first @@ -1876,10 +2093,11 @@ static void init_kmem_cache_node(struct * possible. * * Note that this function only works on the kmalloc_node_cache - * when allocating for the kmalloc_node_cache. + * when allocating for the kmalloc_node_cache. This is used for bootstrapping + * memory on a fresh node that has no slab structures yet. */ -static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags, - int node) +static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, + int node) { struct page *page; struct kmem_cache_node *n; @@ -1907,13 +2125,7 @@ static struct kmem_cache_node * __init e #endif init_kmem_cache_node(n); atomic_long_inc(&n->nr_slabs); - add_partial(n, page); - - /* - * new_slab() disables interupts. If we do not reenable interrupts here - * then bootup would continue with interrupts disabled. - */ - local_irq_enable(); + add_partial(n, page, 0); return n; } @@ -1921,7 +2133,7 @@ static void free_kmem_cache_nodes(struct { int node; - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = s->node[node]; if (n && n != &s->local_node) kmem_cache_free(kmalloc_caches, n); @@ -1939,7 +2151,7 @@ static int init_kmem_cache_nodes(struct else local_node = 0; - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n; if (local_node == node) @@ -2068,23 +2280,20 @@ static int calculate_sizes(struct kmem_c size = ALIGN(size, align); s->size = size; - s->order = calculate_order(size); - if (s->order < 0) - return 0; + if (s->flags & __SLAB_FORCE_ORDER) + s->order = s->flags & SLAB_ORDER_MASK; + else { + s->order = calculate_order(size); + if (s->order < 0) + return 0; + } /* * Determine the number of objects per slab */ s->objects = (PAGE_SIZE << s->order) / size; - /* - * Verify that the number of objects is within permitted limits. - * The page->inuse field is only 16 bit wide! So we cannot have - * more than 64k objects per slab. 
- */ - if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB) - return 0; - return 1; + return !!s->objects; } @@ -2107,9 +2316,12 @@ static int kmem_cache_open(struct kmem_c #ifdef CONFIG_NUMA s->defrag_ratio = 100; #endif + if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) + goto error; - if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) + if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) return 1; + free_kmem_cache_nodes(s); error: if (flags & SLAB_PANIC) panic("Cannot create slab %s size=%lu realsize=%u " @@ -2192,7 +2404,8 @@ static inline int kmem_cache_close(struc flush_all(s); /* Attempt to free all objects */ - for_each_online_node(node) { + free_kmem_cache_cpus(s); + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); n->nr_partial -= free_list(s, n, &n->partial); @@ -2227,17 +2440,17 @@ EXPORT_SYMBOL(kmem_cache_destroy); * Kmalloc subsystem *******************************************************************/ -struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned; +struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned; EXPORT_SYMBOL(kmalloc_caches); #ifdef CONFIG_ZONE_DMA -static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1]; +static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT]; #endif static int __init setup_slub_min_order(char *str) { get_option (&str, &slub_min_order); - + user_override = 1; return 1; } @@ -2246,7 +2459,7 @@ __setup("slub_min_order=", setup_slub_mi static int __init setup_slub_max_order(char *str) { get_option (&str, &slub_max_order); - + user_override = 1; return 1; } @@ -2255,7 +2468,7 @@ __setup("slub_max_order=", setup_slub_ma static int __init setup_slub_min_objects(char *str) { get_option (&str, &slub_min_objects); - + user_override = 1; return 1; } @@ -2397,12 +2610,8 @@ static struct kmem_cache *get_slab(size_ return ZERO_SIZE_PTR; index = size_index[(size - 1) / 8]; - } else { - if (size > KMALLOC_MAX_SIZE) - return NULL; - + } else index = fls(size - 1); - } #ifdef CONFIG_ZONE_DMA if (unlikely((flags & SLUB_DMA))) @@ -2414,9 +2623,15 @@ static struct kmem_cache *get_slab(size_ void *__kmalloc(size_t size, gfp_t flags) { - struct kmem_cache *s = get_slab(size, flags); + struct kmem_cache *s; - if (ZERO_OR_NULL_PTR(s)) + if (unlikely(size > PAGE_SIZE / 2)) + return (void *)__get_free_pages(flags | __GFP_COMP, + get_order(size)); + + s = get_slab(size, flags); + + if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, flags, -1, __builtin_return_address(0)); @@ -2426,9 +2641,15 @@ EXPORT_SYMBOL(__kmalloc); #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) { - struct kmem_cache *s = get_slab(size, flags); + struct kmem_cache *s; - if (ZERO_OR_NULL_PTR(s)) + if (unlikely(size > PAGE_SIZE / 2)) + return (void *)__get_free_pages(flags | __GFP_COMP, + get_order(size)); + + s = get_slab(size, flags); + + if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, flags, node, __builtin_return_address(0)); @@ -2441,7 +2662,8 @@ size_t ksize(const void *object) struct page *page; struct kmem_cache *s; - if (ZERO_OR_NULL_PTR(object)) + BUG_ON(!object); + if (unlikely(object == ZERO_SIZE_PTR)) return 0; page = get_object_page(object); @@ -2473,25 +2695,33 @@ EXPORT_SYMBOL(ksize); void kfree(const void *x) { - struct kmem_cache *s; struct page *page; - /* - * This has to be an unsigned comparison. According to Linus - * some gcc version treat a pointer as a signed entity. 
Then - * this comparison would be true for all "negative" pointers - * (which would cover the whole upper half of the address space). - */ - if (ZERO_OR_NULL_PTR(x)) + if (unlikely(ZERO_OR_NULL_PTR(x))) return; page = virt_to_head_page(x); - s = page->slab; - - slab_free(s, page, (void *)x, __builtin_return_address(0)); + if (unlikely(!PageSlab(page))) { + put_page(page); + return; + } + slab_free(page->slab, (void *)x, __builtin_return_address(0)); } EXPORT_SYMBOL(kfree); +static unsigned long count_partial(struct kmem_cache_node *n) +{ + unsigned long flags; + unsigned long x = 0; + struct page *page; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += page->inuse; + spin_unlock_irqrestore(&n->list_lock, flags); + return x; +} + /* * kmem_cache_shrink removes empty slabs from the partial lists and sorts * the remaining slabs by the number of items in use. The slabs with the @@ -2517,7 +2747,7 @@ int kmem_cache_shrink(struct kmem_cache return -ENOMEM; flush_all(s); - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { n = get_node(s, node); if (!n->nr_partial) @@ -2566,6 +2796,16 @@ int kmem_cache_shrink(struct kmem_cache } EXPORT_SYMBOL(kmem_cache_shrink); +/* + * Table to autotune the maximum slab order based on the number of pages + * that the system has available. + */ +static unsigned long __initdata phys_pages_for_order[PAGE_ALLOC_COSTLY_ORDER] = { + 32768, /* >128M if using 4K pages, >512M (16k), >2G (64k) */ + 256000, /* >1G if using 4k pages, >4G (16k), >16G (64k) */ + 1000000 /* >4G if using 4k pages, >16G (16k), >64G (64k) */ +}; + /******************************************************************** * Basic setup of slabs *******************************************************************/ @@ -2575,6 +2815,17 @@ void __init kmem_cache_init(void) int i; int caches = 0; + init_alloc_cpu(); + + if (!user_override) { + /* No manual parameters. Autotune for system */ + for (i = 0; i < PAGE_ALLOC_COSTLY_ORDER; i++) + if (num_physpages > phys_pages_for_order[i]) { + slub_max_order++; + slub_min_objects <<= 1; + } + } + #ifdef CONFIG_NUMA /* * Must first have the slab cache available for the allocations of the @@ -2602,7 +2853,7 @@ void __init kmem_cache_init(void) caches++; } - for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { + for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) { create_kmalloc_cache(&kmalloc_caches[i], "kmalloc", 1 << i, GFP_KERNEL); caches++; @@ -2629,16 +2880,18 @@ void __init kmem_cache_init(void) slab_state = UP; /* Provide the correct kmalloc names now that the caches are up */ - for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) + for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) kmalloc_caches[i]. name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); #ifdef CONFIG_SMP register_cpu_notifier(&slab_notifier); + kmem_size = offsetof(struct kmem_cache, cpu_slab) + + nr_cpu_ids * sizeof(struct kmem_cache_cpu *); +#else + kmem_size = sizeof(struct kmem_cache); #endif - kmem_size = offsetof(struct kmem_cache, cpu_slab) + - nr_cpu_ids * sizeof(struct page *); printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," " CPUs=%d, Nodes=%d\n", @@ -2717,12 +2970,21 @@ struct kmem_cache *kmem_cache_create(con down_write(&slub_lock); s = find_mergeable(size, align, flags, name, ctor); if (s) { + int cpu; + s->refcount++; /* * Adjust the object sizes so that we clear * the complete object on kzalloc. 
*/ s->objsize = max(s->objsize, (int)size); + + /* + * And then we need to update the object size in the + * per cpu structures + */ + for_each_online_cpu(cpu) + get_cpu_slab(s, cpu)->objsize = s->objsize; s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); up_write(&slub_lock); if (sysfs_slab_alias(s, name)) @@ -2765,15 +3027,29 @@ static int __cpuinit slab_cpuup_callback unsigned long flags; switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + init_alloc_cpu_cpu(cpu); + down_read(&slub_lock); + list_for_each_entry(s, &slab_caches, list) + s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, + GFP_KERNEL); + up_read(&slub_lock); + break; + case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: down_read(&slub_lock); list_for_each_entry(s, &slab_caches, list) { + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); + local_irq_save(flags); __flush_cpu_slab(s, cpu); local_irq_restore(flags); + free_kmem_cache_cpu(c, cpu); + s->cpu_slab[cpu] = NULL; } up_read(&slub_lock); break; @@ -2790,9 +3066,14 @@ static struct notifier_block __cpuinitda void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) { - struct kmem_cache *s = get_slab(size, gfpflags); + struct kmem_cache *s; + + if (unlikely(size > PAGE_SIZE / 2)) + return (void *)__get_free_pages(gfpflags | __GFP_COMP, + get_order(size)); + s = get_slab(size, gfpflags); - if (ZERO_OR_NULL_PTR(s)) + if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, gfpflags, -1, caller); @@ -2801,9 +3082,14 @@ void *__kmalloc_track_caller(size_t size void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, int node, void *caller) { - struct kmem_cache *s = get_slab(size, gfpflags); + struct kmem_cache *s; + + if (unlikely(size > PAGE_SIZE / 2)) + return (void *)__get_free_pages(gfpflags | __GFP_COMP, + get_order(size)); + s = get_slab(size, gfpflags); - if (ZERO_OR_NULL_PTR(s)) + if (unlikely(ZERO_OR_NULL_PTR(s))) return s; return slab_alloc(s, gfpflags, node, caller); @@ -2902,7 +3188,7 @@ static long validate_slab_cache(struct k return -ENOMEM; flush_all(s); - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); count += validate_slab_node(s, n, map); @@ -3116,13 +3402,13 @@ static int list_locations(struct kmem_ca int node; if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), - GFP_KERNEL)) + GFP_TEMPORARY)) return sprintf(buf, "Out of memory\n"); /* Push back cpu slabs */ flush_all(s); - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); unsigned long flags; struct page *page; @@ -3191,19 +3477,6 @@ static int list_locations(struct kmem_ca return n; } -static unsigned long count_partial(struct kmem_cache_node *n) -{ - unsigned long flags; - unsigned long x = 0; - struct page *page; - - spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - x += page->inuse; - spin_unlock_irqrestore(&n->list_lock, flags); - return x; -} - enum slab_stat_type { SL_FULL, SL_PARTIAL, @@ -3224,32 +3497,27 @@ static unsigned long slab_objects(struct int node; int x; unsigned long *nodes; - unsigned long *per_cpu; - nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); - per_cpu = nodes + nr_node_ids; + nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); - for_each_possible_cpu(cpu) { - struct page *page = s->cpu_slab[cpu]; - int node; + if ((s->flags & 
(SO_OBJECTS|SO_PARTIAL|SO_FULL|SO_CPU)) + == (SO_OBJECTS|SO_CPU)) { + for_each_online_cpu(cpu) { + int node; + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); - if (page) { - node = page_to_nid(page); - if (flags & SO_CPU) { - int x = 0; - - if (flags & SO_OBJECTS) - x = page->inuse; - else - x = 1; - total += x; - nodes[node] += x; - } - per_cpu[node]++; + if (!c) + continue; + + node = cpu_to_node(cpu); + x = c->objects; + + total += x; + nodes[node] += x; } } - for_each_online_node(node) { + for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); if (flags & SO_PARTIAL) { @@ -3263,7 +3531,6 @@ static unsigned long slab_objects(struct if (flags & SO_FULL) { int full_slabs = atomic_long_read(&n->nr_slabs) - - per_cpu[node] - n->nr_partial; if (flags & SO_OBJECTS) @@ -3277,7 +3544,7 @@ static unsigned long slab_objects(struct x = sprintf(buf, "%lu", total); #ifdef CONFIG_NUMA - for_each_online_node(node) + for_each_node_state(node, N_NORMAL_MEMORY) if (nodes[node]) x += sprintf(buf + x, " N%d=%lu", node, nodes[node]); @@ -3291,13 +3558,19 @@ static int any_slab_objects(struct kmem_ int node; int cpu; - for_each_possible_cpu(cpu) - if (s->cpu_slab[cpu]) + for_each_possible_cpu(cpu) { + struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); + + if (c && c->freelist) return 1; + } - for_each_node(node) { + for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); + if (!n) + continue; + if (n->nr_partial || atomic_long_read(&n->nr_slabs)) return 1; } @@ -3344,6 +3617,50 @@ static ssize_t objs_per_slab_show(struct } SLAB_ATTR_RO(objs_per_slab); +static ssize_t refill_show(struct kmem_cache *s, char *buf) +{ + int refill = get_cpu_slab(s, 0)->refill; + + return sprintf(buf, "%d\n", refill); +} +static ssize_t refill_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + int n = simple_strtoul(buf, NULL, 10); + int cpu; + + if (n < 1 || n > 200) + return -EINVAL; + + for_each_online_cpu(cpu) + get_cpu_slab(s, cpu)->refill = n; + + return length; +} +SLAB_ATTR(refill); + +static ssize_t max_show(struct kmem_cache *s, char *buf) +{ + int max = get_cpu_slab(s, 0)->max; + + return sprintf(buf, "%d\n", max); +} +static ssize_t max_store(struct kmem_cache *s, + const char *buf, size_t length) +{ + int n = simple_strtoul(buf, NULL, 10); + int cpu; + + if (n < 1 || n > 10000) + return -EINVAL; + + for_each_online_cpu(cpu) + get_cpu_slab(s, cpu)->max = n; + + return length; +} +SLAB_ATTR(max); + static ssize_t order_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", s->order); @@ -3379,11 +3696,11 @@ static ssize_t partial_show(struct kmem_ } SLAB_ATTR_RO(partial); -static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) +static ssize_t cpu_objects_show(struct kmem_cache *s, char *buf) { - return slab_objects(s, buf, SO_CPU); + return slab_objects(s, buf, SO_CPU|SO_OBJECTS); } -SLAB_ATTR_RO(cpu_slabs); +SLAB_ATTR_RO(cpu_objects); static ssize_t objects_show(struct kmem_cache *s, char *buf) { @@ -3591,9 +3908,11 @@ static struct attribute * slab_attrs[] = &objs_per_slab_attr.attr, &order_attr.attr, &objects_attr.attr, + &refill_attr.attr, + &max_attr.attr, &slabs_attr.attr, &partial_attr.attr, - &cpu_slabs_attr.attr, + &cpu_objects_attr.attr, &ctor_attr.attr, &aliases_attr.attr, &align_attr.attr,