Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-23 20:17:39.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-24 10:05:32.000000000 -0800
@@ -57,7 +57,7 @@
  * Enabling SLUB_DEBUG results in internal consistency checks
  * being enabled.
  */
-#undef SLUB_DEBUG
+#define SLUB_DEBUG
 
 /*
  * SLUB_DEBUG_KFREE enabled checking for double frees. In order to do this
@@ -67,17 +67,6 @@
 #define SLUB_DEBUG_KFREE
 
 /*
- * SLUB_MERGE causes multiple slabs that have the same object size to be
- * combined. This reduces the number of slabs significantly. This in turn
- * increases the chance of finding cache hot objects. However, the slab
- * statistics are only kept per slab and thus one will not be able to
- * separate out the uses of various slabs.
- */
-#ifndef SLUB_DEBUG
-#define SLUB_MERGE
-#endif
-
-/*
  * Set of flags that will prohibit slab merging
  */
 #define SLUB_NO_MERGE (SLAB_RECLAIM_ACCOUNT | SLAB_DESTROY_BY_RCU | \
@@ -439,10 +428,14 @@
 {
 	struct page *page;
 
+	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
+	if (flags & __GFP_NO_GROW)
+		return NULL;
+
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags, node);
+	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
 	if (!page)
 		goto out;
@@ -630,7 +623,7 @@
 	if (page)
 		goto gotpage;
 
-	page = new_slab(s, flags, node);
+	page = new_slab(s, gfpflags, node);
 	if (!page) {
 		local_irq_restore(flags);
 		return NULL;
 	}
@@ -826,7 +819,13 @@
  * and increases the number of allocations possible without having to
  * take the list_lock.
  */
-static int slab_min_order = 0;
+static int slub_min_order = 0;
+
+/*
+ * Merge control. If this is set then no merging of slab caches into the
+ * general caches will occur.
+ */
+static int slub_nomerge = 0;
 
 static int calculate_order(int size)
 {
@@ -844,7 +843,7 @@
 			return order;
 	}
 
-	for (order = max(slab_min_order, fls(size - 1) - PAGE_SHIFT);
+	for (order = max(slub_min_order, fls(size - 1) - PAGE_SHIFT);
 			order < MAX_ORDER; order++) {
 		unsigned long slab_size = PAGE_SIZE << order;
 
@@ -885,18 +884,6 @@
 	BUG_ON(flags & SLUB_UNIMPLEMENTED);
 	memset(s, 0, sizeof(struct kmem_cache));
-	for_each_node(node)
-		atomic_long_set(&s->nr_slabs[node], 0);
-	atomic_set(&s->refcount, 1);
-	spin_lock_init(&s->list_lock);
-	for_each_possible_cpu(cpu)
-		s->cpu_slab[cpu] = NULL;
-	INIT_LIST_HEAD(&s->partial);
-#ifdef CONFIG_SMP
-	mutex_init(&s->flushing);
-	atomic_set(&s->cpu_slabs, 0);
-	INIT_DELAYED_WORK(&s->flush, flusher);
-#endif
 	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
@@ -934,10 +921,25 @@
 		goto error;
 
 	s->objects = (PAGE_SIZE << s->order) / size;
-	BUG_ON(s->objects > 65535);
-	if (!s->objects)
+	if (!s->objects || s->objects > 65535)
 		goto error;
 
+	atomic_set(&s->refcount, 1);
+
+	for_each_possible_cpu(cpu)
+		s->cpu_slab[cpu] = NULL;
+
+	for_each_node(node)
+		atomic_long_set(&s->nr_slabs[node], 0);
+	INIT_LIST_HEAD(&s->partial);
+	spin_lock_init(&s->list_lock);
+
+#ifdef CONFIG_SMP
+	mutex_init(&s->flushing);
+	atomic_set(&s->cpu_slabs, 0);
+	INIT_DELAYED_WORK(&s->flush, flusher);
+#endif
+
 	register_slab(s);
 	return 1;
@@ -1008,7 +1010,7 @@
 	spin_lock_irqsave(&s->list_lock, flags);
 	list_for_each_entry_safe(page, h, list, lru)
 		if (!page->inuse) {
-			list_del(&s->partial);
+			list_del(&page->lru);
 			discard_slab(s, page);
 		} else
 			slabs_inuse++;
@@ -1024,9 +1026,6 @@
 {
 	int node;
 
-	if (!atomic_dec_and_test(&s->refcount))
-		return 0;
-
 	flush_all(s);
 	free_list(s, &s->partial);
@@ -1045,6 +1044,9 @@
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	if (!atomic_dec_and_test(&s->refcount))
+		return;
+
 	BUG_ON(kmem_cache_close(s));
 	kfree(s);
 }
@@ -1115,14 +1117,22 @@
 static struct kmem_cache *kmalloc_caches_dma[KMALLOC_NR_CACHES];
 #endif
 
-static int __init setup_slab_min_order(char *str)
+static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slab_min_order);
+	get_option (&str, &slub_min_order);
+
+	return 1;
+}
+__setup("slub_min_order=", setup_slub_min_order);
+
+static int __init setup_slub_nomerge(char *str)
+{
+	slub_nomerge = 1;
 
 	return 1;
 }
-__setup("slab_min_order=", setup_slab_min_order);
+__setup("slub_nomerge", setup_slub_nomerge);
 
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 		const char *name, int size)
@@ -1138,39 +1148,50 @@
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW;
-	struct kmem_cache *s;
-	struct kmem_cache *x;
-	size_t realsize;
-
-	BUG_ON(size < 0);
-
-	if (!(flags & SLUB_DMA))
-		return &kmalloc_caches[index];
-
-	s = kmalloc_caches_dma[index];
-	if (s)
-		return s;
-	/* Dynamically create dma cache */
-	x = kmalloc(sizeof(struct kmem_cache), flags & ~(__GFP_DMA));
+	BUG_ON(size <= 0 || index < 0);
 
-	if (!x)
-		panic("Unable to allocate memory for dma cache\n");
+#ifdef CONFIG_ZONE_DMA
+	if ((flags & SLUB_DMA)) {
+		struct kmem_cache *s;
+		struct kmem_cache *x;
+		char *text;
+		size_t realsize;
+
+		s = kmalloc_caches_dma[index];
+		if (s)
+			return s;
+
+		/* The control structures do not have to be in the DMA zone */
+		flags &= ~__GFP_DMA;
+
+		/* Dynamically create dma cache */
+		x = kmalloc(sizeof(struct kmem_cache), flags);
+		if (!x)
+			panic("Unable to allocate memory for dma cache\n");
 
 #ifdef KMALLOC_EXTRA
-	if (index <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW)
+		if (index <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW)
 #endif
-	realsize = 1 << index;
+			realsize = 1 << index;
 #ifdef KMALLOC_EXTRA
-	else if (index == KMALLOC_EXTRAS)
-		realsize = 96;
-	else
-		realsize = 192;
+		else {
+			index -= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW +1;
+			if (!index)
+				realsize = 96;
+			else
+				realsize = 192;
+		}
 #endif
 
-	s = create_kmalloc_cache(x, "kmalloc_dma", realsize);
-	kmalloc_caches_dma[index] = s;
-	return s;
+		text = kasprintf(flags, "kmalloc_dma-%ld", realsize);
+		s = create_kmalloc_cache(x, text, realsize);
+		kfree(text);
+		kmalloc_caches_dma[index] = s;
+		return s;
+	}
+#endif
+	return &kmalloc_caches[index];
 }
 
 void *__kmalloc(size_t size, gfp_t flags)
@@ -1257,7 +1278,6 @@
 #endif
 }
 
-#ifdef SLUB_MERGE
 static struct kmem_cache *kmem_cache_dup(struct kmem_cache *s)
 {
 	atomic_inc(&s->refcount);
@@ -1273,7 +1293,6 @@
 		return NULL;
 	return &kmalloc_caches[index];
 }
-#endif
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags,
@@ -1282,8 +1301,7 @@
 {
 	struct kmem_cache *s;
 
-#ifdef SLUB_MERGE
-	if (!ctor && !dtor && !(flags & SLUB_NO_MERGE) &&
+	if (!slub_nomerge && !ctor && !dtor && !(flags & SLUB_NO_MERGE) &&
 			align <= ARCH_SLAB_MINALIGN) {
 		int sz = ALIGN(size, calculate_alignment(flags, align));
@@ -1305,7 +1323,6 @@
 			return kmem_cache_dup(s);
 		}
 	}
-#endif
 
 	s = kmalloc(sizeof(struct kmem_cache), GFP_KERNEL);
 	if (!s)
@@ -1607,4 +1624,5 @@
 	return 0;
 }
 __initcall(cpucache_init);
+#endif
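
As an illustrative note (not part of the patch itself): the two __setup() handlers added above make the new controls available on the kernel command line, for example something like

	slub_min_order=2 slub_nomerge

where slub_min_order=2 would make calculate_order() start at order 2 (at least four pages per slab) and slub_nomerge would suppress merging of compatible caches into the general kmalloc caches; the specific order value here is only an example.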