Index: linux-2.6.20-rc1/include/linux/slub_def.h
===================================================================
--- linux-2.6.20-rc1.orig/include/linux/slub_def.h	2006-12-14 18:34:03.000000000 -0800
+++ linux-2.6.20-rc1/include/linux/slub_def.h	2006-12-14 19:06:25.000000000 -0800
@@ -11,18 +11,32 @@
 #include
 
 /*
+ * Per cpu structure to manage active slabs.
+ */
+struct active_slab {
+	struct page *page;
+	struct kmem_cache *slab;
+	void **freelist;
+	int nr_free;
+	int referenced;
+#ifdef CONFIG_SMP
+	int flush_active;
+	struct delayed_work flush;
+#endif
+} ____cacheline_aligned_in_smp;
+
+/*
  * Slab cache management.
  */
 struct kmem_cache {
 	spinlock_t list_lock;	/* Protecty partial list and nr_partial */
 	struct list_head partial;
 	unsigned long nr_partial;
-	int offset;		/* Free pointer offset. */
-	struct page *active[NR_CPUS];
 	atomic_long_t nr_slabs;	/* Total slabs used */
+	int offset;		/* Free pointer offset. */
+	int size;		/* Total size of an object */
 	unsigned int order;	/* Size of the slab page */
 	unsigned long flags;
-	int size;		/* Total size of an object */
 	int objects;		/* Number of objects in slab */
 	atomic_t refcount;	/* Refcount for destroy */
 	int align;
@@ -33,10 +47,10 @@
 	int inuse;		/* Used portion of the chunk */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slabs */
-#ifdef CONFIG_SMP
-	struct mutex flushing;
-	atomic_t active_cpus;	/* if >0 then flusher is scheduled */
-	struct delayed_work flush;
+#ifdef CONFIG_NUMA
+	struct active_slab *active[NR_CPUS];
+#else
+	struct active_slab active[NR_CPUS] ____cacheline_aligned_in_smp;
 #endif
 };
 
@@ -44,7 +58,6 @@
  * Kmalloc subsystem.
  */
 #define KMALLOC_SHIFT_LOW 3
-
 #define KMALLOC_SHIFT_HIGH 18
 
 #if L1_CACHE_BYTES <= 64
Index: linux-2.6.20-rc1/mm/slub.c
===================================================================
--- linux-2.6.20-rc1.orig/mm/slub.c	2006-12-14 18:34:03.000000000 -0800
+++ linux-2.6.20-rc1/mm/slub.c	2006-12-15 06:25:57.000000000 -0800
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | \
 		SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -57,8 +58,52 @@
 #define ARCH_SLAB_MINALIGN sizeof(void *)
 #endif
 
-static void register_slab(struct kmem_cache *s);
-static void unregister_slab(struct kmem_cache *s);
+#ifdef CONFIG_NUMA
+
+/* We need to bootstrap the slab with the active slabs in a special way */
+#define ACTIVE_SLAB_NR kmalloc_index(sizeof(struct active_slab))
+#define ACTIVE_SLAB_SLAB &kmalloc_caches[ACTIVE_SLAB_NR - KMALLOC_SHIFT_LOW]
+
+#define ACTIVE_SLAB(__s,__cpu) ((__s)->active[__cpu])
+#else
+#define ACTIVE_SLAB(__s,__cpu) (&(__s)->active[__cpu])
+#endif
+
+/*********************************************************************
+ *	Track slabs and provide the ability to run operations on them
+ *********************************************************************/
+
+static DECLARE_RWSEM(slabstat_sem);
+
+LIST_HEAD(slab_caches);
+
+void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
+{
+	struct list_head *h;
+
+	down_read(&slabstat_sem);
+	list_for_each(h, &slab_caches) {
+		struct kmem_cache *s =
+			container_of(h, struct kmem_cache, list);
+
+		func(s, cpu);
+	}
+	up_read(&slabstat_sem);
+}
+
+void register_slab(struct kmem_cache *s)
+{
+	down_write(&slabstat_sem);
+	list_add(&s->list, &slab_caches);
+	up_write(&slabstat_sem);
+}
+
+void unregister_slab(struct kmem_cache *s)
+{
+	down_write(&slabstat_sem);
+	list_del(&s->list);
+	up_write(&slabstat_sem);
+}
 
 /********************************************************************
  *		Core slab cache functions
@@ -174,12 +219,16 @@
  */
 static __always_inline void slab_lock(struct page *page)
 {
+#ifdef CONFIG_SMP
 	bit_spin_lock(PG_locked, &page->flags);
+#endif
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+#ifdef CONFIG_SMP
 	bit_spin_unlock(PG_locked, &page->flags);
+#endif
 }
 
 /*
@@ -218,44 +267,27 @@
 	return 0;
 }
 
-/*
- * Get a partial page, lock it and return it.
- */
-#ifdef CONFIG_NUMA
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+struct page *numa_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page *page;
+#ifdef CONFIG_NUMA
 	int searchnode = (node == -1) ? numa_node_id() : node;
+	struct page *page;
 
-	if (!s->nr_partial)
-		return NULL;
-
-	spin_lock(&s->list_lock);
 	/*
 	 * Search for slab on the right node
 	 */
 	list_for_each_entry(page, &s->partial, lru)
 		if (likely(page_to_nid(page) == searchnode) &&
 				lock_and_del_slab(s, page))
-			goto out;
+			return page;
 
-	if (likely(!(flags & __GFP_THISNODE))) {
-		/*
-		 * We can fall back to any other node in order to
-		 * reduce the size of the partial list.
-		 */
-		list_for_each_entry(page, &s->partial, lru)
-			if (likely(lock_and_del_slab(s, page)))
-				goto out;
-	}
-
-	/* Nothing found */
-	page = NULL;
-out:
-	spin_unlock(&s->list_lock);
-	return page;
+#endif
+	return NULL;
 }
-#else
+
+/*
+ * Get a partial page, lock it and return it.
+ */
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
@@ -268,17 +300,24 @@
 		return NULL;
 
 	spin_lock(&s->list_lock);
+
+	page = numa_partial(s, flags, node);
+	if (page)
+		goto out;
+
+	if (NUMA_BUILD && (flags & __GFP_THISNODE))
+		goto out;
+
 	list_for_each_entry(page, &s->partial, lru)
 		if (likely(lock_and_del_slab(s, page)))
 			goto out;
 
-	/* No slab or all slabs busy */
+	/* Nothing found */
 	page = NULL;
 out:
 	spin_unlock(&s->list_lock);
 	return page;
 }
-#endif
 
 /*
  * Debugging checks
@@ -390,17 +429,12 @@
 {
 	struct page *page;
 
-	if (flags & __GFP_WAIT)
-		local_irq_enable();
-
-	page = allocate_slab(s, flags, node);
+	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
 	if (!page)
-		goto out;
+		return NULL;
 
 	page->offset = s->offset;
-	atomic_long_inc(&s->nr_slabs);
-
 	page->slab = (struct kmem_cache *)s;
 	__SetPageSlab(page);
@@ -421,10 +455,6 @@
 		check_free_chain(s, page);
 	} else
 		__SetPageSlabsingle(page);
-
-out:
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
 	return page;
 }
 
@@ -450,81 +480,92 @@
 /*
  * Remove the currently active slab
  */
-static void __always_inline deactivate_slab(struct kmem_cache *s,
-						struct page *page, int cpu)
+static void __always_inline deactivate_slab(struct active_slab *a)
 {
-	s->active[cpu] = NULL;
+	struct page *page = a->page;
+	struct kmem_cache *s = a->slab;
+
+	if (a->nr_free) {
+		if (unlikely(page->freelist)) {
+			/*
+			 * Deal with the rare case where we have two
+			 * freelists.
+			 *
+			 * Merge the two freelists. The freelist in the
+			 * active slab comes first.
+			 */
+			void **freelist = page->freelist;
+			void **p;
+
+			page->freelist = a->freelist;
+
+			for (p = a->freelist; p[s->offset]; p = p[s->offset])
+				page->inuse--;
+
+			p[s->offset] = freelist;
+
+		} else {
+			page->freelist = a->freelist;
+			page->inuse -= a->nr_free;
+		}
+	}
+	a->page = NULL;
+	a->referenced = 0;
+	a->nr_free = 0;
+	a->freelist = NULL;
 	__ClearPageActive(page);
-	__ClearPageReferenced(page);
-	putback_slab(s, page);
+	putback_slab(a->slab, page);
 }
 
 /*
- * Flush active slab
+ * Unconditionally flush any active slabs back to partial lists.
+ *
  * Called from IPI handler with interrupts disabled.
  */
 static void flush_active(void *d)
 {
 	struct kmem_cache *s = d;
-	int cpu = smp_processor_id();
-	struct page *page = s->active[cpu];
+	struct active_slab *a = ACTIVE_SLAB(s, smp_processor_id());
 
-	page = s->active[cpu];
-	if (likely(page)) {
-		slab_lock(page);
-		deactivate_slab(s, page, cpu);
+	if (likely(a->page)) {
+		slab_lock(a->page);
+		deactivate_slab(a);
+		a->flush_active = 0;
 	}
 }
 
 #ifdef CONFIG_SMP
 /*
- * Called from IPI during flushing to check and flush active slabs.
+ * Check for an active slab and, if it has not been referenced,
+ * flush it back to the partial list.
+ *
+ * Called from kevent workqueue.
  */
-void check_flush_active(void *d)
+void check_flush_active(struct work_struct *w)
 {
-	struct kmem_cache *s = d;
-	int cpu = smp_processor_id();
-	struct page *page = s->active[cpu];
+	struct active_slab *a = container_of(w, struct active_slab, flush.work);
 
-	if (!page)
+	if (!a->page)
 		return;
 
-	if (PageReferenced(page)) {
-		ClearPageReferenced(page);
-		atomic_inc(&s->active_cpus);
+	local_irq_disable();
+	if (a->referenced) {
+		a->referenced = 0;
+		a->flush_active = 1;
+		schedule_delayed_work(&a->flush, 2 * HZ);
 	} else {
-		slab_lock(page);
-		deactivate_slab(s, page, cpu);
+		slab_lock(a->page);
+		deactivate_slab(a);
+		a->flush_active = 0;
 	}
-}
-
-/*
- * Called from eventd
- */
-static void flusher(struct work_struct *w)
-{
-	struct kmem_cache *s = container_of(w, struct kmem_cache, flush.work);
-
-	if (!mutex_trylock(&s->flushing))
-		return;
-
-	atomic_set(&s->active_cpus, num_online_cpus());
-	on_each_cpu(check_flush_active, s, 1, 1);
-	if (atomic_read(&s->active_cpus))
-		schedule_delayed_work(&s->flush, 2 * HZ);
-	mutex_unlock(&s->flushing);
+	local_irq_enable();
 }
 
 static void drain_all(struct kmem_cache *s)
 {
-	if (atomic_read(&s->active_cpus)) {
-		mutex_lock(&s->flushing);
-		cancel_delayed_work(&s->flush);
-		atomic_set(&s->active_cpus, 0);
-		on_each_cpu(flush_active, s, 1, 1);
-		mutex_unlock(&s->flushing);
-	}
+	on_each_cpu(flush_active, s, 1, 1);
+
 }
 #else
 static void drain_all(struct kmem_cache *s)
@@ -537,93 +578,135 @@
 }
 #endif
 
-static __always_inline void *__slab_alloc(struct kmem_cache *s,
+static __always_inline void *allocate(struct kmem_cache *s,
 				gfp_t gfpflags, int node)
 {
-	struct page *page;
+	struct active_slab *a;
 	void **object;
-	void *next_object;
 	unsigned long flags;
-	int cpu;
 
 	local_irq_save(flags);
-	cpu = smp_processor_id();
-	page = s->active[cpu];
-	if (!page)
+	a = ACTIVE_SLAB(s, smp_processor_id());
+	if (unlikely(!a->page))
 		goto new_slab;
 
-	slab_lock(page);
-	check_free_chain(s, page);
-	if (unlikely(!page->freelist))
-		goto another_slab;
-
-	if (unlikely(node != -1 && page_to_nid(page) != node))
-		goto another_slab;
-redo:
-	page->inuse++;
-	object = page->freelist;
-	page->freelist = next_object = object[page->offset];
-	__SetPageReferenced(page);
-	slab_unlock(page);
-	local_irq_restore(flags);
-	return object;
+	if (likely(a->nr_free))
+		goto have_object;
 
-another_slab:
-	deactivate_slab(s, page, cpu);
+	slab_lock(a->page);
+	check_free_chain(s, a->page);
+	if (a->page->freelist)
+		goto switch_freelist;
+
+	if (node != -1 && page_to_nid(a->page) != node)
+		deactivate_slab(a);
 
 new_slab:
-	page = get_partial(s, gfpflags, node);
-	if (page)
-		goto gotpage;
+	a->page = get_partial(s, gfpflags, node);
+	if (unlikely(!a->page)) {
+		struct page *page;
 
-	page = new_slab(s, flags, node);
-	if (!page) {
-		local_irq_restore(flags);
-		return NULL;
-	}
+		if (gfpflags & __GFP_WAIT)
+			local_irq_enable();
 
-	/*
-	 * There is no point in putting single object slabs
-	 * on an active list.
-	 */
-	if (unlikely(s->objects == 1)) {
-		local_irq_restore(flags);
-		return page_address(page);
-	}
+		page = new_slab(s, gfpflags, node);
 
-	slab_lock(page);
+		if (gfpflags & __GFP_WAIT)
+			local_irq_disable();
+
+		if (!page) {
+			object = NULL;
+			goto out;
+		}
+
+		/*
+		 * There is no point in putting single object slabs
+		 * on an active list.
+		 */
+		if (unlikely(s->objects == 1)) {
+			object = page_address(page);
+			goto out;
+		}
+
+		/*
+		 * We may have reenabled interrupts during the allocation
+		 * Verify the state of the slab.
+		 */
+		a = ACTIVE_SLAB(s, smp_processor_id());
+		if (a->page)
+			/*
+			 * Someone else already allocated a page. Drop the
+			 * new one.
+			 */
+			discard_slab(s, page);
+		else
+			a->page = page;
 
-gotpage:
-	if (s->active[cpu]) {
-		slab_unlock(page);
-		discard_slab(s, page);
-		page = s->active[cpu];
 		slab_lock(page);
-	} else
-		s->active[cpu] = page;
+	}
 
-	__SetPageActive(page);
-	check_free_chain(s, page);
+	__SetPageActive(a->page);
+	check_free_chain(s, a->page);
+
+switch_freelist:
+	a->freelist = a->page->freelist;
+	a->page->freelist = NULL;
+	a->nr_free = s->objects - a->page->inuse;
+	a->page->inuse += a->nr_free;
+	slab_unlock(a->page);
+
+have_object:
+	/* Fastpath */
+	object = a->freelist;
+	a->nr_free--;
+	a->referenced = 1;
+	a->freelist = object[a->page->offset];
 
 #ifdef CONFIG_SMP
-	if (keventd_up() && !atomic_read(&s->active_cpus)) {
-		atomic_inc(&s->active_cpus);
-		schedule_delayed_work(&s->flush, 2 * HZ);
+	if (!a->flush_active && keventd_up()) {
+		a->flush_active = 1;
+		schedule_delayed_work(&a->flush, 2 * HZ);
 	}
 #endif
-	goto redo;
+
+out:
+	local_irq_restore(flags);
+	return object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return __slab_alloc(s, gfpflags, -1);
+	return allocate(s, gfpflags, -1);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
+/*
+ * Bootstrap function to allow the allocation of active_slabs without
+ * having active slabs yet.
+ */
+static void * __init early_active_slab_alloc(int node)
+{
+	struct kmem_cache *s = ACTIVE_SLAB_SLAB;
+	struct page *page;
+	void **object;
+
+	page = get_partial(s, GFP_KERNEL, node);
+	if (!page) {
+		page = new_slab(s, GFP_KERNEL, node);
+
+		BUG_ON(!page);
+		slab_lock(page);
+	}
+	object = page->freelist;
+	page->freelist = object[s->offset];
+	page->inuse++;
+	putback_slab(s, page);
+	return object;
+}
+
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return __slab_alloc(s, gfpflags, node);
+	return allocate(s, gfpflags, node);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -634,12 +717,12 @@
 	void *prior;
 	void **object = (void *)x;
 	unsigned long flags;
+	struct active_slab *a;
 
 	if (!object)
 		return;
 
 	page = virt_to_page(x);
-
 	if (unlikely(PageCompound(page)))
 		page = page->first_page;
@@ -654,8 +737,19 @@
 #endif
 
 	local_irq_save(flags);
+	a = ACTIVE_SLAB(s, smp_processor_id());
+	if (a->page == page) {
+		void **object = x;
+
+		a->nr_free++;
+		object[s->offset] = a->freelist;
+		a->freelist = object;
+		goto out;
+	}
+
 	if (unlikely(PageSlabsingle(page)))
 		goto single_object_slab;
+
 	slab_lock(page);
 
 #ifdef SLAB_DEBUG_KFREE
@@ -688,8 +782,10 @@
 	 */
 	remove_partial(s, page);
 	slab_unlock(page);
+
 single_object_slab:
 	discard_slab(s, page);
+out:
 	local_irq_restore(flags);
 	return;
 
@@ -726,7 +822,6 @@
 	dump_stack();
 	printk(KERN_CRIT "***** Trying to continue by not "
 		"freeing object.\n");
-	return;
 #endif
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -809,6 +904,45 @@
 	return order;
 }
 
+/*
+ * We can actually operate slabs any time after the page allocator is up.
+ * slab_is_available() merely means that the kmalloc array is available.
+ *
+ * However, be aware that deriving allocators depends on kmalloc being
+ * functional.
+ */
+static enum { DOWN, PARTIAL, UP } slab_state = DOWN;
+
+int slab_is_available(void)
+{
+	return slab_state == UP;
+}
+
+static void alloc_active(struct kmem_cache *s, int cpu)
+{
+	struct active_slab *a;
+
+#ifdef CONFIG_NUMA
+	if (slab_state == DOWN) {
+		BUG_ON(s != ACTIVE_SLAB_SLAB);
+		a = early_active_slab_alloc(cpu_to_node(cpu));
+	} else
+		a = kmem_cache_alloc_node(ACTIVE_SLAB_SLAB,
+				GFP_KERNEL, cpu_to_node(cpu));
+	BUG_ON(!a);
+	s->active[cpu] = a;
+#else
+	a = ACTIVE_SLAB(s, cpu);
+#endif
+#ifdef CONFIG_SMP
+	a->flush_active = 0;
+	INIT_DELAYED_WORK(&a->flush, check_flush_active);
+#endif
+	a->page = NULL;
+	a->slab = s;
+	a->referenced = 0;
+}
+
 int kmem_cache_open(struct kmem_cache *s,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
@@ -822,14 +956,7 @@
 	atomic_long_set(&s->nr_slabs, 0);
 	atomic_set(&s->refcount, 1);
 	spin_lock_init(&s->list_lock);
-	for_each_possible_cpu(cpu)
-		s->active[cpu] = NULL;
 	INIT_LIST_HEAD(&s->partial);
-#ifdef CONFIG_SMP
-	mutex_init(&s->flushing);
-	atomic_set(&s->active_cpus, 0);
-	INIT_DELAYED_WORK(&s->flush, flusher);
-#endif
 	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
@@ -876,12 +1003,15 @@
 	if (!s->objects)
 		goto error;
 
+	for_each_online_cpu(cpu)
+		alloc_active(s, cpu);
+
 	register_slab(s);
 	return 1;
 
 error:
 	if (flags & SLAB_PANIC)
-		panic("Cannot create slab %s size=%ld realsize=%d "
+		panic("Cannot open slab %s size=%ld realsize=%d "
 			"order=%d offset=%d flags=%lx\n",
 			s->name, (unsigned long)size, s->size, s->order,
 			s->offset, flags);
@@ -1052,6 +1182,22 @@
 	return slabs_inuse;
 }
 
+static void free_active(struct kmem_cache *s, int cpu)
+{
+#ifdef CONFIG_NUMA
+	kfree(ACTIVE_SLAB(s, cpu));
+	s->active[cpu] = NULL;
+#endif
+}
+
+static void release_active(struct kmem_cache *s)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		free_active(s, cpu);
+}
+
 /*
  * Release all resources used by slab cache
  * (Use with caches setup using kmem_cache_setup)
@@ -1068,6 +1214,7 @@
 		return 1;
 
 	unregister_slab(s);
+	release_active(s);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_close);
@@ -1083,7 +1230,6 @@
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-
 static unsigned long count_objects(struct kmem_cache *s, struct list_head *list)
 {
 	int count = 0;
@@ -1108,11 +1254,11 @@
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct page *page = s->active[cpu];
+		struct active_slab *a = ACTIVE_SLAB(s, cpu);
 
-		if (page) {
+		if (a->page) {
 			nr_active++;
-			active += page->inuse;
+			active += a->page->inuse;
 		}
 	}
 
@@ -1129,6 +1275,34 @@
 		(nr_slabs - s->nr_partial - nr_active) * s->objects;
 }
 
+/*
+ * Use the cpu notifier to ensure that the per cpu active_slab
+ * structures are allocated and freed as processors come and go.
+ */
+static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+		unsigned long action,
+		void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		for_all_slabs(alloc_active, cpu);
+		break;
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		for_all_slabs(free_active, cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata slab_notifier =
+	{ &slab_cpuup_callback, NULL, 0 };
+
+
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -1147,22 +1321,10 @@
 
 __setup("slab_min_order=", setup_slab_min_order);
 
-static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
-		const char *name, int size)
-{
-
-	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-				0, NULL, NULL))
-		panic("Creation of kmalloc slab %s size=%d failed.\n",
-			name, size);
-	return s;
-}
-
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW;
 	struct kmem_cache *s;
-	struct kmem_cache *x;
 	size_t realsize;
 
 	BUG_ON(size < 0);
@@ -1175,9 +1337,9 @@
 		return s;
 
 	/* Dynamically create dma cache */
-	x = kmalloc(sizeof(struct kmem_cache), flags & ~(__GFP_DMA));
+	s = kmalloc(sizeof(struct kmem_cache), flags & ~(__GFP_DMA));
 
-	if (!x)
+	if (!s)
 		panic("Unable to allocate memory for dma cache\n");
 
 #ifdef KMALLOC_EXTRA
@@ -1191,7 +1353,8 @@
 		realsize = 192;
 #endif
 
-	s = create_kmalloc_cache(x, "kmalloc_dma", realsize);
+	kmem_cache_open(s, kasprintf(flags, "kmalloc-dma-%d", (unsigned int)realsize), realsize,
+			ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
 	kmalloc_caches_dma[index] = s;
 	return s;
 }
@@ -1229,22 +1392,52 @@
 }
 EXPORT_SYMBOL(kfree);
 
-void __init kmalloc_init(void)
+void __init kmem_cache_init(void)
 {
 	int i;
+	char *bootname = "kmalloc";
+
+	/*
+	 * NUMA Bootstrap only works if the slab for the active_slab
+	 * structure does not use an EXTRA slab.
+	 */
+	BUG_ON(ACTIVE_SLAB_NR > KMALLOC_SHIFT_HIGH || ACTIVE_SLAB_NR < 0);
+
+	kmem_cache_open(ACTIVE_SLAB_SLAB, "active_slab", 1 << ACTIVE_SLAB_NR,
+			ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
+	slab_state = PARTIAL;
+
+	/* Power of two sized caches */
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+		if (i != ACTIVE_SLAB_NR)
+			kmem_cache_open(
+				&kmalloc_caches[i - KMALLOC_SHIFT_LOW],
+				bootname, 1 << i,
+				ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		create_kmalloc_cache(
-			&kmalloc_caches[i - KMALLOC_SHIFT_LOW],
-			"kmalloc", 1 << i);
-	}
 #ifdef KMALLOC_EXTRA
 	/* Non-power of two caches */
-	create_kmalloc_cache(&kmalloc_caches
-		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1], "kmalloc", 96);
-	create_kmalloc_cache(&kmalloc_caches
-		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 2], "kmalloc", 192);
-#endif
+	kmem_cache_open(&kmalloc_caches
+		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1],
+		"kmalloc-96", 96,
+		ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
+	kmem_cache_open(&kmalloc_caches
+		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 2],
+		"kmalloc-192", 192,
+		ARCH_KMALLOC_MINALIGN, SLAB_PANIC, NULL, NULL);
+#endif
+	slab_state = UP;
+
+	/* We can provide the correct kmalloc names now that the caches are up */
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW; i++)
+		kmalloc_caches[i].name = kasprintf(GFP_KERNEL, "kmalloc-%d",
+						kmalloc_caches[i].size);
+
+	printk(KERN_INFO "Kmalloc cache initialized: Caches=%d"
+		" Min_order=%d.\n",
+		KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + KMALLOC_EXTRAS,
+		slab_min_order);
+
+	register_cpu_notifier(&slab_notifier);
 }
 
 /********************************************************************
@@ -1253,25 +1446,6 @@
 
 #define SLAB_MAX_ORDER 4
 
-/*
- * We can actually operate slabs any time after the page allocator is up.
- * slab_is_available() merely means that the kmalloc array is available.
- *
- * However, be aware that deriving allocators depends on kmalloc being
- * functional.
- */
-static int slab_up = 0;
-
-int slab_is_available(void) {
-	return slab_up;
-}
-
-void kmem_cache_init(void)
-{
-	kmalloc_init();
-	slab_up = 1;
-}
-
 static struct kmem_cache *__kmalloc_slab(size_t size)
 {
 	int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW;
@@ -1308,15 +1482,18 @@
 		if (s->size - sz <= sizeof(void *)) {
 			printk(KERN_INFO "SLUB: Merging slab_cache %s size %d"
 				" into kmalloc array size %d\n",
-				name, size, s->size);
+				name, (unsigned int)size, s->size);
 			return kmem_cache_dup(s);
 		}
 	}
 #endif
 
 	s = kmalloc(sizeof(struct kmem_cache), GFP_KERNEL);
-	if (!s)
+	if (!s) {
+		if (flags & SLAB_PANIC)
+			panic("Unable to allocate memory for slab %s\n", name);
 		return NULL;
+	}
 
 	if (!kmem_cache_open(s, name, size, align, flags, ctor, dtor)) {
 		kfree(s);
@@ -1341,24 +1518,6 @@
  *		Slab proc interface
  *******************************************************************/
 
-static DECLARE_RWSEM(slabstat_sem);
-
-LIST_HEAD(slab_caches);
-
-void register_slab(struct kmem_cache *s)
-{
-	down_write(&slabstat_sem);
-	list_add(&s->list, &slab_caches);
-	up_write(&slabstat_sem);
-}
-
-void unregister_slab(struct kmem_cache *s)
-{
-	down_write(&slabstat_sem);
-	list_add(&s->list, &slab_caches);
-	up_write(&slabstat_sem);
-}
-
 static void print_slabinfo_header(struct seq_file *m)
 {
 	/*
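
Not part of the patch above: the following is a small, self-contained user-space model of the per cpu active_slab fast path, included only to illustrate how the freelist handover in allocate() / kmem_cache_free() is meant to work. All names here (toy_active, toy_refill, toy_alloc, toy_free, OBJ_SIZE, OBJECTS) are invented for the sketch, the free pointer is kept at offset 0 instead of s->offset, and there is no locking, interrupt disabling, or deferred flushing; treat it as a rough model under those simplifications, not as kernel code.

#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE 64		/* stands in for s->size; free pointer kept at offset 0 */
#define OBJECTS  8		/* stands in for s->objects (objects per slab page) */

struct toy_active {		/* rough stand-in for struct active_slab */
	void **freelist;	/* chained through the first word of each free object */
	int nr_free;
	int referenced;
};

/* Rough model of switch_freelist: take over all free objects of one "page". */
static void toy_refill(struct toy_active *a, void *page_mem)
{
	char *base = page_mem;
	int i;

	a->freelist = NULL;
	for (i = OBJECTS - 1; i >= 0; i--) {
		void **object = (void **)(base + i * OBJ_SIZE);

		object[0] = a->freelist;
		a->freelist = object;
	}
	a->nr_free = OBJECTS;
}

/* Rough model of the have_object fast path in allocate(). */
static void *toy_alloc(struct toy_active *a)
{
	void **object;

	if (!a->nr_free)
		return NULL;	/* the real code refills or grabs a new slab here */
	object = a->freelist;
	a->freelist = object[0];
	a->nr_free--;
	a->referenced = 1;
	return object;
}

/* Rough model of the kmem_cache_free fast path when the object is in the active slab. */
static void toy_free(struct toy_active *a, void *x)
{
	void **object = x;

	object[0] = a->freelist;
	a->freelist = object;
	a->nr_free++;
}

int main(void)
{
	struct toy_active a = { NULL, 0, 0 };
	void *page_mem = malloc((size_t)OBJECTS * OBJ_SIZE);
	void *o1, *o2;

	if (!page_mem)
		return 1;
	toy_refill(&a, page_mem);
	o1 = toy_alloc(&a);
	o2 = toy_alloc(&a);
	printf("allocated %p and %p, %d objects left\n", o1, o2, a.nr_free);
	toy_free(&a, o1);
	printf("after one free: %d objects free\n", a.nr_free);
	free(page_mem);
	return 0;
}

Running the model prints the object count before and after a free, which mirrors the nr_free bookkeeping the patch keeps per cpu without touching the page struct on the fast path.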