From 47357eee6d4f9c4aea0f434028dfbd576776d0ee Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 26 Jul 2007 19:32:52 -0700
Subject: [PATCH] i386 implementation of slab alloc

---
 include/asm-i386/slub_def.h |   66 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/slub_def.h    |    4 ++-
 2 files changed, 69 insertions(+), 1 deletions(-)

diff --git a/include/asm-i386/slub_def.h b/include/asm-i386/slub_def.h
new file mode 100644
index 0000000..9b88cd8
--- /dev/null
+++ b/include/asm-i386/slub_def.h
@@ -0,0 +1,66 @@
+#define ARCH_HAS_SLAB_ALLOC
+static void __always_inline *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
+{
+	struct page *page;
+	void **object;
+	unsigned long flags;
+
+	preempt_disable();
+redo:
+	page = s->cpu_slab[smp_processor_id()];
+
+	if (unlikely(!page))
+		goto slow;
+
+	object = page->lockless_freelist;
+
+	if (unlikely(!object))
+		goto slow;
+#if 0
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto slow;
+#endif
+
+	if (cmpxchg_local(&page->lockless_freelist, object,
+			object[page->offset]) != object)
+		goto redo;
+	preempt_enable();
+
+	if (unlikely((gfpflags & __GFP_ZERO)))
+		memset(object, 0, s->objsize);
+
+	return object;
+
+slow:
+	local_irq_save(flags);
+	preempt_enable();
+	return __slab_alloc(s, gfpflags, node, addr, flags);
+}
+
+#define ARCH_HAS_SLAB_FREE
+static void __always_inline slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr)
+{
+	void **object = (void *)x;
+	unsigned long flags;
+
+	preempt_disable();
+	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
+		goto slow;
+
+	if (unlikely(SlabDebug(page)))
+		goto slow;
+
+	object[page->offset] = page->lockless_freelist;
+	page->lockless_freelist = object;
+	preempt_enable();
+	return;
+
+slow:
+	local_irq_save(flags);
+	preempt_enable();
+	__slab_free(s, page, x, addr);
+	local_irq_restore(flags);
+}
+
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 7260829..6ebf394 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,6 +217,8 @@ void *__slab_alloc(struct kmem_cache *s,
 void __slab_free(struct kmem_cache *s,
 		struct page *page, void *x, void *addr);
 
+#include <asm/slub_def.h>
+
 #ifndef ARCH_HAS_SLAB_ALLOC
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
@@ -253,7 +255,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 		page->lockless_freelist = object[page->offset];
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, s->objsize);
 
 	return object;
-- 
1.4.4.4
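
A note on the technique, with a stand-alone illustration.  The fast path
above is a lockless freelist pop: read the per-cpu list head, then use
cmpxchg_local() to swing it to the next object, retrying if an interrupt
on the same cpu allocated or freed something in between.  The user-space
sketch below models that idea under some simplifying assumptions: struct
fake_slab, sketch_alloc() and sketch_free() are invented names; the next
pointer is kept at offset 0 of each free object rather than at
object[page->offset]; GCC's __sync_val_compare_and_swap() stands in for
the kernel-only cmpxchg_local(); and preempt_disable(),
smp_processor_id() and the __slab_alloc()/__slab_free() slow paths are
left out.

/*
 * Minimal sketch of the lockless per-cpu freelist, not kernel code.
 */
#include <stdio.h>

struct fake_slab {
	void **lockless_freelist;	/* head of the per-cpu freelist */
};

/*
 * slab_free() fast path analogue: two plain stores link the object back
 * in at the front.  The kernel relies on preempt_disable() to keep the
 * list private to one cpu while it does this.
 */
static void sketch_free(struct fake_slab *s, void *x)
{
	void **object = x;

	object[0] = s->lockless_freelist;
	s->lockless_freelist = object;
}

/*
 * slab_alloc() fast path analogue: read the head, then atomically swing
 * it to the next object.  If the head changed in between (an interrupt,
 * in the kernel case), the cmpxchg fails and we retry.
 */
static void *sketch_alloc(struct fake_slab *s)
{
	void **object;

redo:
	object = s->lockless_freelist;
	if (!object)
		return NULL;	/* the kernel takes the slow path here */

	if (__sync_val_compare_and_swap(&s->lockless_freelist,
			object, (void **)object[0]) != object)
		goto redo;

	return object;
}

int main(void)
{
	static void *pool[4][8];	/* four fake 64-byte objects */
	struct fake_slab slab = { .lockless_freelist = NULL };
	int i;

	for (i = 0; i < 4; i++)
		sketch_free(&slab, pool[i]);

	/* Objects come back in LIFO order, as with the real freelist. */
	for (i = 0; i < 4; i++)
		printf("allocated object %p\n", sketch_alloc(&slab));

	return 0;
}

The point of cmpxchg_local() over plain cmpxchg() is that it only has to
be atomic against interrupts on the local cpu, so on i386 it can omit
the lock prefix; the user-space builtin used above is a full SMP
compare-and-swap and therefore stronger (and slower) than what the
kernel fast path actually needs.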