From 47357eee6d4f9c4aea0f434028dfbd576776d0ee Mon Sep 17 00:00:00 2001
From: Christoph Lameter <clameter@sgi.com>
Date: Thu, 26 Jul 2007 19:32:52 -0700
Subject: [PATCH] i386 implementation of slab alloc

---
 include/asm-i386/slub_def.h   |   68 +++++++++++++++++++++++++++++++++++++
 include/asm-x86_64/slub_def.h |   73 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/slub_def.h      |   14 ++++++--
 3 files changed, 153 insertions(+), 2 deletions(-)

Index: linux-2.6.23-rc1/include/asm-i386/slub_def.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.23-rc1/include/asm-i386/slub_def.h	2007-07-26 21:56:03.000000000 -0700
@@ -0,0 +1,68 @@
+#define ARCH_HAS_SLAB_ALLOC
+static void __always_inline *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
+{
+	struct page *page;
+	void **object;
+	unsigned long flags;
+
+	preempt_disable();
+redo:
+	page = s->cpu_slab[smp_processor_id()];
+
+	if (unlikely(!page))
+		goto slow;
+
+	object = page->lockless_freelist;
+
+	if (unlikely(!object))
+		goto slow;
+#if 0
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto slow;
+#endif
+
+	if (cmpxchg_local(&page->lockless_freelist, object,
+			object[page->offset]) != object)
+		goto redo;
+	preempt_enable();
+
+	if (unlikely((gfpflags & __GFP_ZERO)))
+		memset(object, 0, s->objsize);
+
+	return object;
+
+slow:
+	preempt_enable();
+	return __slab_alloc(s, gfpflags, node, addr);
+}
+
+#define ARCH_HAS_SLAB_FREE
+static void __always_inline slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr)
+{
+	void **object = (void *)x;
+	unsigned long flags;
+	void **ll;
+
+	preempt_disable();
+redo:
+	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
+		goto slow;
+
+	if (unlikely(SlabDebug(page)))
+		goto slow;
+
+	ll = page->lockless_freelist;
+	object[page->offset] = ll;
+	if (cmpxchg_local(&page->lockless_freelist, ll, object) != ll)
+		goto redo;
+
+	preempt_enable();
+	return;
+
+slow:
+	preempt_enable();
+	__slab_free(s, page, x, addr);
+}
+
Index: linux-2.6.23-rc1/include/linux/slub_def.h
===================================================================
--- linux-2.6.23-rc1.orig/include/linux/slub_def.h	2007-07-26 21:56:03.000000000 -0700
+++ linux-2.6.23-rc1/include/linux/slub_def.h	2007-07-26 23:05:30.000000000 -0700
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
@@ -217,6 +218,8 @@ void *__slab_alloc(struct kmem_cache *s,
 void __slab_free(struct kmem_cache *s, struct page *page, void *x, void *addr);
 
+#include <asm/slub_def.h>
+
 #ifndef ARCH_HAS_SLAB_ALLOC
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
@@ -234,6 +237,7 @@ static void __always_inline *slab_alloc(
 	struct page *page;
 	void **object;
 	unsigned long flags;
+	INIT_PC(x);
 
 	local_irq_save(flags);
 	page = s->cpu_slab[smp_processor_id()];
@@ -253,13 +257,16 @@ static void __always_inline *slab_alloc(
 	page->lockless_freelist = object[page->offset];
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely((gfpflags & __GFP_ZERO)))
 		memset(object, 0, s->objsize);
 
+	pc_bytes(&x, s->objsize, PC_SLAB_ALLOC_FAST);
 	return object;
 
 slow:
 	local_irq_restore(flags);
-	return __slab_alloc(s, gfpflags, node, addr);
+	object = __slab_alloc(s, gfpflags, node, addr);
+	pc_bytes(&x, s->objsize, PC_SLAB_ALLOC_SLOW);
+	return object;
 }
 #endif
@@ -280,6 +287,7 @@ static void __always_inline slab_free(st
 {
 	void **object = (void *)x;
 	unsigned long flags;
+	INIT_PC(xx);
 
 	local_irq_save(flags);
 	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
@@ -291,11 +299,13 @@ static void __always_inline slab_free(st
 	object[page->offset] = page->lockless_freelist;
 	page->lockless_freelist = object;
 	local_irq_restore(flags);
+	pc_bytes(&xx, s->objsize, PC_SLAB_FREE_FAST);
 	return;
 
 slow:
 	local_irq_restore(flags);
 	__slab_free(s, page, x, addr);
+	pc_bytes(&xx, s->objsize, PC_SLAB_FREE_SLOW);
 }
 #endif
 
Index: linux-2.6.23-rc1/include/asm-x86_64/slub_def.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.23-rc1/include/asm-x86_64/slub_def.h	2007-07-26 23:05:48.000000000 -0700
@@ -0,0 +1,73 @@
+#define ARCH_HAS_SLAB_ALLOC
+static void __always_inline *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
+{
+	struct page *page;
+	void **object;
+	INIT_PC(x);
+
+	preempt_disable();
+redo:
+	page = s->cpu_slab[smp_processor_id()];
+
+	if (unlikely(!page))
+		goto slow;
+
+	object = page->lockless_freelist;
+
+	if (unlikely(!object))
+		goto slow;
+#if 0
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto slow;
+#endif
+
+	if (cmpxchg_local(&page->lockless_freelist, object,
+			object[page->offset]) != object)
+		goto redo;
+	preempt_enable();
+
+	if (unlikely((gfpflags & __GFP_ZERO)))
+		memset(object, 0, s->objsize);
+
+	pc_bytes(&x, s->objsize, PC_SLAB_ALLOC_FAST);
+	return object;
+
+slow:
+	preempt_enable();
+	object = __slab_alloc(s, gfpflags, node, addr);
+	pc_bytes(&x, s->objsize, PC_SLAB_ALLOC_SLOW);
+	return object;
+}
+
+#define ARCH_HAS_SLAB_FREE
+static void __always_inline slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr)
+{
+	void **object = (void *)x;
+	void **ll;
+	INIT_PC(xx);
+
+	preempt_disable();
+redo:
+	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
+		goto slow;
+
+	if (unlikely(SlabDebug(page)))
+		goto slow;
+
+	ll = page->lockless_freelist;
+	object[page->offset] = ll;
+	if (cmpxchg_local(&page->lockless_freelist, ll, object) != ll)
+		goto redo;
+
+	preempt_enable();
+	pc_bytes(&xx, s->objsize, PC_SLAB_FREE_FAST);
+	return;
+
+slow:
+	preempt_enable();
+	__slab_free(s, page, x, addr);
+	pc_bytes(&xx, s->objsize, PC_SLAB_FREE_SLOW);
+}
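
---

For readers who want the retry pattern in isolation, below is a minimal
user-space sketch of the lockless freelist pop/push that the cmpxchg_local
fastpaths above implement. It is an illustration, not part of the patch:
C11 atomic_compare_exchange_weak stands in for the kernel's cmpxchg_local,
the per-CPU slab indirection and the slow paths are omitted, and the names
(struct node, freelist_pop, freelist_push) are made up for the example.
Note also that the kernel fastpath needs no ABA protection because
cmpxchg_local operates on the current CPU's slab with preemption disabled;
a freelist shared between threads, as sketched here, would need more care
if popped concurrently.

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
};

static _Atomic(struct node *) freelist;

/* Pop the head object; retry if another update wins the race. */
static struct node *freelist_pop(void)
{
	struct node *object = atomic_load(&freelist);

	/* On CAS failure, 'object' is reloaded with the new head. */
	while (object &&
	       !atomic_compare_exchange_weak(&freelist, &object, object->next))
		;
	return object;
}

/* Push an object by linking it in front of the current head. */
static void freelist_push(struct node *object)
{
	object->next = atomic_load(&freelist);
	/* On CAS failure, 'object->next' is reloaded with the new head. */
	while (!atomic_compare_exchange_weak(&freelist, &object->next, object))
		;
}

int main(void)
{
	struct node a, b;

	freelist_push(&a);
	freelist_push(&b);
	printf("popped %p then %p\n",
	       (void *)freelist_pop(), (void *)freelist_pop());
	return 0;
}

The point of the patch is visible in the same shape: the generic
slub_def.h fastpath brackets the freelist update with local_irq_save()/
local_irq_restore(), while the arch-specific versions replace that with
preempt_disable() plus a cmpxchg_local() retry loop, the intent being to
avoid the cost of toggling the interrupt flag on these processors.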