From 6a33634b1c403feaded3cd2290380d64d81a40f9 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 26 Jul 2007 19:13:16 -0700
Subject: [PATCH] Move inline functions to slub_def.h

---
 include/linux/slub_def.h |  102 +++++++++++++++++++++++++++++++++++++++++++++
 mm/slub.c                |   94 +------------------------------------------
 2 files changed, 106 insertions(+), 90 deletions(-)

Index: linux-2.6.23-rc1/include/linux/slub_def.h
===================================================================
--- linux-2.6.23-rc1.orig/include/linux/slub_def.h	2007-07-26 20:04:23.000000000 -0700
+++ linux-2.6.23-rc1/include/linux/slub_def.h	2007-07-26 20:14:22.000000000 -0700
@@ -10,6 +10,8 @@
 #include
 #include
 #include
+#include
+#include
 
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
@@ -197,4 +199,104 @@ static inline void *kmalloc_node(size_t
 }
 #endif
+
+#ifdef CONFIG_SLUB_DEBUG
+#define SLABDEBUG (1 << PG_error)
+#else
+#define SLABDEBUG 0
+#endif
+
+static inline int SlabDebug(struct page *page)
+{
+	return page->flags & SLABDEBUG;
+}
+
+void *__slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr);
+
+void __slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr);
+
+#ifndef ARCH_HAS_SLAB_ALLOC
+/*
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
+ * have the fastpath folded into their functions. So no function call
+ * overhead for requests that can be satisfied on the fastpath.
+ *
+ * The fastpath works by first checking if the lockless freelist can be used.
+ * If not then __slab_alloc is called for slow processing.
+ *
+ * Otherwise we can simply pick the next object from the lockless free list.
+ */
+static void __always_inline *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
+{
+	struct page *page;
+	void **object;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	page = s->cpu_slab[smp_processor_id()];
+
+	if (unlikely(!page))
+		goto slow;
+
+	object = page->lockless_freelist;
+
+	if (unlikely(!object))
+		goto slow;
+#if 0
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto slow;
+#endif
+
+	page->lockless_freelist = object[page->offset];
+	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, s->objsize);
+
+	return object;
+slow:
+	local_irq_restore(flags);
+	return __slab_alloc(s, gfpflags, node, addr);
+}
+#endif
+
+#ifndef ARCH_HAS_SLAB_FREE
+/*
+ * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
+ * can perform fastpath freeing without additional function calls.
+ *
+ * The fastpath is only possible if we are freeing to the current cpu slab
+ * of this processor. This is typically the case if we have just allocated
+ * the item before.
+ *
+ * If fastpath is not possible then fall back to __slab_free where we deal
+ * with all sorts of special processing.
+ */
+static void __always_inline slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr)
+{
+	void **object = (void *)x;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
+		goto slow;
+
+	if (unlikely(SlabDebug(page)))
+		goto slow;
+
+	object[page->offset] = page->lockless_freelist;
+	page->lockless_freelist = object;
+	local_irq_restore(flags);
+	return;
+
+slow:
+	local_irq_restore(flags);
+	__slab_free(s, page, x, addr);
+}
+#endif
+
 
 #endif /* _LINUX_SLUB_DEF_H */
Index: linux-2.6.23-rc1/mm/slub.c
===================================================================
--- linux-2.6.23-rc1.orig/mm/slub.c	2007-07-26 20:08:55.000000000 -0700
+++ linux-2.6.23-rc1/mm/slub.c	2007-07-26 20:11:06.000000000 -0700
@@ -101,12 +101,6 @@
 #define FROZEN (1 << PG_active)
 
-#ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG (1 << PG_error)
-#else
-#define SLABDEBUG 0
-#endif
-
 static inline int SlabFrozen(struct page *page)
 {
 	return page->flags & FROZEN;
 }
@@ -122,11 +116,6 @@ static inline void ClearSlabFrozen(struc
 	page->flags &= ~FROZEN;
 }
-static inline int SlabDebug(struct page *page)
-{
-	return page->flags & SLABDEBUG;
-}
-
 static inline void SetSlabDebug(struct page *page)
 {
 	page->flags |= SLABDEBUG;
 }
@@ -1452,7 +1441,7 @@ static void flush_all(struct kmem_cache
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is slowest path since we may sleep.
  */
-static void *__slab_alloc(struct kmem_cache *s,
+void *__slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
@@ -1539,49 +1528,7 @@ debug:
 	page->freelist = object[page->offset];
 	goto out;
 }
-
-/*
- * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
- * have the fastpath folded into their functions. So no function call
- * overhead for requests that can be satisfied on the fastpath.
- *
- * The fastpath works by first checking if the lockless freelist can be used.
- * If not then __slab_alloc is called for slow processing.
- *
- * Otherwise we can simply pick the next object from the lockless free list.
- */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
-{
-	struct page *page;
-	void **object;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	page = s->cpu_slab[smp_processor_id()];
-
-	if (unlikely(!page))
-		goto slow;
-
-	object = page->lockless_freelist;
-
-	if (unlikely(!object))
-		goto slow;
-
-	if (unlikely(node != -1 && page_to_nid(page) != node))
-		goto slow;
-
-	page->lockless_freelist = object[page->offset];
-	local_irq_restore(flags);
-
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, s->objsize);
-
-	return object;
-slow:
-	local_irq_restore(flags);
-	return __slab_alloc(s, gfpflags, node, addr);
-}
+EXPORT_SYMBOL(__slab_alloc);
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
@@ -1605,7 +1552,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * lock and free the item. If there is no additional partial page
  * handling required then we can return immediately.
  */
-static void __slab_free(struct kmem_cache *s, struct page *page,
+void __slab_free(struct kmem_cache *s, struct page *page,
 		void *x, void *addr)
 {
 	void *prior;
@@ -1658,40 +1605,7 @@ debug:
 		goto out_unlock;
 	goto checks_ok;
 }
-
-/*
- * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
- * can perform fastpath freeing without additional function calls.
- *
- * The fastpath is only possible if we are freeing to the current cpu slab
- * of this processor. This typically the case if we have just allocated
- * the item before.
- *
- * If fastpath is not possible then fall back to __slab_free where we deal
- * with all sorts of special processing.
- */
-static void __always_inline slab_free(struct kmem_cache *s,
-		struct page *page, void *x, void *addr)
-{
-	void **object = (void *)x;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
-		goto slow;
-
-	if (unlikely(SlabDebug(page)))
-		goto slow;
-
-	object[page->offset] = page->lockless_freelist;
-	page->lockless_freelist = object;
-	local_irq_restore(flags);
-	return;
-
-slow:
-	local_irq_restore(flags);
-	__slab_free(s, page, x, addr);
-}
+EXPORT_SYMBOL(__slab_free);
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
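For readers following the move, here is a minimal user-space sketch of the freelist linking that both fastpaths above rely on: a free object stores the pointer to the next free object inside itself, at the word index kept in page->offset, so slab_alloc() pops the head of page->lockless_freelist and slab_free() pushes the object back on. This is illustration only, not kernel code: the names toy_slab, toy_alloc_fast and toy_free_fast are invented here, and the sketch ignores everything the real fastpaths handle (per-cpu slab lookup, interrupt disabling, SlabDebug, the __slab_alloc/__slab_free slow paths, and the NUMA node check that the header copy compiles out under #if 0).

/*
 * Illustrative sketch only -- models the lockless freelist used by
 * slab_alloc()/slab_free() above: the "next free" pointer lives inside
 * each free object at a fixed word index.
 */
#include <stdio.h>

struct toy_slab {
	void *freelist;		/* head of the freelist (page->lockless_freelist) */
	unsigned int offset;	/* word index of the next-free link (page->offset) */
};

/* Pop the head object, as slab_alloc() does when the freelist is non-empty. */
static void *toy_alloc_fast(struct toy_slab *s)
{
	void **object = s->freelist;

	if (!object)
		return NULL;	/* the real code would call __slab_alloc() here */
	s->freelist = object[s->offset];
	return object;
}

/* Push an object back, as slab_free() does when it hits the current cpu slab. */
static void toy_free_fast(struct toy_slab *s, void *x)
{
	void **object = x;

	object[s->offset] = s->freelist;
	s->freelist = object;
}

int main(void)
{
	void *storage[4][8];	/* four 64-byte "objects"; link stored in word 0 */
	struct toy_slab s = { .freelist = NULL, .offset = 0 };
	int i;

	/* Seed the freelist by pushing every object once. */
	for (i = 0; i < 4; i++)
		toy_free_fast(&s, storage[i]);

	printf("first alloc:  %p (storage[3], last pushed)\n", toy_alloc_fast(&s));
	printf("second alloc: %p (storage[2])\n", toy_alloc_fast(&s));
	return 0;
}

Keeping the link inside the object means a free object needs no extra metadata, which is why each fastpath reduces to a couple of loads and stores once interrupts are disabled.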