From 6a33634b1c403feaded3cd2290380d64d81a40f9 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 26 Jul 2007 19:13:16 -0700
Subject: [PATCH] Move inline functions to slub_def.h

---
 include/linux/slub_def.h | 101 ++++++++++++++++++++++++++++++++++++++++++++++
 mm/slub.c                |  91 +----------------------------------------
 2 files changed, 103 insertions(+), 89 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 124270d..7260829 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,8 @@
 #include
 #include
 #include
+#include
+#include
 
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
@@ -197,4 +199,103 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 }
 #endif
 
+
+#ifdef CONFIG_SLUB_DEBUG
+#define SLABDEBUG (1 << PG_error)
+#else
+#define SLABDEBUG 0
+#endif
+
+static inline int SlabDebug(struct page *page)
+{
+	return page->flags & SLABDEBUG;
+}
+
+void *__slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr, unsigned long flags);
+
+void __slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr);
+
+#ifndef ARCH_HAS_SLAB_ALLOC
+/*
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
+ * have the fastpath folded into their functions. So no function call
+ * overhead for requests that can be satisfied on the fastpath.
+ *
+ * The fastpath works by first checking if the lockless freelist can be used.
+ * If not then __slab_alloc is called for slow processing.
+ *
+ * Otherwise we can simply pick the next object from the lockless free list.
+ */
+static void __always_inline *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
+{
+	struct page *page;
+	void **object;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	page = s->cpu_slab[smp_processor_id()];
+
+	if (unlikely(!page))
+		goto slow;
+
+	object = page->lockless_freelist;
+
+	if (unlikely(!object))
+		goto slow;
+#if 0
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto slow;
+#endif
+
+	page->lockless_freelist = object[page->offset];
+	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, s->objsize);
+
+	return object;
+slow:
+	return __slab_alloc(s, gfpflags, node, addr, flags);
+}
+#endif
+
+#ifndef ARCH_HAS_SLAB_FREE
+/*
+ * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
+ * can perform fastpath freeing without additional function calls.
+ *
+ * The fastpath is only possible if we are freeing to the current cpu slab
+ * of this processor. This typically the case if we have just allocated
+ * the item before.
+ *
+ * If fastpath is not possible then fall back to __slab_free where we deal
+ * with all sorts of special processing.
+ */
+static void __always_inline slab_free(struct kmem_cache *s,
+		struct page *page, void *x, void *addr)
+{
+	void **object = (void *)x;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
+		goto slow;
+
+	if (unlikely(SlabDebug(page)))
+		goto slow;
+
+	object[page->offset] = page->lockless_freelist;
+	page->lockless_freelist = object;
+	local_irq_restore(flags);
+	return;
+
+slow:
+	__slab_free(s, page, x, addr);
+	local_irq_restore(flags);
+}
+#endif
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slub.c b/mm/slub.c
index 7df9f2b..2a09696 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -101,12 +101,6 @@
 
 #define FROZEN (1 << PG_active)
 
-#ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG (1 << PG_error)
-#else
-#define SLABDEBUG 0
-#endif
-
 static inline int SlabFrozen(struct page *page)
 {
 	return page->flags & FROZEN;
@@ -122,11 +116,6 @@ static inline void ClearSlabFrozen(struct page *page)
 	page->flags &= ~FROZEN;
 }
 
-static inline int SlabDebug(struct page *page)
-{
-	return page->flags & SLABDEBUG;
-}
-
 static inline void SetSlabDebug(struct page *page)
 {
 	page->flags |= SLABDEBUG;
@@ -1452,7 +1441,7 @@ static void flush_all(struct kmem_cache *s)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is slowest path since we may sleep.
  */
-static void *__slab_alloc(struct kmem_cache *s,
+void *__slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr, unsigned long flags)
 {
 	void **object;
@@ -1536,48 +1525,6 @@ debug:
 	goto out;
 }
 
-/*
- * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
- * have the fastpath folded into their functions. So no function call
- * overhead for requests that can be satisfied on the fastpath.
- *
- * The fastpath works by first checking if the lockless freelist can be used.
- * If not then __slab_alloc is called for slow processing.
- *
- * Otherwise we can simply pick the next object from the lockless free list.
- */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
-{
-	struct page *page;
-	void **object;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	page = s->cpu_slab[smp_processor_id()];
-
-	if (unlikely(!page))
-		goto slow;
-
-	object = page->lockless_freelist;
-
-	if (unlikely(!object))
-		goto slow;
-
-	if (unlikely(node != -1 && page_to_nid(page) != node))
-		goto slow;
-
-	page->lockless_freelist = object[page->offset];
-	local_irq_restore(flags);
-
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, s->objsize);
-
-	return object;
-slow:
-	return __slab_alloc(s, gfpflags, node, addr, flags);
-}
-
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
@@ -1600,7 +1547,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * lock and free the item. If there is no additional partial page
  * handling required then we can return immediately.
  */
-static void __slab_free(struct kmem_cache *s, struct page *page,
+void __slab_free(struct kmem_cache *s, struct page *page,
 			void *x, void *addr)
 {
 	void *prior;
@@ -1650,40 +1597,6 @@ debug:
 	goto checks_ok;
 }
 
-/*
- * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
- * can perform fastpath freeing without additional function calls.
- *
- * The fastpath is only possible if we are freeing to the current cpu slab
- * of this processor. This typically the case if we have just allocated
- * the item before.
- *
- * If fastpath is not possible then fall back to __slab_free where we deal
- * with all sorts of special processing.
- */
-static void __always_inline slab_free(struct kmem_cache *s,
-		struct page *page, void *x, void *addr)
-{
-	void **object = (void *)x;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (unlikely(page != s->cpu_slab[smp_processor_id()]))
-		goto slow;
-
-	if (unlikely(SlabDebug(page)))
-		goto slow;
-
-	object[page->offset] = page->lockless_freelist;
-	page->lockless_freelist = object;
-	local_irq_restore(flags);
-	return;
-
-slow:
-	__slab_free(s, page, x, addr);
-	local_irq_restore(flags);
-}
-
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	struct page *page;
-- 
1.4.4.4
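
Note: the comments moved by this patch describe the lockless freelist technique that both fast paths rely on: each free object in a slab page stores a pointer to the next free object at a fixed word offset inside itself, so allocation is a pointer pop and freeing back to the same page is a pointer push. The stand-alone user-space sketch below (hypothetical toy_* names, not part of this patch or of SLUB) models only that linkage; it deliberately omits the per-cpu slab lookup, interrupt disabling, NUMA check and debug handling that the real slab_alloc()/slab_free() perform.

/*
 * Illustrative user-space model of a lockless freelist fast path.
 * Hypothetical names; not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE	64	/* size of one object in the toy slab  */
#define NR_OBJECTS	8	/* objects carved out of one "page"    */

struct toy_page {
	void *memory;		/* backing storage for the objects     */
	void **freelist;	/* first free object, NULL when empty  */
	unsigned int offset;	/* word index of the link, cf. page->offset */
};

/* Carve the page into objects and chain them through word 'offset'. */
static void toy_page_init(struct toy_page *page)
{
	char *mem = malloc((size_t)OBJ_SIZE * NR_OBJECTS);
	int i;

	page->memory = mem;
	page->offset = 0;
	page->freelist = NULL;

	for (i = NR_OBJECTS - 1; i >= 0; i--) {
		void **object = (void **)(mem + i * OBJ_SIZE);

		object[page->offset] = page->freelist;	/* push onto freelist */
		page->freelist = object;
	}
}

/* Fast-path allocation: pop the first object off the freelist. */
static void *toy_alloc(struct toy_page *page)
{
	void **object = page->freelist;

	if (!object)
		return NULL;	/* the real code would "goto slow" here */

	page->freelist = object[page->offset];
	return object;
}

/* Fast-path free: push the object back onto the freelist. */
static void toy_free(struct toy_page *page, void *x)
{
	void **object = (void **)x;

	object[page->offset] = page->freelist;
	page->freelist = object;
}

int main(void)
{
	struct toy_page page;
	void *a, *b;

	toy_page_init(&page);
	a = toy_alloc(&page);
	b = toy_alloc(&page);
	printf("allocated %p and %p\n", a, b);
	toy_free(&page, a);
	printf("reallocated %p (LIFO reuse of the last freed object)\n",
	       toy_alloc(&page));
	free(page.memory);
	return 0;
}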