Get rid of slub_def.h

Fold slub_def.h into slab.h

Signed-off-by: Christoph Lameter
---
 include/linux/slab.h     |  245 ++++++++++++++++++++++++++++++++++++++---------
 include/linux/slub_def.h |  200 --------------------------------------
 2 files changed, 199 insertions(+), 246 deletions(-)

Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h	2007-08-29 20:04:45.000000000 -0700
+++ linux-2.6/include/linux/slab.h	2007-08-29 20:23:54.000000000 -0700
@@ -1,9 +1,7 @@
 /*
- * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
+ * SLUB : A Slab allocator without object queues.
  *
- * (C) SGI 2006, Christoph Lameter
- * Cleaned up and restructured to ease the addition of alternative
- * implementations of SLAB allocators.
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #ifndef _LINUX_SLAB_H
@@ -13,6 +11,9 @@
 
 #include
 #include
+#include
+#include
+#include
 
 /*
  * Flags to pass to kmem_cache_create().
@@ -42,30 +43,6 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
-/*
- * struct kmem_cache related prototypes
- */
-void __init kmem_cache_init(void);
-int slab_is_available(void);
-
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
-			void (*)(struct kmem_cache *, void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
-void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);
-const char *kmem_cache_name(struct kmem_cache *);
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
-
-/*
- * Please use this macro to create slab caches. Simply specify the
- * name of the structure and maybe some flags that are listed above.
- *
- * The alignment of the struct determines object alignment. If you
- * f.e. add ____cacheline_aligned_in_smp to the struct declaration
- * then the objects will be properly aligned in SMP configurations.
- */
 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
@@ -92,26 +69,186 @@ void * __must_check krealloc(const void
 void kfree(const void *);
 size_t ksize(const void *);
 
+struct kmem_cache_node {
+	spinlock_t list_lock;	/* Protect partial list and nr_partial */
+	unsigned long nr_partial;
+	atomic_long_t nr_slabs;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	struct list_head full;
+#endif
+};
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+	/* Used for retriving partial slabs etc */
+	unsigned long flags;
+	int size;		/* The size of an object including meta data */
+	int objsize;		/* The size of an object without meta data */
+	int offset;		/* Free pointer offset. */
+	int order;
+
+	/*
+	 * Avoid an extra cache line for UP, SMP and for the node local to
+	 * struct kmem_cache.
+	 */
+	struct kmem_cache_node local_node;
+
+	/* Allocation and freeing of slabs */
+	int objects;		/* Number of objects in slab */
+	int refcount;		/* Refcount for slab cache destroy */
+	void (*ctor)(struct kmem_cache *, void *);
+	int inuse;		/* Offset to metadata */
+	int align;		/* Alignment */
+	const char *name;	/* Name (only for display!) */
+	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
+	struct kobject kobj;	/* For sysfs */
+#endif
+
+#ifdef CONFIG_NUMA
+	int defrag_ratio;
+	struct kmem_cache_node *node[MAX_NUMNODES];
+#endif
+	struct page *cpu_slab[NR_CPUS];
+};
+
+/*
+ * Kmalloc subsystem.
+ */
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+
+void __init kmem_cache_init(void);
+int slab_is_available(void);
+
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(struct kmem_cache *, void *));
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_free(struct kmem_cache *, void *);
+unsigned int kmem_cache_size(struct kmem_cache *);
+const char *kmem_cache_name(struct kmem_cache *);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+
 /*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
+ * We keep the general caches in an array of slab caches that are used for
+ * 2^x bytes of allocations.
+ */
+extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+
+/*
+ * Sorry that the following has to be that ugly but some versions of GCC
+ * have trouble with constant propagation and loops.
+ */
+static inline int kmalloc_index(size_t size)
+{
+	if (!size)
+		return 0;
+
+	if (size > KMALLOC_MAX_SIZE)
+		return -1;
+
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
+	if (size > 64 && size <= 96)
+		return 1;
+	if (size > 128 && size <= 192)
+		return 2;
+	if (size <= 8) return 3;
+	if (size <= 16) return 4;
+	if (size <= 32) return 5;
+	if (size <= 64) return 6;
+	if (size <= 128) return 7;
+	if (size <= 256) return 8;
+	if (size <= 512) return 9;
+	if (size <= 1024) return 10;
+	if (size <= 2 * 1024) return 11;
+	if (size <= 4 * 1024) return 12;
+	if (size <= 8 * 1024) return 13;
+	if (size <= 16 * 1024) return 14;
+	if (size <= 32 * 1024) return 15;
+	if (size <= 64 * 1024) return 16;
+	if (size <= 128 * 1024) return 17;
+	if (size <= 256 * 1024) return 18;
+	if (size <= 512 * 1024) return 19;
+	if (size <= 1024 * 1024) return 20;
+	if (size <= 2 * 1024 * 1024) return 21;
+	if (size <= 4 * 1024 * 1024) return 22;
+	if (size <= 8 * 1024 * 1024) return 23;
+	if (size <= 16 * 1024 * 1024) return 24;
+	if (size <= 32 * 1024 * 1024) return 25;
+	return -1;
+
+/*
+ * What we really wanted to do and cannot do because of compiler issues is:
+ *	int i;
+ *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+ *		if (size <= (1 << i))
+ *			return i;
+ */
+}
+
+/*
+ * Find the slab cache for a given combination of allocation flags and size.
  *
- * See each allocator definition file for additional comments and
- * implementation notes.
+ * This ought to end up with a global pointer to the right cache
+ * in kmalloc_caches.
  */
-#include
+static inline struct kmem_cache *kmalloc_slab(size_t size)
+{
+	int index = kmalloc_index(size);
+
+	if (index == 0)
+		return NULL;
+
+	/*
+	 * This function only gets expanded if __builtin_constant_p(size), so
+	 * testing it here shouldn't be needed. But some versions of gcc need
+	 * help.
+	 */
+	if (__builtin_constant_p(size) && index < 0) {
+		/*
+		 * Generate a link failure. Would be great if we could
+		 * do something to stop the compile here.
+		 */
+		extern void __kmalloc_size_too_large(void);
+		__kmalloc_size_too_large();
+	}
+	return &kmalloc_caches[index];
+}
+
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#endif
+
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+		struct kmem_cache *s = kmalloc_slab(size);
+
+		if (!s)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc(s, flags);
+	} else
+		return __kmalloc(size, flags);
+}
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -171,7 +308,23 @@ static inline void *kcalloc(size_t n, si
 	return __kmalloc(n * size, flags | __GFP_ZERO);
 }
 
-#if !defined(CONFIG_NUMA)
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+		struct kmem_cache *s = kmalloc_slab(size);
+
+		if (!s)
+			return ZERO_SIZE_PTR;
+
+		return kmem_cache_alloc_node(s, flags, node);
+	} else
+		return __kmalloc_node(size, flags, node);
+}
+#else
 /**
  * kmalloc_node - allocate memory from a specific node
  * @size: how many bytes of memory are required.
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2007-08-29 20:07:37.000000000 -0700
+++ /dev/null	1970-01-01 00:00:00.000000000 +0000
@@ -1,200 +0,0 @@
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include
-#include
-#include
-#include
-
-struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
-	unsigned long nr_partial;
-	atomic_long_t nr_slabs;
-	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
-	struct list_head full;
-#endif
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-	/* Used for retriving partial slabs etc */
-	unsigned long flags;
-	int size;		/* The size of an object including meta data */
-	int objsize;		/* The size of an object without meta data */
-	int offset;		/* Free pointer offset. */
-	int order;
-
-	/*
-	 * Avoid an extra cache line for UP, SMP and for the node local to
-	 * struct kmem_cache.
-	 */
-	struct kmem_cache_node local_node;
-
-	/* Allocation and freeing of slabs */
-	int objects;		/* Number of objects in slab */
-	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(struct kmem_cache *, void *);
-	int inuse;		/* Offset to metadata */
-	int align;		/* Alignment */
-	const char *name;	/* Name (only for display!) */
-	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
-	struct kobject kobj;	/* For sysfs */
-#endif
-
-#ifdef CONFIG_NUMA
-	int defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#endif
-	struct page *cpu_slab[NR_CPUS];
-};
-
-/*
- * Kmalloc subsystem.
- */
-#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
-
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
-/*
- * We keep the general caches in an array of slab caches that are used for
- * 2^x bytes of allocations.
- */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
-
-/*
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
-static inline int kmalloc_index(size_t size)
-{
-	if (!size)
-		return 0;
-
-	if (size > KMALLOC_MAX_SIZE)
-		return -1;
-
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	if (size > 64 && size <= 96)
-		return 1;
-	if (size > 128 && size <= 192)
-		return 2;
-	if (size <= 8) return 3;
-	if (size <= 16) return 4;
-	if (size <= 32) return 5;
-	if (size <= 64) return 6;
-	if (size <= 128) return 7;
-	if (size <= 256) return 8;
-	if (size <= 512) return 9;
-	if (size <= 1024) return 10;
-	if (size <= 2 * 1024) return 11;
-	if (size <= 4 * 1024) return 12;
-	if (size <= 8 * 1024) return 13;
-	if (size <= 16 * 1024) return 14;
-	if (size <= 32 * 1024) return 15;
-	if (size <= 64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
-	if (size <= 1024 * 1024) return 20;
-	if (size <= 2 * 1024 * 1024) return 21;
-	if (size <= 4 * 1024 * 1024) return 22;
-	if (size <= 8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
-	return -1;
-
-/*
- * What we really wanted to do and cannot do because of compiler issues is:
- *	int i;
- *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
- *		if (size <= (1 << i))
- *			return i;
- */
-}
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	/*
-	 * This function only gets expanded if __builtin_constant_p(size), so
-	 * testing it here shouldn't be needed. But some versions of gcc need
-	 * help.
-	 */
-	if (__builtin_constant_p(size) && index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
-	return &kmalloc_caches[index];
-}
-
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
-}
-#endif
-
-#endif /* _LINUX_SLUB_DEF_H */
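
For illustration only (not part of the patch): the compile-time size-to-cache
mapping that kmalloc_index() performs can be tried out in a stand-alone
userspace program. The sketch below mirrors the index logic moved into slab.h
above; the function name kmalloc_index_demo() and the assumed 8-byte minimum
object size (KMALLOC_SHIFT_LOW == 3) are illustrative placeholders, and the
ladder is cut off at 4 KB to keep the example short.

/*
 * Userspace sketch of the kmalloc_index() mapping; assumes an 8-byte
 * minimum kmalloc object (KMALLOC_SHIFT_LOW == 3). Illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

static int kmalloc_index_demo(size_t size)
{
	if (!size)
		return 0;			/* handled via ZERO_SIZE_PTR */
	if (size <= 8)
		return 3;			/* assumed KMALLOC_SHIFT_LOW */
	if (size > 64 && size <= 96)
		return 1;			/* special 96-byte cache */
	if (size > 128 && size <= 192)
		return 2;			/* special 192-byte cache */
	if (size <= 16)   return 4;
	if (size <= 32)   return 5;
	if (size <= 64)   return 6;
	if (size <= 128)  return 7;
	if (size <= 256)  return 8;
	if (size <= 512)  return 9;
	if (size <= 1024) return 10;
	if (size <= 2048) return 11;
	if (size <= 4096) return 12;
	return -1;			/* larger sizes omitted in this sketch */
}

int main(void)
{
	static const size_t sizes[] = { 1, 8, 9, 90, 100, 200, 3000, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4zu -> general cache index %d\n",
		       sizes[i], kmalloc_index_demo(sizes[i]));
	return 0;
}

With a constant size, gcc folds the corresponding chain of comparisons in the
kernel's inline kmalloc() down to a single kmem_cache_alloc() call on the
matching kmalloc_caches[] entry; the demo just makes that index visible.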