SLUB: kmem_cache_ops instead of constructors / destructors

This patch gets rid of constructors and destructors and replaces them
with a slab operations structure that is passed into SLUB. For backward
compatibility we provide a kmem_cache_create() emulation that can
construct a slab operations structure on the fly.

The new APIs to create slabs are:

Without any callbacks:

	slabhandle = KMEM_CACHE(<struct>, <flags>)

This creates a slab cache based on the structure definition, using the
structure's alignment, size and name. It is cleaner because the name
showing up in /sys/slab/xxx will be the structure name, so one can
search the source for it. The usual alignment attributes on the struct
control slab alignment.

Note: SLAB_HWCACHE_ALIGN is *not* supported as a flag. The flags do
*not* specify alignments; alignment is applied to the structure and
nowhere else.

Create a slab cache with kmem_cache_ops (please use only for special
slabs):

	KMEM_CACHE_OPS(<struct>, <flags>, <ops>)

Old kmem_cache_create() support:

Only kmem_cache_create() still accepts SLAB_HWCACHE_ALIGN, and only
*if* no other alignment is specified. In that case kmem_cache_create()
generates a proper alignment depending on the size of the structure.

Signed-off-by: Christoph Lameter

---
 include/linux/slab.h     |   48 ++++++++++++++++++++++------
 include/linux/slub_def.h |    3 -
 mm/slub.c                |   80 ++++++++++++++++++-----------------------
 3 files changed, 71 insertions(+), 60 deletions(-)

Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-05-05 08:55:33.000000000 -0700
+++ slub/include/linux/slab.h	2007-05-05 08:56:42.000000000 -0700
@@ -32,19 +32,21 @@ typedef struct kmem_cache kmem_cache_t _
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
-
 /*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
 int slab_is_available(void);
 
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
-			void (*)(void *, struct kmem_cache *, unsigned long),
-			void (*)(void *, struct kmem_cache *, unsigned long));
+struct kmem_cache_ops {
+	/* FIXME: ctor should only take the object as an argument. */
+	void (*ctor)(void *, struct kmem_cache *, unsigned long);
+	/* FIXME: Remove all destructors ? */
+	void (*dtor)(void *, struct kmem_cache *, unsigned long);
+};
+
+struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
+			unsigned long, const struct kmem_cache_ops *s);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -62,9 +64,14 @@ int kmem_ptr_validate(struct kmem_cache
  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  * then the objects will be properly aligned in SMP configurations.
 */
-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+#define KMEM_CACHE(__struct, __flags) __kmem_cache_create(#__struct,\
 		sizeof(struct __struct), __alignof__(struct __struct),\
-		(__flags), NULL, NULL)
+		(__flags), NULL)
+
+#define KMEM_CACHE_OPS(__struct, __flags, __ops) \
+	__kmem_cache_create(#__struct, sizeof(struct __struct), \
+	__alignof__(struct __struct), (__flags), (__ops))
+
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -236,6 +243,29 @@ extern void *__kmalloc_node_track_caller
 
 extern const struct seq_operations slabinfo_op;
 ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+/*
+ * Legacy functions
+ *
+ * The sole reason that these definitions are here is because of their
+ * frequent use. Remove when all call sites have been updated.
+ */
+#define SLAB_CTOR_CONSTRUCTOR	0x001UL
+
+static inline struct kmem_cache *kmem_cache_create(const char *s,
+	size_t size, size_t align, unsigned long flags,
+	void (*ctor)(void *, struct kmem_cache *, unsigned long),
+	void (*dtor)(void *, struct kmem_cache *, unsigned long))
+{
+	struct kmem_cache_ops *so = NULL;
+
+	if (ctor || dtor) {
+		so = kzalloc(sizeof(struct kmem_cache_ops), GFP_KERNEL);
+		so->ctor = ctor;
+		so->dtor = dtor;
+	}
+	return __kmem_cache_create(s, size, align, flags, so);
+}
+
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-05 08:55:33.000000000 -0700
+++ slub/mm/slub.c	2007-05-05 08:59:32.000000000 -0700
@@ -212,6 +212,11 @@ static inline struct kmem_cache_node *ge
 #endif
 }
 
+struct kmem_cache_ops default_kmem_cache_ops = {
+	NULL,
+	NULL
+};
+
 /*
  * Object debugging
  */
@@ -812,8 +817,8 @@ static void setup_object(struct kmem_cac
 		init_tracking(s, object);
 	}
 
-	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+	if (unlikely(s->kmem_cache_ops->ctor))
+		s->kmem_cache_ops->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -871,16 +876,18 @@ out:
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int pages = 1 << s->order;
+	void (*dtor)(void *, struct kmem_cache *, unsigned long) =
+					s->kmem_cache_ops->dtor;
 
-	if (unlikely(PageError(page) || s->dtor)) {
+	if (unlikely(PageError(page) || dtor)) {
 		void *start = page_address(page);
 		void *end = start + (pages << PAGE_SHIFT);
 		void *p;
 
 		slab_pad_check(s, page);
 		for (p = start; p <= end - s->size; p += s->size) {
-			if (s->dtor)
-				s->dtor(p, s, 0);
+			if (dtor)
+				dtor(p, s, 0);
 			check_object(s, page, p, 0);
 		}
 	}
@@ -1661,7 +1668,7 @@ static int calculate_sizes(struct kmem_c
 	 * then we should never poison the object itself.
 	 */
 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-			!s->ctor && !s->dtor)
+			!s->kmem_cache_ops->ctor && !s->kmem_cache_ops->dtor)
 		s->flags |= __OBJECT_POISON;
 	else
 		s->flags &= ~__OBJECT_POISON;
@@ -1690,7 +1697,7 @@ static int calculate_sizes(struct kmem_c
 	s->inuse = size;
 
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-		s->ctor || s->dtor)) {
+		s->kmem_cache_ops->ctor || s->kmem_cache_ops->dtor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
 		 * permitted to overwrite the first word of the object on
@@ -1721,8 +1728,8 @@ static int calculate_sizes(struct kmem_c
 		size += sizeof(void *);
 
 	/*
 	 * Determine the alignment based on various parameters that the
-	 * user specified (this is unecessarily complex due to the attempt
-	 * to be compatible with SLAB. Should be cleaned up some day).
+	 * user specified and the dynamic determination of cache line size
+	 * on bootup.
 	 */
 	align = calculate_alignment(flags, align, s->objsize);
@@ -1774,13 +1781,11 @@ static int __init finish_bootstrap(void)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		const struct kmem_cache_ops *kmem_cache_ops)
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
-	s->ctor = ctor;
-	s->dtor = dtor;
+	s->kmem_cache_ops = kmem_cache_ops;
 	s->objsize = size;
 	s->flags = flags;
 	s->align = align;
@@ -1800,7 +1805,7 @@ static int kmem_cache_open(struct kmem_c
 	if (s->size >= 65535 * sizeof(void *)) {
 		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(ctor || dtor);
+		BUG_ON(kmem_cache_ops->ctor || kmem_cache_ops->dtor);
 	}
 	else
 		/*
@@ -2035,7 +2040,7 @@ static struct kmem_cache *create_kmalloc
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL, NULL))
+			flags, &default_kmem_cache_ops))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2356,7 +2361,7 @@ static int slab_unmergeable(struct kmem_
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if (s->ctor || s->dtor)
+	if (s->kmem_cache_ops != &default_kmem_cache_ops)
 		return 1;
 
 	return 0;
@@ -2364,15 +2369,14 @@
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		const struct kmem_cache_ops *kmem_cache_ops)
 {
 	struct list_head *h;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
 
-	if (ctor || dtor)
+	if (kmem_cache_ops != &default_kmem_cache_ops)
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
@@ -2407,15 +2411,17 @@ static struct kmem_cache *find_mergeable
 	return NULL;
 }
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		const struct kmem_cache_ops *kmem_cache_ops)
 {
 	struct kmem_cache *s;
 
+	if (!kmem_cache_ops)
+		kmem_cache_ops = &default_kmem_cache_ops;
+
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, dtor, ctor);
+	s = find_mergeable(size, align, flags, kmem_cache_ops);
 	if (s) {
 		s->refcount++;
 		/*
@@ -2429,7 +2435,7 @@ struct kmem_cache *kmem_cache_create(con
 	} else {
 		s = kmalloc(kmem_size, GFP_KERNEL);
 		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor, dtor)) {
+				size, align, flags, kmem_cache_ops)) {
 			if (sysfs_slab_add(s)) {
 				kfree(s);
 				goto err;
@@ -2449,7 +2455,7 @@ err:
 		s = NULL;
 	return s;
 }
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(__kmem_cache_create);
 
 void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
@@ -3004,28 +3010,6 @@ static ssize_t order_show(struct kmem_ca
 }
 SLAB_ATTR_RO(order);
 
-static ssize_t ctor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->ctor) {
-		int n = sprint_symbol(buf, (unsigned long)s->ctor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(ctor);
-
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->dtor) {
-		int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(dtor);
-
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3256,8 +3240,6 @@ static struct attribute * slab_attrs[] =
 	&slabs_attr.attr,
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
-	&ctor_attr.attr,
-	&dtor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
 	&sanity_checks_attr.attr,

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-05 08:55:33.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-05 09:00:08.000000000 -0700
@@ -39,8 +39,7 @@ struct kmem_cache {
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
+	const struct kmem_cache_ops *kmem_cache_ops;
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
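
For illustration, here is a sketch of what a hypothetical user of the new
interface could look like. It is not part of the patch; struct my_object,
my_ctor(), plain_cache, ctor_cache and my_caches_init() are made-up names.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_object {
		spinlock_t lock;
		int value;
	};

	static struct kmem_cache *plain_cache;
	static struct kmem_cache *ctor_cache;

	/* Constructors still take the cache pointer and flags (see the FIXME above). */
	static void my_ctor(void *object, struct kmem_cache *s, unsigned long flags)
	{
		struct my_object *obj = object;

		spin_lock_init(&obj->lock);
		obj->value = 0;
	}

	static const struct kmem_cache_ops my_ops = {
		.ctor = my_ctor,
	};

	static int __init my_caches_init(void)
	{
		/* No callbacks: name, size and alignment are taken from the struct. */
		plain_cache = KMEM_CACHE(my_object, 0);

		/* Special slab that needs a constructor. */
		ctor_cache = KMEM_CACHE_OPS(my_object, 0, &my_ops);

		if (!plain_cache || !ctor_cache)
			return -ENOMEM;
		return 0;
	}

Because a cache created through KMEM_CACHE_OPS() carries a non-default
kmem_cache_ops pointer, it is never merged with other slabs, while plain
KMEM_CACHE() caches remain mergeable.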