[RFC] Slab allocators: callbacks instead of constructors/destructors We may need some more callbacks for reclaim purposes in the slab allocators soon. The existing constructor/destructor scheme is pretty inflexible and cannot provide additional callbacks. Moreover the way of handling these is a bit cumbersome since they have to be specified on each kmem_cache_create() call. Let's change the kmem_cache_create call so that: 1. One callback function can be specified per slab cache. 2. Flags are passed to the function to determine the operation that should be performed by the callback. For a constructor this would be SLAB_CALLBACK_CTOR and for a destructor SLAB_CALLBACK_DTOR. 3. The callback returns an exit code to show error conditions. This will require some modifications to all callsites for kmem_cache_create. A. No constructor is being used Simply remove the last NULL parameter to kmem_cache_create. I would recommend using KMEM_CACHE instead of kmem_cache_create. The KMEM_CACHE macro can configure the slab depending on the definition of the structure. It will provide the alignment of the structure for the slab objects. i.e. B. Constructor The constructor must now become the callback. This means it needs to return a value and it should check for the type of operation it should perform. kmem_cache_create must add SLAB_CALLBACK_CTOR to the flags to enable the callback. Drop the last NULL parameter. C. Destructor We still have such uses? Works analogously to the constructor. I'd like to get rid of those though. 
Signed-off-by: Christoph Lameter --- block/ll_rw_blk.c | 6 +-- drivers/acpi/osl.c | 2 - fs/bio.c | 2 - fs/block_dev.c | 9 ++-- fs/buffer.c | 10 ++--- fs/configfs/mount.c | 2 - fs/dcache.c | 5 +- fs/dcookies.c | 5 -- fs/dnotify.c | 3 - fs/dquot.c | 7 +-- fs/eventpoll.c | 9 +--- fs/ext2/super.c | 11 +++-- fs/ext3/super.c | 11 +++-- fs/fcntl.c | 3 - fs/fuse/dev.c | 2 - fs/fuse/inode.c | 11 +++-- fs/inode.c | 12 +++--- fs/inotify_user.c | 8 +--- fs/locks.c | 12 +++--- fs/mbcache.c | 2 - fs/namespace.c | 2 - fs/nfs/direct.c | 2 - fs/nfs/inode.c | 11 +++-- fs/nfs/pagelist.c | 5 -- fs/nfs/read.c | 5 -- fs/nfs/write.c | 5 -- fs/nfsd/nfs4state.c | 12 ++---- fs/proc/inode.c | 14 ++++--- fs/reiserfs/super.c | 11 +++-- fs/revoke.c | 9 +++- fs/sysfs/mount.c | 2 - fs/udf/super.c | 11 +++-- fs/xfs/linux-2.6/kmem.h | 8 ++-- fs/xfs/linux-2.6/xfs_super.c | 7 ++- include/linux/slab.h | 25 +++++++++--- include/linux/slub_def.h | 3 - ipc/mqueue.c | 10 +++-- kernel/fork.c | 25 ++++++------ kernel/posix-timers.c | 2 - kernel/user.c | 2 - lib/idr.c | 11 ++++- lib/radix-tree.c | 11 ++++- mm/mempolicy.c | 4 +- mm/rmap.c | 7 ++- mm/shmem.c | 8 ++-- mm/slab.c | 84 +++++++++++++++++++----------------------- mm/slob.c | 19 ++++----- mm/slub.c | 86 ++++++++++++++++--------------------------- mm/swap_prefetch.c | 2 - net/socket.c | 13 ++++-- 50 files changed, 280 insertions(+), 268 deletions(-) Index: slub/include/linux/slab.h =================================================================== --- slub.orig/include/linux/slab.h 2007-04-30 15:04:34.000000000 -0700 +++ slub/include/linux/slab.h 2007-04-30 16:35:16.000000000 -0700 @@ -31,9 +31,8 @@ typedef struct kmem_cache kmem_cache_t _ #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ - -/* Flags passed to a constructor functions */ -#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, 
then deconstructor */ +#define SLAB_CALLBACK_CTOR 0x00000200UL /* Use callback for constructor */ +#define SLAB_CALLBACK_DTOR 0x00001000UL /* Use callback for destructor */ /* * struct kmem_cache related prototypes @@ -41,10 +40,22 @@ typedef struct kmem_cache kmem_cache_t _ void __init kmem_cache_init(void); int slab_is_available(void); -struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, +struct kmem_cache *kmem_cache_new(const char *, size_t, size_t, unsigned long, - void (*)(void *, struct kmem_cache *, unsigned long), - void (*)(void *, struct kmem_cache *, unsigned long)); + int (*)(void *, struct kmem_cache *, unsigned long)); + +/* + * Backward compatible function. We cannot take any constructors and + * destructors. + */ +static inline struct kmem_cache *kmem_cache_create(const char *s, + size_t size, size_t align, unsigned long flags, + struct fail *a, struct fail *b) +{ + BUG_ON(a || b); + return kmem_cache_new(s, size, align, flags, NULL); +} + void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void *kmem_cache_alloc(struct kmem_cache *, gfp_t); @@ -64,7 +75,7 @@ int kmem_ptr_validate(struct kmem_cache */ #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ - (__flags), NULL, NULL) + (__flags), NULL) #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); Index: slub/include/linux/slub_def.h =================================================================== --- slub.orig/include/linux/slub_def.h 2007-04-30 15:02:18.000000000 -0700 +++ slub/include/linux/slub_def.h 2007-04-30 15:02:41.000000000 -0700 @@ -39,8 +39,7 @@ struct kmem_cache { /* Allocation and freeing of slabs */ int objects; /* Number of objects in slab */ int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(void *, struct kmem_cache *, unsigned long); - void (*dtor)(void *, struct kmem_cache *, unsigned long); 
+ int (*callback)(void *, struct kmem_cache *, unsigned long); int inuse; /* Offset to metadata */ int align; /* Alignment */ const char *name; /* Name (only for display!) */ Index: slub/mm/slub.c =================================================================== --- slub.orig/mm/slub.c 2007-04-30 15:02:44.000000000 -0700 +++ slub/mm/slub.c 2007-04-30 16:35:45.000000000 -0700 @@ -148,7 +148,8 @@ * Set of flags that will prevent slab merging */ #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DESTROY_BY_RCU) + SLAB_TRACE | SLAB_DESTROY_BY_RCU | \ + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR) #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ SLAB_CACHE_DMA) @@ -809,8 +810,8 @@ static void setup_object(struct kmem_cac init_tracking(s, object); } - if (unlikely(s->ctor)) - s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR); + if (unlikely(s->flags & SLAB_CALLBACK_CTOR)) + s->callback(object, s, SLAB_CALLBACK_CTOR); } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -868,15 +869,17 @@ static void __free_slab(struct kmem_cach { int pages = 1 << s->order; - if (unlikely(PageError(page) || s->dtor)) { + if (unlikely(PageError(page) || + (s->flags & SLAB_CALLBACK_DTOR))) { + void *start = page_address(page); void *end = start + (pages << PAGE_SHIFT); void *p; slab_pad_check(s, page); for (p = start; p <= end - s->size; p += s->size) { - if (s->dtor) - s->dtor(p, s, 0); + if (s->flags & SLAB_CALLBACK_DTOR) + s->callback(p, s, 0); check_object(s, page, p, 0); } } @@ -1617,8 +1620,9 @@ static int calculate_sizes(struct kmem_c * the slab may touch the object after free or before allocation * then we should never poison the object itself. 
*/ - if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && - !s->ctor && !s->dtor) + if ((flags & SLAB_POISON) && + !(flags & (SLAB_DESTROY_BY_RCU | + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR))) s->flags |= __OBJECT_POISON; else s->flags &= ~__OBJECT_POISON; @@ -1646,8 +1650,8 @@ static int calculate_sizes(struct kmem_c */ s->inuse = size; - if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || - s->ctor || s->dtor)) { + if (flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON | + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)) { /* * Relocate free pointer after the object if it is not * permitted to overwrite the first word of the object on @@ -1731,13 +1735,11 @@ static int __init finish_bootstrap(void) static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(void *, struct kmem_cache *, unsigned long), - void (*dtor)(void *, struct kmem_cache *, unsigned long)) + int (*callback)(void *, struct kmem_cache *, unsigned long)) { memset(s, 0, kmem_size); s->name = name; - s->ctor = ctor; - s->dtor = dtor; + s->callback = callback; s->objsize = size; s->flags = flags; s->align = align; @@ -1751,14 +1753,13 @@ static int kmem_cache_open(struct kmem_c * On 32 bit platforms the limit is 256k. On 64bit platforms * the limit is 512k. * - * Debugging or ctor/dtors may create a need to move the free + * Debugging or callbacks may create a need to move the free * pointer. Fail if this happens. */ - if (s->size >= 65535 * sizeof(void *)) { + if (s->size >= 65535 * sizeof(void *)) BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON | - SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); - BUG_ON(ctor || dtor); - } + SLAB_STORE_USER | SLAB_DESTROY_BY_RCU | + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)); else /* * Enable debugging if selected on the kernel commandline. 
@@ -1880,7 +1881,7 @@ static int kmem_cache_close(struct kmem_ /* * Close a cache and release the kmem_cache structure - * (must be used for caches created using kmem_cache_create) + * (must be used for caches created using kmem_cache_new) */ void kmem_cache_destroy(struct kmem_cache *s) { @@ -1992,7 +1993,7 @@ static struct kmem_cache *create_kmalloc down_write(&slub_lock); if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, - flags, NULL, NULL)) + flags, NULL)) goto panic; list_add(&s->list, &slab_caches); @@ -2313,25 +2314,17 @@ static int slab_unmergeable(struct kmem_ if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) return 1; - if (s->ctor || s->dtor) - return 1; - return 0; } static struct kmem_cache *find_mergeable(size_t size, - size_t align, unsigned long flags, - void (*ctor)(void *, struct kmem_cache *, unsigned long), - void (*dtor)(void *, struct kmem_cache *, unsigned long)) + size_t align, unsigned long flags) { struct list_head *h; if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) return NULL; - if (ctor || dtor) - return NULL; - size = ALIGN(size, sizeof(void *)); align = calculate_alignment(flags, align, size); size = ALIGN(size, align); @@ -2364,15 +2357,14 @@ static struct kmem_cache *find_mergeable return NULL; } -struct kmem_cache *kmem_cache_create(const char *name, size_t size, +struct kmem_cache *kmem_cache_new(const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(void *, struct kmem_cache *, unsigned long), - void (*dtor)(void *, struct kmem_cache *, unsigned long)) + int (*callback)(void *, struct kmem_cache *, unsigned long)) { struct kmem_cache *s; down_write(&slub_lock); - s = find_mergeable(size, align, flags, dtor, ctor); + s = find_mergeable(size, align, flags); if (s) { s->refcount++; /* @@ -2386,7 +2378,7 @@ struct kmem_cache *kmem_cache_create(con } else { s = kmalloc(kmem_size, GFP_KERNEL); if (s && kmem_cache_open(s, GFP_KERNEL, name, - size, align, flags, ctor, dtor)) { + size, 
align, flags, callback)) { if (sysfs_slab_add(s)) { kfree(s); goto err; @@ -2406,7 +2398,7 @@ err: s = NULL; return s; } -EXPORT_SYMBOL(kmem_cache_create); +EXPORT_SYMBOL(kmem_cache_new); void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags) { @@ -2961,27 +2953,16 @@ static ssize_t order_show(struct kmem_ca } SLAB_ATTR_RO(order); -static ssize_t ctor_show(struct kmem_cache *s, char *buf) -{ - if (s->ctor) { - int n = sprint_symbol(buf, (unsigned long)s->ctor); - - return n + sprintf(buf + n, "\n"); - } - return 0; -} -SLAB_ATTR_RO(ctor); - -static ssize_t dtor_show(struct kmem_cache *s, char *buf) +static ssize_t callback_show(struct kmem_cache *s, char *buf) { - if (s->dtor) { - int n = sprint_symbol(buf, (unsigned long)s->dtor); + if (s->callback) { + int n = sprint_symbol(buf, (unsigned long)s->callback); return n + sprintf(buf + n, "\n"); } return 0; } -SLAB_ATTR_RO(dtor); +SLAB_ATTR_RO(callback); static ssize_t aliases_show(struct kmem_cache *s, char *buf) { @@ -3213,8 +3194,7 @@ static struct attribute * slab_attrs[] = &slabs_attr.attr, &partial_attr.attr, &cpu_slabs_attr.attr, - &ctor_attr.attr, - &dtor_attr.attr, + &callback_attr.attr, &aliases_attr.attr, &align_attr.attr, &sanity_checks_attr.attr, Index: slub/block/ll_rw_blk.c =================================================================== --- slub.orig/block/ll_rw_blk.c 2007-04-30 15:27:24.000000000 -0700 +++ slub/block/ll_rw_blk.c 2007-04-30 15:27:45.000000000 -0700 @@ -3695,13 +3695,13 @@ int __init blk_dev_init(void) panic("Failed to create kblockd\n"); request_cachep = kmem_cache_create("blkdev_requests", - sizeof(struct request), 0, SLAB_PANIC, NULL, NULL); + sizeof(struct request), 0, SLAB_PANIC, NULL); requestq_cachep = kmem_cache_create("blkdev_queue", - sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL); + sizeof(request_queue_t), 0, SLAB_PANIC, NULL); iocontext_cachep = kmem_cache_create("blkdev_ioc", - sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); + sizeof(struct io_context), 
0, SLAB_PANIC, NULL); for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); Index: slub/fs/bio.c =================================================================== --- slub.orig/fs/bio.c 2007-04-30 15:27:51.000000000 -0700 +++ slub/fs/bio.c 2007-04-30 15:28:20.000000000 -0700 @@ -1187,7 +1187,7 @@ static void __init biovec_init_slabs(voi size = bvs->nr_vecs * sizeof(struct bio_vec); bvs->slab = kmem_cache_create(bvs->name, size, 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } } Index: slub/fs/block_dev.c =================================================================== --- slub.orig/fs/block_dev.c 2007-04-30 15:28:28.000000000 -0700 +++ slub/fs/block_dev.c 2007-04-30 15:29:25.000000000 -0700 @@ -455,12 +455,12 @@ static void bdev_destroy_inode(struct in kmem_cache_free(bdev_cachep, bdi); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { memset(bdev, 0, sizeof(*bdev)); mutex_init(&bdev->bd_mutex); sema_init(&bdev->bd_mount_sem, 1); @@ -471,6 +471,7 @@ static void init_once(void * foo, struct #endif inode_init_once(&ei->vfs_inode); } + return 0; } static inline void __bd_forget(struct inode *inode) @@ -520,8 +521,8 @@ void __init bdev_cache_init(void) int err; bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), - init_once, NULL); + SLAB_MEM_SPREAD|SLAB_PANIC|SLAB_CALLBACK_CTOR), + init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); Index: slub/fs/buffer.c =================================================================== --- slub.orig/fs/buffer.c 2007-04-30 15:29:34.000000000 -0700 
+++ slub/fs/buffer.c 2007-04-30 15:30:57.000000000 -0700 @@ -2928,15 +2928,16 @@ void free_buffer_head(struct buffer_head } EXPORT_SYMBOL(free_buffer_head); -static void +static int init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) { - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { struct buffer_head * bh = (struct buffer_head *)data; memset(bh, 0, sizeof(*bh)); INIT_LIST_HEAD(&bh->b_assoc_buffers); } + return 0; } static void buffer_exit_cpu(int cpu) @@ -2968,9 +2969,8 @@ void __init buffer_init(void) bh_cachep = kmem_cache_create("buffer_head", sizeof(struct buffer_head), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| - SLAB_MEM_SPREAD), - init_buffer_head, - NULL); + SLAB_MEM_SPREAD|SLAB_CALLBACK_CTOR), + init_buffer_head); /* * Limit the bh occupancy to 10% of ZONE_NORMAL Index: slub/fs/dcache.c =================================================================== --- slub.orig/fs/dcache.c 2007-04-30 15:31:12.000000000 -0700 +++ slub/fs/dcache.c 2007-04-30 15:32:04.000000000 -0700 @@ -2190,10 +2190,9 @@ void __init vfs_caches_init(unsigned lon mempages -= reserve; names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + filp_cachep = KMEM_CACHE(file, SLAB_HWCACHE_ALIGN|SLAB_PANIC); dcache_init(mempages); inode_init(mempages); Index: slub/fs/dcookies.c =================================================================== --- slub.orig/fs/dcookies.c 2007-04-30 15:32:12.000000000 -0700 +++ slub/fs/dcookies.c 2007-04-30 15:32:41.000000000 -0700 @@ -203,10 +203,7 @@ static int dcookie_init(void) unsigned int i, hash_bits; int err = -ENOMEM; - dcookie_cache = kmem_cache_create("dcookie_cache", - sizeof(struct dcookie_struct), - 0, 0, NULL, NULL); - + dcookie_cache = KMEM_CACHE(dcookie_struct, 0); if (!dcookie_cache) goto out; 
Index: slub/fs/dnotify.c =================================================================== --- slub.orig/fs/dnotify.c 2007-04-30 15:32:51.000000000 -0700 +++ slub/fs/dnotify.c 2007-04-30 15:33:13.000000000 -0700 @@ -175,8 +175,7 @@ EXPORT_SYMBOL_GPL(dnotify_parent); static int __init dnotify_init(void) { - dn_cache = kmem_cache_create("dnotify_cache", - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL); + dn_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC); return 0; } Index: slub/fs/dquot.c =================================================================== --- slub.orig/fs/dquot.c 2007-04-30 15:33:36.000000000 -0700 +++ slub/fs/dquot.c 2007-04-30 15:36:31.000000000 -0700 @@ -1848,11 +1848,8 @@ static int __init dquot_init(void) register_sysctl_table(sys_table); - dquot_cachep = kmem_cache_create("dquot", - sizeof(struct dquot), sizeof(unsigned long) * 4, - (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_PANIC), - NULL, NULL); + dquot_cachep = KMEM_CACHE(dquot, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD|SLAB_PANIC); order = 0; dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); Index: slub/fs/eventpoll.c =================================================================== --- slub.orig/fs/eventpoll.c 2007-04-30 15:36:42.000000000 -0700 +++ slub/fs/eventpoll.c 2007-04-30 15:37:42.000000000 -0700 @@ -1397,14 +1397,11 @@ static int __init eventpoll_init(void) ep_poll_safewake_init(&psw); /* Allocates slab cache used to allocate "struct epitem" items */ - epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), - 0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC, - NULL, NULL); + epi_cache = KMEM_CACHE(epitem, + SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC); /* Allocates slab cache used to allocate "struct eppoll_entry" */ - pwq_cache = kmem_cache_create("eventpoll_pwq", - sizeof(struct eppoll_entry), 0, - EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL); + pwq_cache = KMEM_CACHE(eppoll_entry, 
EPI_SLAB_DEBUG|SLAB_PANIC); return 0; } Index: slub/fs/fcntl.c =================================================================== --- slub.orig/fs/fcntl.c 2007-04-30 15:37:50.000000000 -0700 +++ slub/fs/fcntl.c 2007-04-30 15:38:06.000000000 -0700 @@ -637,8 +637,7 @@ EXPORT_SYMBOL(kill_fasync); static int __init fasync_init(void) { - fasync_cache = kmem_cache_create("fasync_cache", - sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL); + fasync_cache = KMEM_CACHE(fasync_struct, SLAB_PANIC); return 0; } Index: slub/fs/inode.c =================================================================== --- slub.orig/fs/inode.c 2007-04-30 15:38:19.000000000 -0700 +++ slub/fs/inode.c 2007-04-30 15:40:09.000000000 -0700 @@ -219,12 +219,15 @@ void inode_init_once(struct inode *inode EXPORT_SYMBOL(inode_init_once); -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct inode * inode = (struct inode *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(inode); + return 0; + } + return -ENOSYS; } /* @@ -1398,9 +1401,8 @@ void __init inode_init(unsigned long mem sizeof(struct inode), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| - SLAB_MEM_SPREAD), - init_once, - NULL); + SLAB_MEM_SPREAD|SLAB_CALLBACK_CTOR), + init_once); register_shrinker(&icache_shrinker); /* Hash may have been set up in inode_init_early */ Index: slub/fs/inotify_user.c =================================================================== --- slub.orig/fs/inotify_user.c 2007-04-30 15:40:19.000000000 -0700 +++ slub/fs/inotify_user.c 2007-04-30 15:41:27.000000000 -0700 @@ -714,12 +714,8 @@ static int __init inotify_user_setup(voi inotify_max_user_instances = 128; inotify_max_user_watches = 8192; - watch_cachep = kmem_cache_create("inotify_watch_cache", - sizeof(struct inotify_user_watch), - 0, SLAB_PANIC, NULL, NULL); - event_cachep = 
kmem_cache_create("inotify_event_cache", - sizeof(struct inotify_kernel_event), - 0, SLAB_PANIC, NULL, NULL); + watch_cachep = KMEM_CACHE(inotify_user_watch, SLAB_PANIC); + event_cachep = KMEM_CACHE(inotify_kernel_event, SLAB_PANIC); return 0; } Index: slub/fs/locks.c =================================================================== --- slub.orig/fs/locks.c 2007-04-30 15:41:41.000000000 -0700 +++ slub/fs/locks.c 2007-04-30 15:59:14.000000000 -0700 @@ -199,14 +199,15 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. */ -static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) +static int init_once(void *foo, struct kmem_cache *cache, unsigned long flags) { struct file_lock *lock = (struct file_lock *) foo; - if (!(flags & SLAB_CTOR_CONSTRUCTOR)) - return; + if (!(flags & SLAB_CALLBACK_CTOR)) + return -ENOSYS; locks_init_lock(lock); + return 0; } static void locks_copy_private(struct file_lock *new, struct file_lock *fl) @@ -2270,8 +2271,9 @@ EXPORT_SYMBOL(lock_may_write); static int __init filelock_init(void) { filelock_cache = kmem_cache_create("file_lock_cache", - sizeof(struct file_lock), 0, SLAB_PANIC, - init_once, NULL); + sizeof(struct file_lock), 0, + SLAB_PANIC|SLAB_CALLBACK_CTOR, + init_once); return 0; } Index: slub/fs/mbcache.c =================================================================== --- slub.orig/fs/mbcache.c 2007-04-30 15:42:49.000000000 -0700 +++ slub/fs/mbcache.c 2007-04-30 15:43:04.000000000 -0700 @@ -292,7 +292,7 @@ mb_cache_create(const char *name, struct INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]); } cache->c_entry_cache = kmem_cache_create(name, entry_size, 0, - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL); + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); if (!cache->c_entry_cache) goto fail; Index: slub/fs/namespace.c =================================================================== --- slub.orig/fs/namespace.c 2007-04-30 
15:43:15.000000000 -0700 +++ slub/fs/namespace.c 2007-04-30 15:43:21.000000000 -0700 @@ -1825,7 +1825,7 @@ void __init mnt_init(unsigned long mempa init_rwsem(&namespace_sem); mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); Index: slub/fs/revoke.c =================================================================== --- slub.orig/fs/revoke.c 2007-04-30 15:43:29.000000000 -0700 +++ slub/fs/revoke.c 2007-04-30 16:00:06.000000000 -0700 @@ -704,15 +704,17 @@ static struct file_system_type revokefs_ .kill_sb = kill_anon_super }; -static void revokefs_init_inode(void *obj, struct kmem_cache *cache, +static int revokefs_init_inode(void *obj, struct kmem_cache *cache, unsigned long flags) { struct revokefs_inode_info *info = obj; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { info->owner = NULL; inode_init_once(&info->vfs_inode); + return 0; } + return -ENOSYS; } static int __init revokefs_init(void) @@ -724,7 +726,8 @@ static int __init revokefs_init(void) sizeof(struct revokefs_inode_info), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_MEM_SPREAD), revokefs_init_inode, NULL); + SLAB_MEM_SPREAD | SLAB_CALLBACK_CTOR), + revokefs_init_inode); if (!revokefs_inode_cache) goto out; Index: slub/ipc/mqueue.c =================================================================== --- slub.orig/ipc/mqueue.c 2007-04-30 15:44:19.000000000 -0700 +++ slub/ipc/mqueue.c 2007-04-30 15:45:10.000000000 -0700 @@ -211,12 +211,15 @@ static int mqueue_get_sb(struct file_sys return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); } -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; - if (flags & 
SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(&p->vfs_inode); + return 0; + } + return -ENOSYS; } static struct inode *mqueue_alloc_inode(struct super_block *sb) @@ -1250,7 +1253,8 @@ static int __init init_mqueue_fs(void) mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, - SLAB_HWCACHE_ALIGN, init_once, NULL); + SLAB_HWCACHE_ALIGN|SLAB_CALLBACK_CTOR, + init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; Index: slub/kernel/fork.c =================================================================== --- slub.orig/kernel/fork.c 2007-04-30 15:45:43.000000000 -0700 +++ slub/kernel/fork.c 2007-04-30 15:58:01.000000000 -0700 @@ -137,7 +137,7 @@ void __init fork_init(unsigned long memp /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", sizeof(struct task_struct), - ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL); + ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL); #endif /* @@ -1429,38 +1429,39 @@ long do_fork(unsigned long clone_flags, #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif -static void sighand_ctor(void *data, struct kmem_cache *cachep, +static int sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) { struct sighand_struct *sighand = data; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { spin_lock_init(&sighand->siglock); INIT_LIST_HEAD(&sighand->signalfd_list); + return 0; } + return -ENOSYS; } void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU, - sighand_ctor, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC| + SLAB_DESTROY_BY_RCU|SLAB_CALLBACK_CTOR, + sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); files_cachep = 
kmem_cache_create("files_cache", sizeof(struct files_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); - vm_area_cachep = kmem_cache_create("vm_area_struct", - sizeof(struct vm_area_struct), 0, - SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } /* Index: slub/kernel/posix-timers.c =================================================================== --- slub.orig/kernel/posix-timers.c 2007-04-30 15:47:06.000000000 -0700 +++ slub/kernel/posix-timers.c 2007-04-30 15:47:16.000000000 -0700 @@ -241,7 +241,7 @@ static __init int init_posix_timers(void register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); posix_timers_cache = kmem_cache_create("posix_timers_cache", - sizeof (struct k_itimer), 0, 0, NULL, NULL); + sizeof (struct k_itimer), 0, 0, NULL); idr_init(&posix_timers_id); return 0; } Index: slub/kernel/user.c =================================================================== --- slub.orig/kernel/user.c 2007-04-30 15:47:32.000000000 -0700 +++ slub/kernel/user.c 2007-04-30 15:47:39.000000000 -0700 @@ -208,7 +208,7 @@ static int __init uid_cache_init(void) int n; uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct), - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); for(n = 0; n < UIDHASH_SZ; ++n) INIT_LIST_HEAD(uidhash_table + n); Index: slub/lib/idr.c =================================================================== --- slub.orig/lib/idr.c 2007-04-30 15:47:56.000000000 -0700 +++ slub/lib/idr.c 2007-04-30 15:48:40.000000000 -0700 @@ -445,17 +445,22 @@ void *idr_replace(struct idr *idp, void 
} EXPORT_SYMBOL(idr_replace); -static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache, +static int idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache, unsigned long flags) { - memset(idr_layer, 0, sizeof(struct idr_layer)); + if (flags & SLAB_CALLBACK_CTOR) { + memset(idr_layer, 0, sizeof(struct idr_layer)); + return 0; + } + return -ENOSYS; } static int init_id_cache(void) { if (!idr_layer_cache) idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); + sizeof(struct idr_layer), 0, + SLAB_CALLBACK_CTOR, idr_cache_ctor); return 0; } Index: slub/lib/radix-tree.c =================================================================== --- slub.orig/lib/radix-tree.c 2007-04-30 15:48:52.000000000 -0700 +++ slub/lib/radix-tree.c 2007-04-30 15:50:07.000000000 -0700 @@ -1068,10 +1068,14 @@ int radix_tree_tagged(struct radix_tree_ } EXPORT_SYMBOL(radix_tree_tagged); -static void +static int radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags) { - memset(node, 0, sizeof(struct radix_tree_node)); + if (flags & SLAB_CALLBACK_CTOR) { + memset(node, 0, sizeof(struct radix_tree_node)); + return 0; + } + return -ENOSYS; } static __init unsigned long __maxindex(unsigned int height) @@ -1116,7 +1120,8 @@ void __init radix_tree_init(void) { radix_tree_node_cachep = kmem_cache_create("radix_tree_node", sizeof(struct radix_tree_node), 0, - SLAB_PANIC, radix_tree_node_ctor, NULL); + SLAB_PANIC|SLAB_CALLBACK_CTOR, + radix_tree_node_ctor); radix_tree_init_maxindex(); hotcpu_notifier(radix_tree_callback, 0); } Index: slub/mm/mempolicy.c =================================================================== --- slub.orig/mm/mempolicy.c 2007-04-30 15:50:19.000000000 -0700 +++ slub/mm/mempolicy.c 2007-04-30 15:50:32.000000000 -0700 @@ -1600,11 +1600,11 @@ void __init numa_policy_init(void) { policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), - 0, 
SLAB_PANIC, NULL, NULL); + 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), - 0, SLAB_PANIC, NULL, NULL); + 0, SLAB_PANIC, NULL); /* Set interleaving policy for system init. This way not all the data structures allocated at system boot end up in node zero. */ Index: slub/mm/rmap.c =================================================================== --- slub.orig/mm/rmap.c 2007-04-30 15:50:44.000000000 -0700 +++ slub/mm/rmap.c 2007-04-30 15:51:15.000000000 -0700 @@ -162,18 +162,21 @@ void anon_vma_unlink(struct vm_area_stru static void anon_vma_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) { - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { struct anon_vma *anon_vma = data; spin_lock_init(&anon_vma->lock); INIT_LIST_HEAD(&anon_vma->head); + return 0; } + return -ENOSYS; } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL); + 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_CALLBACK_CTOR, + anon_vma_ctor); } /* Index: slub/mm/shmem.c =================================================================== --- slub.orig/mm/shmem.c 2007-04-30 15:51:27.000000000 -0700 +++ slub/mm/shmem.c 2007-04-30 15:52:07.000000000 -0700 @@ -2324,25 +2324,27 @@ static void shmem_destroy_inode(struct i kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); } -static void init_once(void *foo, struct kmem_cache *cachep, +static int init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct shmem_inode_info *p = (struct shmem_inode_info *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(&p->vfs_inode); #ifdef CONFIG_TMPFS_POSIX_ACL p->i_acl = NULL; p->i_default_acl = NULL; #endif + return 0; } + return -ENOSYS; } static int init_inodecache(void) { shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", sizeof(struct shmem_inode_info), 
- 0, 0, init_once, NULL); + 0, SLAB_CALLBACK_CTOR, init_once); if (shmem_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/mm/slab.c =================================================================== --- slub.orig/mm/slab.c 2007-04-30 15:19:05.000000000 -0700 +++ slub/mm/slab.c 2007-04-30 16:37:04.000000000 -0700 @@ -176,12 +176,14 @@ SLAB_CACHE_DMA | \ SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR) #endif /* @@ -405,11 +407,7 @@ struct kmem_cache { unsigned int slab_size; unsigned int dflags; /* dynamic flags */ - /* constructor func */ - void (*ctor) (void *, struct kmem_cache *, unsigned long); - - /* de-constructor func */ - void (*dtor) (void *, struct kmem_cache *, unsigned long); + int (*callback) (void *, struct kmem_cache *, unsigned long); /* 5) cache creation/removal */ const char *name; @@ -1490,19 +1488,19 @@ void __init kmem_cache_init(void) * bug. */ - sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, + sizes[INDEX_AC].cs_cachep = kmem_cache_new(names[INDEX_AC].name, sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); if (INDEX_AC != INDEX_L3) { sizes[INDEX_L3].cs_cachep = - kmem_cache_create(names[INDEX_L3].name, + kmem_cache_new(names[INDEX_L3].name, sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); } slab_early_init = 0; @@ -1516,20 +1514,20 @@ void __init kmem_cache_init(void) * allow tighter packing of the smaller caches.
*/ if (!sizes->cs_cachep) { - sizes->cs_cachep = kmem_cache_create(names->name, + sizes->cs_cachep = kmem_cache_new(names->name, sizes->cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_PANIC, - NULL, NULL); + NULL); } #ifdef CONFIG_ZONE_DMA - sizes->cs_dmacachep = kmem_cache_create( + sizes->cs_dmacachep = kmem_cache_new( names->name_dma, sizes->cs_size, ARCH_KMALLOC_MINALIGN, ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC, - NULL, NULL); + NULL); #endif sizes++; names++; @@ -1907,18 +1905,20 @@ static void slab_destroy_objs(struct kme slab_error(cachep, "end of a freed object " "was overwritten"); } - if (cachep->dtor && !(cachep->flags & SLAB_POISON)) - (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); + if ((cachep->flags & SLAB_CALLBACK_DTOR) && + !(cachep->flags & SLAB_POISON)) + cachep->callback(objp + obj_offset(cachep), cachep, + SLAB_CALLBACK_DTOR); } } #else static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) { - if (cachep->dtor) { + if (cachep->flags & SLAB_CALLBACK_DTOR) { int i; for (i = 0; i < cachep->num; i++) { void *objp = index_to_obj(cachep, slabp, i); - (cachep->dtor) (objp, cachep, 0); + cachep->callback(objp, cachep, SLAB_CALLBACK_DTOR); } } } @@ -2114,18 +2114,15 @@ static int setup_cpu_cache(struct kmem_c } /** - * kmem_cache_create - Create a cache. + * kmem_cache_new - Create a cache. * @name: A string which is used in /proc/slabinfo to identify this cache. * @size: The size of objects to be created in this cache. * @align: The required alignment for the objects. * @flags: SLAB flags - * @ctor: A constructor for the objects. - * @dtor: A destructor for the objects. + * @callback: Callback for constructor and destructor. * * Returns a ptr to the cache on success, NULL on failure. * Cannot be called within a int, but can be interrupted. - * The @ctor is run when new pages are allocated by the cache - * and the @dtor is run before the pages are handed back. * * @name must be valid until the cache is destroyed.
This implies that * the module calling this has to destroy the cache before getting unloaded. @@ -2143,10 +2140,9 @@ static int setup_cpu_cache(struct kmem_c * as davem. */ struct kmem_cache * -kmem_cache_create (const char *name, size_t size, size_t align, +kmem_cache_new (const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(void*, struct kmem_cache *, unsigned long), - void (*dtor)(void*, struct kmem_cache *, unsigned long)) + int (*callback)(void*, struct kmem_cache *, unsigned long)) { size_t left_over, slab_size, ralign; struct kmem_cache *cachep = NULL, *pc; @@ -2155,7 +2151,7 @@ kmem_cache_create (const char *name, siz * Sanity checks... these are all serious usage bugs. */ if (!name || in_interrupt() || (size < BYTES_PER_WORD) || - (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { + (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE)) { printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, name); BUG(); @@ -2210,7 +2206,7 @@ kmem_cache_create (const char *name, siz BUG_ON(flags & SLAB_POISON); #endif if (flags & SLAB_DESTROY_BY_RCU) - BUG_ON(dtor); + BUG_ON(flags & SLAB_CALLBACK_DTOR); /* * Always checks flags, a caller might be expecting debug support which @@ -2365,8 +2361,7 @@ kmem_cache_create (const char *name, siz */ BUG_ON(!cachep->slabp_cache); } - cachep->ctor = ctor; - cachep->dtor = dtor; + cachep->callback = callback; cachep->name = name; if (setup_cpu_cache(cachep)) { @@ -2384,7 +2379,7 @@ oops: mutex_unlock(&cache_chain_mutex); return cachep; } -EXPORT_SYMBOL(kmem_cache_create); +EXPORT_SYMBOL(kmem_cache_new); #if DEBUG static void check_irq_off(void) @@ -2621,7 +2616,7 @@ static inline kmem_bufctl_t *slab_bufctl } static void cache_init_objs(struct kmem_cache *cachep, - struct slab *slabp, unsigned long ctor_flags) + struct slab *slabp) { int i; @@ -2643,9 +2638,10 @@ static void cache_init_objs(struct kmem_ * cache which they are a constructor for. Otherwise, deadlock. * They must also be threaded. 
*/ - if (cachep->ctor && !(cachep->flags & SLAB_POISON)) - cachep->ctor(objp + obj_offset(cachep), cachep, - ctor_flags); + if ((cachep->flags & SLAB_CALLBACK_CTOR) + && !(cachep->flags & SLAB_POISON)) + cachep->callback(objp + obj_offset(cachep), cachep, + SLAB_CALLBACK_CTOR); if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) @@ -2660,8 +2656,8 @@ static void cache_init_objs(struct kmem_ kernel_map_pages(virt_to_page(objp), cachep->buffer_size / PAGE_SIZE, 0); #else - if (cachep->ctor) - cachep->ctor(objp, cachep, ctor_flags); + if (cachep->flags & SLAB_CALLBACK_CTOR) + cachep->callback(objp, cachep, SLAB_CALLBACK_CTOR); #endif slab_bufctl(slabp)[i] = i + 1; } @@ -2750,7 +2746,6 @@ static int cache_grow(struct kmem_cache struct slab *slabp; size_t offset; gfp_t local_flags; - unsigned long ctor_flags; struct kmem_list3 *l3; /* @@ -2759,7 +2754,6 @@ static int cache_grow(struct kmem_cache */ BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); - ctor_flags = SLAB_CTOR_CONSTRUCTOR; local_flags = (flags & GFP_LEVEL_MASK); /* Take the l3 list lock to change the colour_next on this node */ check_irq_off(); @@ -2804,7 +2798,7 @@ static int cache_grow(struct kmem_cache slabp->nodeid = nodeid; slab_map_pages(cachep, slabp, objp); - cache_init_objs(cachep, slabp, ctor_flags); + cache_init_objs(cachep, slabp); if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -2890,11 +2884,11 @@ static void *cache_free_debugcheck(struc BUG_ON(objnr >= cachep->num); BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); - if (cachep->flags & SLAB_POISON && cachep->dtor) { + if (cachep->flags & SLAB_POISON && cachep->flags & SLAB_CALLBACK_DTOR) { /* we want to cache poison the object, * call the destruction callback */ - cachep->dtor(objp + obj_offset(cachep), cachep, 0); + cachep->callback(objp + obj_offset(cachep), cachep, SLAB_CALLBACK_DTOR); } #ifdef CONFIG_DEBUG_SLAB_LEAK slab_bufctl(slabp)[objnr] = BUFCTL_FREE; @@ -3094,8 +3088,8 @@ static void 
*cache_alloc_debugcheck_afte } #endif objp += obj_offset(cachep); - if (cachep->ctor && cachep->flags & SLAB_POISON) - cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); + if (cachep->flags & SLAB_CALLBACK_CTOR && cachep->flags & SLAB_POISON) + cachep->callback(objp, cachep, SLAB_CALLBACK_CTOR); #if ARCH_SLAB_MINALIGN if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", Index: slub/mm/slob.c =================================================================== --- slub.orig/mm/slob.c 2007-04-30 15:53:46.000000000 -0700 +++ slub/mm/slob.c 2007-04-30 15:56:45.000000000 -0700 @@ -276,14 +276,13 @@ size_t ksize(const void *block) struct kmem_cache { unsigned int size, align; const char *name; - void (*ctor)(void *, struct kmem_cache *, unsigned long); - void (*dtor)(void *, struct kmem_cache *, unsigned long); + unsigned long flags; + int (*callback)(void *, struct kmem_cache *, unsigned long); }; struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(void*, struct kmem_cache *, unsigned long), - void (*dtor)(void*, struct kmem_cache *, unsigned long)) + int (*callback)(void*, struct kmem_cache *, unsigned long)) { struct kmem_cache *c; @@ -292,8 +291,8 @@ struct kmem_cache *kmem_cache_create(con if (c) { c->name = name; c->size = size; - c->ctor = ctor; - c->dtor = dtor; + c->flags = flags; + c->callback = callback; /* ignore alignment unless it's forced */ c->align = (flags & SLAB_HWCACHE_ALIGN) ? 
SLOB_ALIGN : 0; if (c->align < align) @@ -320,8 +319,8 @@ void *kmem_cache_alloc(struct kmem_cache else b = (void *)__get_free_pages(flags, find_order(c->size)); - if (c->ctor) - c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR); + if (c->flags & SLAB_CALLBACK_CTOR) + c->callback(b, c, SLAB_CALLBACK_CTOR); return b; } @@ -339,8 +338,8 @@ EXPORT_SYMBOL(kmem_cache_zalloc); void kmem_cache_free(struct kmem_cache *c, void *b) { - if (c->dtor) - c->dtor(b, c, 0); + if (c->flags & SLAB_CALLBACK_DTOR) + c->callback(b, c, SLAB_CALLBACK_DTOR); if (c->size < PAGE_SIZE) slob_free(b, c->size); Index: slub/mm/swap_prefetch.c =================================================================== --- slub.orig/mm/swap_prefetch.c 2007-04-30 15:52:19.000000000 -0700 +++ slub/mm/swap_prefetch.c 2007-04-30 15:52:26.000000000 -0700 @@ -545,7 +545,7 @@ void __init prepare_swap_prefetch(void) struct zone *zone; swapped.cache = kmem_cache_create("swapped_entry", - sizeof(struct swapped_entry), 0, SLAB_PANIC, NULL, NULL); + sizeof(struct swapped_entry), 0, SLAB_PANIC, NULL); /* * Set max number of entries to 2/3 the size of physical ram as we Index: slub/net/socket.c =================================================================== --- slub.orig/net/socket.c 2007-04-30 15:52:41.000000000 -0700 +++ slub/net/socket.c 2007-04-30 15:53:35.000000000 -0700 @@ -257,12 +257,15 @@ static void sock_destroy_inode(struct in container_of(inode, struct socket_alloc, vfs_inode)); } -static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) +static int init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct socket_alloc *ei = (struct socket_alloc *)foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(&ei->vfs_inode); + return 0; + } + return -ENOSYS; } static int init_inodecache(void) @@ -272,9 +275,9 @@ static int init_inodecache(void) 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_MEM_SPREAD), - init_once, - NULL); + 
SLAB_MEM_SPREAD | + SLAB_CALLBACK_CTOR), + init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/drivers/acpi/osl.c =================================================================== --- slub.orig/drivers/acpi/osl.c 2007-04-30 16:30:00.000000000 -0700 +++ slub/drivers/acpi/osl.c 2007-04-30 16:30:10.000000000 -0700 @@ -1038,7 +1038,7 @@ void acpi_os_release_lock(acpi_spinlock acpi_status acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) { - *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL); + *cache = kmem_cache_create(name, size, 0, 0, NULL); if (*cache == NULL) return AE_ERROR; else Index: slub/fs/configfs/mount.c =================================================================== --- slub.orig/fs/configfs/mount.c 2007-04-30 16:00:44.000000000 -0700 +++ slub/fs/configfs/mount.c 2007-04-30 16:00:53.000000000 -0700 @@ -136,7 +136,7 @@ static int __init configfs_init(void) configfs_dir_cachep = kmem_cache_create("configfs_dir_cache", sizeof(struct configfs_dirent), - 0, 0, NULL, NULL); + 0, 0, NULL); if (!configfs_dir_cachep) goto out; Index: slub/fs/ext2/super.c =================================================================== --- slub.orig/fs/ext2/super.c 2007-04-30 16:01:15.000000000 -0700 +++ slub/fs/ext2/super.c 2007-04-30 16:02:09.000000000 -0700 @@ -157,18 +157,20 @@ static void ext2_destroy_inode(struct in kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { rwlock_init(&ei->i_meta_lock); #ifdef CONFIG_EXT2_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); + return 0; } + return -ENOSYS; } static int init_inodecache(void) @@ -176,8 +178,9 
@@ static int init_inodecache(void) ext2_inode_cachep = kmem_cache_create("ext2_inode_cache", sizeof(struct ext2_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (ext2_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/fs/ext3/super.c =================================================================== --- slub.orig/fs/ext3/super.c 2007-04-30 16:02:24.000000000 -0700 +++ slub/fs/ext3/super.c 2007-04-30 16:09:43.000000000 -0700 @@ -462,18 +462,20 @@ static void ext3_destroy_inode(struct in kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); + return 0; } + return -ENOSYS; } static int init_inodecache(void) @@ -481,8 +483,9 @@ static int init_inodecache(void) ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", sizeof(struct ext3_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (ext3_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/fs/fuse/dev.c =================================================================== --- slub.orig/fs/fuse/dev.c 2007-04-30 16:09:55.000000000 -0700 +++ slub/fs/fuse/dev.c 2007-04-30 16:10:01.000000000 -0700 @@ -1044,7 +1044,7 @@ int __init fuse_dev_init(void) int err = -ENOMEM; fuse_req_cachep = kmem_cache_create("fuse_request", sizeof(struct fuse_req), - 0, 0, NULL, NULL); + 0, 0, NULL); if (!fuse_req_cachep) goto out; Index: slub/fs/fuse/inode.c 
=================================================================== --- slub.orig/fs/fuse/inode.c 2007-04-30 16:13:14.000000000 -0700 +++ slub/fs/fuse/inode.c 2007-04-30 16:13:59.000000000 -0700 @@ -682,13 +682,16 @@ static inline void unregister_fuseblk(vo static decl_subsys(fuse, NULL, NULL); static decl_subsys(connections, NULL, NULL); -static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, +static int fuse_inode_init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct inode * inode = foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(inode); + return 0; + } + return -ENOSYS; } static int __init fuse_fs_init(void) @@ -705,8 +708,8 @@ static int __init fuse_fs_init(void) fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), - 0, SLAB_HWCACHE_ALIGN, - fuse_inode_init_once, NULL); + 0, SLAB_HWCACHE_ALIGN|SLAB_CALLBACK_CTOR, + fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out_unreg2; Index: slub/fs/nfs/direct.c =================================================================== --- slub.orig/fs/nfs/direct.c 2007-04-30 16:20:25.000000000 -0700 +++ slub/fs/nfs/direct.c 2007-04-30 16:20:32.000000000 -0700 @@ -847,7 +847,7 @@ int __init nfs_init_directcache(void) sizeof(struct nfs_direct_req), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), - NULL, NULL); + NULL); if (nfs_direct_cachep == NULL) return -ENOMEM; Index: slub/fs/nfs/inode.c =================================================================== --- slub.orig/fs/nfs/inode.c 2007-04-30 16:16:44.000000000 -0700 +++ slub/fs/nfs/inode.c 2007-04-30 16:18:09.000000000 -0700 @@ -1160,11 +1160,11 @@ static inline void nfs4_init_once(struct #endif } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct nfs_inode *nfsi = (struct nfs_inode *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) 
{ + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(&nfsi->vfs_inode); spin_lock_init(&nfsi->req_lock); INIT_LIST_HEAD(&nfsi->dirty); @@ -1178,7 +1178,9 @@ static void init_once(void * foo, struct nfsi->ncommit = 0; nfsi->npages = 0; nfs4_init_once(nfsi); + return 0; } + return -ENOSYS; } static int __init nfs_init_inodecache(void) @@ -1186,8 +1188,9 @@ static int __init nfs_init_inodecache(vo nfs_inode_cachep = kmem_cache_create("nfs_inode_cache", sizeof(struct nfs_inode), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (nfs_inode_cachep == NULL) return -ENOMEM; Index: slub/fs/nfs/pagelist.c =================================================================== --- slub.orig/fs/nfs/pagelist.c 2007-04-30 16:18:20.000000000 -0700 +++ slub/fs/nfs/pagelist.c 2007-04-30 16:18:51.000000000 -0700 @@ -390,10 +390,7 @@ out: int __init nfs_init_nfspagecache(void) { - nfs_page_cachep = kmem_cache_create("nfs_page", - sizeof(struct nfs_page), - 0, SLAB_HWCACHE_ALIGN, - NULL, NULL); + nfs_page_cachep = KMEM_CACHE(nfs_page, SLAB_HWCACHE_ALIGN); if (nfs_page_cachep == NULL) return -ENOMEM; Index: slub/fs/nfs/read.c =================================================================== --- slub.orig/fs/nfs/read.c 2007-04-30 16:18:59.000000000 -0700 +++ slub/fs/nfs/read.c 2007-04-30 16:19:22.000000000 -0700 @@ -607,10 +607,7 @@ out: int __init nfs_init_readpagecache(void) { - nfs_rdata_cachep = kmem_cache_create("nfs_read_data", - sizeof(struct nfs_read_data), - 0, SLAB_HWCACHE_ALIGN, - NULL, NULL); + nfs_rdata_cachep = KMEM_CACHE(nfs_read_data, SLAB_HWCACHE_ALIGN); if (nfs_rdata_cachep == NULL) return -ENOMEM; Index: slub/fs/nfs/write.c =================================================================== --- slub.orig/fs/nfs/write.c 2007-04-30 16:19:32.000000000 -0700 +++ slub/fs/nfs/write.c 2007-04-30 16:20:03.000000000 -0700 @@ -1551,10 +1551,7 @@ int nfs_set_page_dirty(struct page *page int __init 
nfs_init_writepagecache(void) { - nfs_wdata_cachep = kmem_cache_create("nfs_write_data", - sizeof(struct nfs_write_data), - 0, SLAB_HWCACHE_ALIGN, - NULL, NULL); + nfs_wdata_cachep = KMEM_CACHE(nfs_write_data, SLAB_HWCACHE_ALIGN); if (nfs_wdata_cachep == NULL) return -ENOMEM; Index: slub/fs/nfsd/nfs4state.c =================================================================== --- slub.orig/fs/nfsd/nfs4state.c 2007-04-30 16:20:55.000000000 -0700 +++ slub/fs/nfsd/nfs4state.c 2007-04-30 16:22:14.000000000 -0700 @@ -1026,20 +1026,16 @@ nfsd4_free_slabs(void) static int nfsd4_init_slabs(void) { - stateowner_slab = kmem_cache_create("nfsd4_stateowners", - sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL); + stateowner_slab = KMEM_CACHE(nfs4_stateowner, 0); if (stateowner_slab == NULL) goto out_nomem; - file_slab = kmem_cache_create("nfsd4_files", - sizeof(struct nfs4_file), 0, 0, NULL, NULL); + file_slab = KMEM_CACHE(nfs4_file, 0); if (file_slab == NULL) goto out_nomem; - stateid_slab = kmem_cache_create("nfsd4_stateids", - sizeof(struct nfs4_stateid), 0, 0, NULL, NULL); + stateid_slab = KMEM_CACHE(nfs4_stateid, 0); if (stateid_slab == NULL) goto out_nomem; - deleg_slab = kmem_cache_create("nfsd4_delegations", - sizeof(struct nfs4_delegation), 0, 0, NULL, NULL); + deleg_slab = KMEM_CACHE(nfs4_delegation, 0); if (deleg_slab == NULL) goto out_nomem; return 0; Index: slub/fs/proc/inode.c =================================================================== --- slub.orig/fs/proc/inode.c 2007-04-30 16:22:30.000000000 -0700 +++ slub/fs/proc/inode.c 2007-04-30 16:24:13.000000000 -0700 @@ -105,21 +105,25 @@ static void proc_destroy_inode(struct in kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct proc_inode *ei = (struct proc_inode *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & 
SLAB_CALLBACK_CTOR) { inode_init_once(&ei->vfs_inode); + return 0; + } + return -ENOSYS; } - + int __init proc_init_inodecache(void) { proc_inode_cachep = kmem_cache_create("proc_inode_cache", sizeof(struct proc_inode), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (proc_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/fs/reiserfs/super.c =================================================================== --- slub.orig/fs/reiserfs/super.c 2007-04-30 16:24:33.000000000 -0700 +++ slub/fs/reiserfs/super.c 2007-04-30 16:25:18.000000000 -0700 @@ -508,18 +508,20 @@ static void reiserfs_destroy_inode(struc kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); } -static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { INIT_LIST_HEAD(&ei->i_prealloc_list); inode_init_once(&ei->vfs_inode); #ifdef CONFIG_REISERFS_FS_POSIX_ACL ei->i_acl_access = NULL; ei->i_acl_default = NULL; #endif + return 0; } + return -ENOSYS; } static int init_inodecache(void) @@ -528,8 +530,9 @@ static int init_inodecache(void) sizeof(struct reiserfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (reiserfs_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/fs/sysfs/mount.c =================================================================== --- slub.orig/fs/sysfs/mount.c 2007-04-30 16:25:37.000000000 -0700 +++ slub/fs/sysfs/mount.c 2007-04-30 16:25:42.000000000 -0700 @@ -83,7 +83,7 @@ int __init sysfs_init(void) sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache", sizeof(struct sysfs_dirent), - 0, 0, NULL, NULL); + 0, 0, NULL); if (!sysfs_dir_cachep) goto out; 
Index: slub/fs/udf/super.c =================================================================== --- slub.orig/fs/udf/super.c 2007-04-30 16:25:54.000000000 -0700 +++ slub/fs/udf/super.c 2007-04-30 16:26:31.000000000 -0700 @@ -130,14 +130,16 @@ static void udf_destroy_inode(struct ino kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } -static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) +static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct udf_inode_info *ei = (struct udf_inode_info *) foo; - if (flags & SLAB_CTOR_CONSTRUCTOR) { + if (flags & SLAB_CALLBACK_CTOR) { ei->i_ext.i_data = NULL; inode_init_once(&ei->vfs_inode); + return 0; } + return -ENOSYS; } static int init_inodecache(void) @@ -145,8 +147,9 @@ static int init_inodecache(void) udf_inode_cachep = kmem_cache_create("udf_inode_cache", sizeof(struct udf_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD), - init_once, NULL); + SLAB_MEM_SPREAD| + SLAB_CALLBACK_CTOR), + init_once); if (udf_inode_cachep == NULL) return -ENOMEM; return 0; Index: slub/fs/xfs/linux-2.6/kmem.h =================================================================== --- slub.orig/fs/xfs/linux-2.6/kmem.h 2007-04-30 16:26:47.000000000 -0700 +++ slub/fs/xfs/linux-2.6/kmem.h 2007-04-30 16:28:00.000000000 -0700 @@ -74,14 +74,16 @@ extern void kmem_free(void *, size_t); static inline kmem_zone_t * kmem_zone_init(int size, char *zone_name) { - return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); + return kmem_cache_create(zone_name, size, 0, 0, NULL); } static inline kmem_zone_t * kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, - void (*construct)(void *, kmem_zone_t *, unsigned long)) + int (*construct)(void *, kmem_zone_t *, unsigned long)) { - return kmem_cache_create(zone_name, size, 0, flags, construct, NULL); + if (construct) + return kmem_cache_create(zone_name, size, 0, flags|SLAB_CALLBACK_CTOR, construct); + return 
kmem_cache_create(zone_name, size, 0, flags, NULL); } static inline void Index: slub/fs/xfs/linux-2.6/xfs_super.c =================================================================== --- slub.orig/fs/xfs/linux-2.6/xfs_super.c 2007-04-30 16:28:51.000000000 -0700 +++ slub/fs/xfs/linux-2.6/xfs_super.c 2007-04-30 16:29:15.000000000 -0700 @@ -354,14 +354,17 @@ xfs_fs_destroy_inode( kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); } -STATIC void +STATIC int xfs_fs_inode_init_once( void *vnode, kmem_zone_t *zonep, unsigned long flags) { - if (flags & SLAB_CTOR_CONSTRUCTOR) + if (flags & SLAB_CALLBACK_CTOR) { inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); + return 0; + } + return -ENOSYS; } STATIC int