---
 fs/block_dev.c               |   11 ++--
 fs/buffer.c                  |   12 ++--
 fs/ext2/super.c              |   13 +++-
 fs/ext3/super.c              |   13 +++-
 fs/fuse/inode.c              |   13 +++-
 fs/inode.c                   |   14 +++--
 fs/isofs/inode.c             |   11 ++--
 fs/locks.c                   |   14 +++--
 fs/nfs/inode.c               |   13 +++-
 fs/proc/inode.c              |   16 +++---
 fs/reiserfs/super.c          |   13 +++-
 fs/revoke.c                  |   11 ++--
 fs/udf/super.c               |   13 +++-
 fs/xfs/linux-2.6/kmem.h      |    8 +--
 fs/xfs/linux-2.6/xfs_super.c |    7 +-
 include/linux/slab.h         |   63 ++++++++++++++++++++----
 include/linux/slub_def.h     |    3 -
 ipc/mqueue.c                 |   12 +++-
 kernel/fork.c                |   33 ++++++------
 lib/idr.c                    |   11 +++-
 lib/radix-tree.c             |   11 +++-
 mm/rmap.c                    |   11 ++--
 mm/shmem.c                   |   10 ++-
 mm/slab.c                    |  112 ++++++++++++++++---------------------------
 mm/slob.c                    |   19 +++----
 mm/slub.c                    |  109 +++++++++++++----------------------------
 net/socket.c                 |   13 +++-
 27 files changed, 315 insertions(+), 274 deletions(-)

Index: slub/fs/block_dev.c
===================================================================
--- slub.orig/fs/block_dev.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/block_dev.c	2007-04-30 21:13:14.000000000 -0700
@@ -455,12 +455,12 @@ static void bdev_destroy_inode(struct in
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		memset(bdev, 0, sizeof(*bdev));
 		mutex_init(&bdev->bd_mutex);
 		sema_init(&bdev->bd_mount_sem, 1);
@@ -471,6 +471,7 @@ static void init_once(void * foo, struct
 #endif
 		inode_init_once(&ei->vfs_inode);
 	}
+	return 0;
 }
 
 static inline void __bd_forget(struct inode *inode)
@@ -518,10 +519,10 @@ struct super_block *blockdev_superblock;
 void __init bdev_cache_init(void)
 {
 	int err;
-	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
+	bdev_cachep = kmem_cache_new("bdev_cache", sizeof(struct bdev_inode),
 			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
-				SLAB_MEM_SPREAD|SLAB_PANIC),
-			init_once, NULL);
+				SLAB_MEM_SPREAD|SLAB_PANIC|SLAB_CALLBACK_CTOR),
+			init_once);
 	err = register_filesystem(&bd_type);
 	if (err)
 		panic("Cannot register bdev pseudo-fs");
Index: slub/fs/buffer.c
===================================================================
--- slub.orig/fs/buffer.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/buffer.c	2007-04-30 21:12:54.000000000 -0700
@@ -2928,15 +2928,16 @@ void free_buffer_head(struct buffer_head
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
+static int
 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		struct buffer_head * bh = (struct buffer_head *)data;
 
 		memset(bh, 0, sizeof(*bh));
 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
 	}
+	return 0;
 }
 
 static void buffer_exit_cpu(int cpu)
@@ -2965,12 +2966,11 @@ void __init buffer_init(void)
 {
 	int nrpages;
 
-	bh_cachep = kmem_cache_create("buffer_head",
+	bh_cachep = kmem_cache_new("buffer_head",
 			sizeof(struct buffer_head), 0,
 			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-			SLAB_MEM_SPREAD),
-			init_buffer_head,
-			NULL);
+			SLAB_MEM_SPREAD|SLAB_CALLBACK_CTOR),
+			init_buffer_head);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
Index: slub/fs/ext2/super.c
===================================================================
--- slub.orig/fs/ext2/super.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/ext2/super.c	2007-04-30 21:14:08.000000000 -0700
@@ -157,27 +157,30 @@ static void ext2_destroy_inode(struct in
 	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		rwlock_init(&ei->i_meta_lock);
 #ifdef CONFIG_EXT2_FS_XATTR
 		init_rwsem(&ei->xattr_sem);
 #endif
 		mutex_init(&ei->truncate_mutex);
 		inode_init_once(&ei->vfs_inode);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
+	ext2_inode_cachep = kmem_cache_new("ext2_inode_cache",
 					     sizeof(struct ext2_inode_info),
 					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once, NULL);
+						SLAB_MEM_SPREAD|
+						SLAB_CALLBACK_CTOR),
+					     init_once);
 	if (ext2_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/fs/ext3/super.c
===================================================================
--- slub.orig/fs/ext3/super.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/ext3/super.c	2007-04-30 21:14:29.000000000 -0700
@@ -462,27 +462,30 @@ static void ext3_destroy_inode(struct in
 	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
 		init_rwsem(&ei->xattr_sem);
 #endif
 		mutex_init(&ei->truncate_mutex);
 		inode_init_once(&ei->vfs_inode);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
+	ext3_inode_cachep = kmem_cache_new("ext3_inode_cache",
 					     sizeof(struct ext3_inode_info),
 					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once, NULL);
+						SLAB_MEM_SPREAD|
+						SLAB_CALLBACK_CTOR),
+					     init_once);
 	if (ext3_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/fs/fuse/inode.c
===================================================================
--- slub.orig/fs/fuse/inode.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/fuse/inode.c	2007-04-30 21:14:51.000000000 -0700
@@ -682,13 +682,16 @@ static inline void unregister_fuseblk(vo
 static decl_subsys(fuse, NULL, NULL);
 static decl_subsys(connections, NULL, NULL);
 
-static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
+static int fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 				 unsigned long flags)
 {
 	struct inode * inode = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(inode);
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 static int __init fuse_fs_init(void)
@@ -703,10 +706,10 @@ static int __init fuse_fs_init(void)
 	if (err)
 		goto out_unreg;
 
-	fuse_inode_cachep = kmem_cache_create("fuse_inode",
+	fuse_inode_cachep = kmem_cache_new("fuse_inode",
 					      sizeof(struct fuse_inode),
-					      0, SLAB_HWCACHE_ALIGN,
-					      fuse_inode_init_once, NULL);
+					      0, SLAB_HWCACHE_ALIGN|SLAB_CALLBACK_CTOR,
+					      fuse_inode_init_once);
 	err = -ENOMEM;
 	if (!fuse_inode_cachep)
 		goto out_unreg2;
Index: slub/fs/inode.c
===================================================================
--- slub.orig/fs/inode.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/inode.c	2007-04-30 21:12:28.000000000 -0700
@@ -219,12 +219,15 @@ void inode_init_once(struct inode *inode
 EXPORT_SYMBOL(inode_init_once);
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct inode * inode = (struct inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(inode);
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 /*
@@ -1394,13 +1397,12 @@ void __init inode_init(unsigned long mem
 	int loop;
 
 	/* inode slab cache */
-	inode_cachep = kmem_cache_create("inode_cache",
+	inode_cachep = kmem_cache_new("inode_cache",
 					 sizeof(struct inode),
 					 0,
 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-					 SLAB_MEM_SPREAD),
-					 init_once,
-					 NULL);
+					 SLAB_MEM_SPREAD|SLAB_CALLBACK_CTOR),
+					 init_once);
 	register_shrinker(&icache_shrinker);
 
 	/* Hash may have been set up in inode_init_early */
Index: slub/fs/locks.c
===================================================================
--- slub.orig/fs/locks.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/locks.c	2007-04-30 21:12:04.000000000 -0700
@@ -199,14 +199,15 @@ EXPORT_SYMBOL(locks_init_lock);
  * Initialises the fields of the file lock which are invariant for
  * free file_locks.
  */
-static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
+static int init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
-	if (!(flags & SLAB_CTOR_CONSTRUCTOR))
-		return;
+	if (!(flags & SLAB_CALLBACK_CTOR))
+		return -ENOSYS;
 
 	locks_init_lock(lock);
+	return 0;
 }
 
 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
@@ -2269,9 +2270,10 @@ EXPORT_SYMBOL(lock_may_write);
 
 static int __init filelock_init(void)
 {
-	filelock_cache = kmem_cache_create("file_lock_cache",
-			sizeof(struct file_lock), 0, SLAB_PANIC,
-			init_once, NULL);
+	filelock_cache = kmem_cache_new("file_lock_cache",
+			sizeof(struct file_lock), 0,
+			SLAB_PANIC|SLAB_CALLBACK_CTOR,
+			init_once);
 	return 0;
 }
Index: slub/fs/nfs/inode.c
===================================================================
--- slub.orig/fs/nfs/inode.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/nfs/inode.c	2007-04-30 21:17:05.000000000 -0700
@@ -1160,11 +1160,11 @@ static inline void nfs4_init_once(struct
 #endif
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(&nfsi->vfs_inode);
 		spin_lock_init(&nfsi->req_lock);
 		INIT_LIST_HEAD(&nfsi->dirty);
@@ -1178,16 +1178,19 @@ static void init_once(void * foo, struct
 		nfsi->ncommit = 0;
 		nfsi->npages = 0;
 		nfs4_init_once(nfsi);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int __init nfs_init_inodecache(void)
 {
-	nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
+	nfs_inode_cachep = kmem_cache_new("nfs_inode_cache",
 					     sizeof(struct nfs_inode),
 					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once, NULL);
+						SLAB_MEM_SPREAD|
+						SLAB_CALLBACK_CTOR),
+					     init_once);
 	if (nfs_inode_cachep == NULL)
 		return -ENOMEM;
Index: slub/fs/proc/inode.c
===================================================================
--- slub.orig/fs/proc/inode.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/proc/inode.c	2007-04-30 21:17:51.000000000 -0700
@@ -105,21 +105,25 @@ static void proc_destroy_inode(struct in
 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct proc_inode *ei = (struct proc_inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(&ei->vfs_inode);
+		return 0;
+	}
+	return -ENOSYS;
 }
- 
+
 int __init proc_init_inodecache(void)
 {
-	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
+	proc_inode_cachep = kmem_cache_new("proc_inode_cache",
 					      sizeof(struct proc_inode),
 					      0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					      init_once, NULL);
+						SLAB_MEM_SPREAD|
+						SLAB_CALLBACK_CTOR),
+					      init_once);
 	if (proc_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/fs/reiserfs/super.c
===================================================================
--- slub.orig/fs/reiserfs/super.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/reiserfs/super.c	2007-04-30 21:18:27.000000000 -0700
@@ -508,28 +508,31 @@ static void reiserfs_destroy_inode(struc
 	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
 }
 
-static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		INIT_LIST_HEAD(&ei->i_prealloc_list);
 		inode_init_once(&ei->vfs_inode);
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
 		ei->i_acl_access = NULL;
 		ei->i_acl_default = NULL;
 #endif
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	reiserfs_inode_cachep = kmem_cache_create("reiser_inode_cache",
+	reiserfs_inode_cachep = kmem_cache_new("reiser_inode_cache",
 						  sizeof(struct reiserfs_inode_info),
 						  0, (SLAB_RECLAIM_ACCOUNT|
-							SLAB_MEM_SPREAD),
-						  init_once, NULL);
+							SLAB_MEM_SPREAD|
+							SLAB_CALLBACK_CTOR),
+						  init_once);
 	if (reiserfs_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/fs/revoke.c
===================================================================
--- slub.orig/fs/revoke.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/revoke.c	2007-04-30 21:13:37.000000000 -0700
@@ -704,15 +704,17 @@ static struct file_system_type revokefs_
 	.kill_sb	= kill_anon_super
 };
 
-static void revokefs_init_inode(void *obj, struct kmem_cache *cache,
+static int revokefs_init_inode(void *obj, struct kmem_cache *cache,
 				unsigned long flags)
 {
 	struct revokefs_inode_info *info = obj;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		info->owner = NULL;
 		inode_init_once(&info->vfs_inode);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int __init revokefs_init(void)
@@ -720,11 +722,12 @@ static int __init revokefs_init(void)
 	int err = -ENOMEM;
 
 	revokefs_inode_cache =
-	    kmem_cache_create("revokefs_inode_cache",
+	    kmem_cache_new("revokefs_inode_cache",
 			      sizeof(struct revokefs_inode_info), 0,
 			      (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
-			       SLAB_MEM_SPREAD), revokefs_init_inode, NULL);
+			       SLAB_MEM_SPREAD | SLAB_CALLBACK_CTOR),
+			      revokefs_init_inode);
 	if (!revokefs_inode_cache)
 		goto out;
Index: slub/fs/udf/super.c
===================================================================
--- slub.orig/fs/udf/super.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/udf/super.c	2007-04-30 21:18:59.000000000 -0700
@@ -130,23 +130,26 @@ static void udf_destroy_inode(struct ino
 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		ei->i_ext.i_data = NULL;
 		inode_init_once(&ei->vfs_inode);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
+	udf_inode_cachep = kmem_cache_new("udf_inode_cache",
 					     sizeof(struct udf_inode_info),
 					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once, NULL);
+						SLAB_MEM_SPREAD|
+						SLAB_CALLBACK_CTOR),
+					     init_once);
 	if (udf_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/fs/xfs/linux-2.6/kmem.h
===================================================================
--- slub.orig/fs/xfs/linux-2.6/kmem.h	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/xfs/linux-2.6/kmem.h	2007-04-30 21:05:36.000000000 -0700
@@ -74,14 +74,16 @@ extern void  kmem_free(void *, size_t);
 static inline kmem_zone_t *
 kmem_zone_init(int size, char *zone_name)
 {
-	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+	return kmem_cache_new(zone_name, size, 0, 0, NULL);
 }
 
 static inline kmem_zone_t *
 kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
-		     void (*construct)(void *, kmem_zone_t *, unsigned long))
+		     int (*construct)(void *, kmem_zone_t *, unsigned long))
 {
-	return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+	if (construct)
+		return kmem_cache_new(zone_name, size, 0, flags|SLAB_CALLBACK_CTOR, construct);
+	return kmem_cache_new(zone_name, size, 0, flags, NULL);
 }
 
 static inline void
Index: slub/fs/xfs/linux-2.6/xfs_super.c
===================================================================
--- slub.orig/fs/xfs/linux-2.6/xfs_super.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/fs/xfs/linux-2.6/xfs_super.c	2007-04-30 21:05:36.000000000 -0700
@@ -354,14 +354,17 @@ xfs_fs_destroy_inode(
 	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
 }
 
-STATIC void
+STATIC int
 xfs_fs_inode_init_once(
 	void			*vnode,
 	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 STATIC int
Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-04-30 16:37:08.000000000 -0700
+++ slub/include/linux/slab.h	2007-04-30 22:03:13.000000000 -0700
@@ -23,7 +23,6 @@ typedef struct kmem_cache kmem_cache_t _
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
@@ -31,9 +30,8 @@ typedef struct kmem_cache kmem_cache_t _
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
-
-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
+#define SLAB_CALLBACK_CTOR	0x00000200UL	/* Use callback for constructor */
+#define SLAB_CALLBACK_DTOR	0x00001000UL	/* Use callback for destructor */
 
 /*
 * struct kmem_cache related prototypes
@@ -41,10 +39,51 @@ typedef struct kmem_cache kmem_cache_t _
 void __init kmem_cache_init(void);
 int slab_is_available(void);
 
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
-			void (*)(void *, struct kmem_cache *, unsigned long),
-			void (*)(void *, struct kmem_cache *, unsigned long));
+struct kmem_cache *kmem_cache_new(const char *, size_t, size_t,
+	unsigned long, int (*)(void *, struct kmem_cache *, unsigned long));
+
+/*
+ * Backward compatible function. We can sort of handle a constructor
+ * but definitely not a destructor.
+ *
+ * All of this is to be phased out at some point. They are defined here
+ * because these definitions are frequently used.
+ */
+#define SLAB_CTOR_CONSTRUCTOR	SLAB_CALLBACK_CTOR
+#define SLAB_HWCACHE_ALIGN	0x80000000UL	/* Align objs on cache lines */
+
+static inline struct kmem_cache *kmem_cache_create(const char *s,
+		size_t size, size_t align, unsigned long flags,
+		void (*ctor)(void *, struct kmem_cache *,
+			unsigned long), void *dtor)
+{
+	/*
+	 * This conversion only works for now because no slab allocator
+	 * checks the return value of the constructor yet.
+	 */
+	int (*callback)(void *, struct kmem_cache *, unsigned long) = (void *)ctor;
+
+	BUG_ON(dtor);
+
+	if (ctor)
+		flags |= SLAB_CALLBACK_CTOR;
+
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		/* Clear the align flag. It is no longer supported */
+		flags &= ~SLAB_HWCACHE_ALIGN;
+
+		if (size > L1_CACHE_BYTES / 2) {
+			/* Do not allow conflicting alignment specifications */
+			BUG_ON(align);
+
+			/* And set the cacheline alignment */
+			align = L1_CACHE_BYTES;
+		}
+	}
+
+	return kmem_cache_new(s, size, align, flags, callback);
+}
+
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -62,9 +100,14 @@ int kmem_ptr_validate(struct kmem_cache
  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  * then the objects will be properly aligned in SMP configurations.
 */
-#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+#define KMEM_CACHE(__struct, __flags) kmem_cache_new(#__struct,\
 		sizeof(struct __struct), __alignof__(struct __struct),\
-		(__flags), NULL, NULL)
+		(__flags), NULL)
+
+#define KMEM_CACHE_CALLBACK(__struct, __flags, __callback) \
+	kmem_cache_new(#__struct,\
+		sizeof(struct __struct), __alignof__(struct __struct),\
+		(__flags), (__callback))
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-04-30 16:37:08.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-04-30 21:05:36.000000000 -0700
@@ -39,8 +39,7 @@ struct kmem_cache {
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
+	int (*callback)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
Index: slub/ipc/mqueue.c
===================================================================
--- slub.orig/ipc/mqueue.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/ipc/mqueue.c	2007-04-30 21:21:58.000000000 -0700
@@ -211,12 +211,15 @@ static int mqueue_get_sb(struct file_sys
 	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
 }
 
-static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
+static int init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(&p->vfs_inode);
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 static struct inode *mqueue_alloc_inode(struct super_block *sb)
@@ -1248,9 +1251,10 @@ static int __init init_mqueue_fs(void)
 {
 	int error;
 
-	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
+	mqueue_inode_cachep = kmem_cache_new("mqueue_inode_cache",
 				sizeof(struct mqueue_inode_info), 0,
-				SLAB_HWCACHE_ALIGN, init_once, NULL);
+				SLAB_HWCACHE_ALIGN|SLAB_CALLBACK_CTOR,
+				init_once);
 	if (mqueue_inode_cachep == NULL)
 		return -ENOMEM;
Index: slub/kernel/fork.c
===================================================================
--- slub.orig/kernel/fork.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/kernel/fork.c	2007-04-30 21:08:58.000000000 -0700
@@ -1429,38 +1429,39 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(void *data, struct kmem_cache *cachep,
+static int sighand_ctor(void *data, struct kmem_cache *cachep,
 			unsigned long flags)
 {
 	struct sighand_struct *sighand = data;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		spin_lock_init(&sighand->siglock);
 		INIT_LIST_HEAD(&sighand->signalfd_list);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 void __init proc_caches_init(void)
 {
-	sighand_cachep = kmem_cache_create("sighand_cache",
+	sighand_cachep = kmem_cache_new("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor, NULL);
-	signal_cachep = kmem_cache_create("signal_cache",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+			SLAB_DESTROY_BY_RCU|SLAB_CALLBACK_CTOR,
+			sighand_ctor);
+	signal_cachep = kmem_cache_new("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	files_cachep = kmem_cache_create("files_cache",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	files_cachep = kmem_cache_new("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	fs_cachep = kmem_cache_create("fs_cache",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	fs_cachep = kmem_cache_new("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
-	vm_area_cachep = kmem_cache_create("vm_area_struct",
-			sizeof(struct vm_area_struct), 0,
-			SLAB_PANIC, NULL, NULL);
-	mm_cachep = kmem_cache_create("mm_struct",
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
+	mm_cachep = kmem_cache_new("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 }
 
 /*
Index: slub/lib/idr.c
===================================================================
--- slub.orig/lib/idr.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/lib/idr.c	2007-04-30 21:05:36.000000000 -0700
@@ -445,17 +445,23 @@ void *idr_replace(struct idr *idp, void
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
+static int idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
 		unsigned long flags)
 {
-	memset(idr_layer, 0, sizeof(struct idr_layer));
+	if (flags & SLAB_CALLBACK_CTOR) {
+		memset(idr_layer, 0, sizeof(struct idr_layer));
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 static int init_id_cache(void)
 {
 	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache",
-			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
+		idr_layer_cache = kmem_cache_new("idr_layer_cache",
+			sizeof(struct idr_layer), 0,
+			SLAB_CALLBACK_CTOR, idr_cache_ctor);
 	return 0;
 }
Index: slub/lib/radix-tree.c
===================================================================
--- slub.orig/lib/radix-tree.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/lib/radix-tree.c	2007-04-30 21:05:36.000000000 -0700
@@ -1068,10 +1068,14 @@ int radix_tree_tagged(struct radix_tree_
 }
 EXPORT_SYMBOL(radix_tree_tagged);
 
-static void
+static int
 radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags)
 {
-	memset(node, 0, sizeof(struct radix_tree_node));
+	if (flags & SLAB_CALLBACK_CTOR) {
+		memset(node, 0, sizeof(struct radix_tree_node));
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 static __init unsigned long __maxindex(unsigned int height)
@@ -1116,7 +1120,9 @@ void __init radix_tree_init(void)
 {
-	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
+	radix_tree_node_cachep = kmem_cache_new("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			SLAB_PANIC, radix_tree_node_ctor, NULL);
+			SLAB_PANIC|SLAB_CALLBACK_CTOR,
+			radix_tree_node_ctor);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
Index: slub/mm/rmap.c
===================================================================
--- slub.orig/mm/rmap.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/mm/rmap.c	2007-04-30 21:11:03.000000000 -0700
@@ -159,21 +159,24 @@ void anon_vma_unlink(struct vm_area_stru
 		anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
+static int anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		struct anon_vma *anon_vma = data;
 
 		spin_lock_init(&anon_vma->lock);
 		INIT_LIST_HEAD(&anon_vma->head);
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 void __init anon_vma_init(void)
 {
-	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
+	anon_vma_cachep = kmem_cache_new("anon_vma", sizeof(struct anon_vma),
+			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_CALLBACK_CTOR,
+			anon_vma_ctor);
 }
 
 /*
Index: slub/mm/shmem.c
===================================================================
--- slub.orig/mm/shmem.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/mm/shmem.c	2007-04-30 21:11:31.000000000 -0700
@@ -2324,25 +2324,27 @@ static void shmem_destroy_inode(struct i
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
-static void init_once(void *foo, struct kmem_cache *cachep,
+static int init_once(void *foo, struct kmem_cache *cachep,
 		      unsigned long flags)
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
 		inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 		p->i_acl = NULL;
 		p->i_default_acl = NULL;
 #endif
+		return 0;
 	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
+	shmem_inode_cachep = kmem_cache_new("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, 0, init_once, NULL);
+				0, SLAB_CALLBACK_CTOR, init_once);
 	if (shmem_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
Index: slub/mm/slab.c
===================================================================
--- slub.orig/mm/slab.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/mm/slab.c	2007-04-30 21:41:02.000000000 -0700
@@ -166,22 +166,23 @@
 #endif
 
 #ifndef ARCH_KMALLOC_FLAGS
-#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
+#define ARCH_KMALLOC_FLAGS 0
 #endif
 
 /* Legal flag mask for kmem_cache_create(). */
 #if DEBUG
 # define CREATE_MASK	(SLAB_RED_ZONE | \
-			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+			 SLAB_POISON | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | \
+# define CREATE_MASK	(SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)
 #endif
 
 /*
@@ -405,11 +406,7 @@ struct kmem_cache {
 	unsigned int slab_size;
 	unsigned int dflags;		/* dynamic flags */
 
-	/* constructor func */
-	void (*ctor) (void *, struct kmem_cache *, unsigned long);
-
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
+	int (*callback) (void *, struct kmem_cache *, unsigned long);
 
 	/* 5) cache creation/removal */
 	const char *name;
@@ -1490,19 +1487,19 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+	sizes[INDEX_AC].cs_cachep = kmem_cache_new(names[INDEX_AC].name,
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep =
-			kmem_cache_create(names[INDEX_L3].name,
+			kmem_cache_new(names[INDEX_L3].name,
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL, NULL);
+				NULL);
 	}
 
 	slab_early_init = 0;
@@ -1516,20 +1513,20 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = kmem_cache_create(names->name,
+			sizes->cs_cachep = kmem_cache_new(names->name,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = kmem_cache_create(
+		sizes->cs_dmacachep = kmem_cache_new(
 					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 #endif
 		sizes++;
 		names++;
@@ -1907,18 +1904,20 @@ static void slab_destroy_objs(struct kme
 				slab_error(cachep, "end of a freed object "
					   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
+		if ((cachep->flags & SLAB_CALLBACK_DTOR) &&
+				!(cachep->flags & SLAB_POISON))
+			cachep->callback(objp + obj_offset(cachep), cachep,
+				SLAB_CALLBACK_DTOR);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
+	if (cachep->flags & SLAB_CALLBACK_DTOR) {
 		int i;
 		for (i = 0; i < cachep->num; i++) {
 			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
+			cachep->callback(objp, cachep, SLAB_CALLBACK_DTOR);
 		}
 	}
 }
@@ -2114,18 +2113,15 @@ static int setup_cpu_cache(struct kmem_c
 }
 
 /**
- * kmem_cache_create - Create a cache.
+ * kmem_cache_new - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @callback: Callback for constructor and destructor.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache
- * and the @dtor is run before the pages are handed back.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
@@ -2137,16 +2133,11 @@ static int setup_cpu_cache(struct kmem_c
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline.  This can be beneficial if you're counting cycles as closely
- * as davem.
 */
 struct kmem_cache *
-kmem_cache_create (const char *name, size_t size, size_t align,
+kmem_cache_new (const char *name, size_t size, size_t align,
	unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	int (*callback)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2155,7 +2146,8 @@ kmem_cache_create (const char *name, siz
	 * Sanity checks... these are all serious usage bugs.
	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) ||
+	    !!callback ^ !!(flags & (SLAB_CALLBACK_CTOR|SLAB_CALLBACK_DTOR))) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
				name);
		BUG();
@@ -2210,7 +2202,7 @@ kmem_cache_create (const char *name, siz
		BUG_ON(flags & SLAB_POISON);
 #endif
 	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
+		BUG_ON(flags & SLAB_CALLBACK_DTOR);
 
	/*
	 * Always checks flags, a caller might be expecting debug support which
@@ -2228,21 +2220,7 @@ kmem_cache_create (const char *name, siz
		size &= ~(BYTES_PER_WORD - 1);
	}
 
-	/* calculate the final buffer alignment: */
-
-	/* 1) arch recommendation: can be overridden for debug */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		/*
-		 * Default alignment: as specified by the arch code.  Except if
-		 * an object is really small, then squeeze multiple objects into
-		 * one cacheline.
-		 */
-		ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-	} else {
-		ralign = BYTES_PER_WORD;
-	}
+	ralign = BYTES_PER_WORD;
 
	/*
	 * Redzoning and user store require word alignment. Note this will be
@@ -2365,8 +2343,7 @@ kmem_cache_create (const char *name, siz
		 */
		BUG_ON(!cachep->slabp_cache);
	}
-	cachep->ctor = ctor;
-	cachep->dtor = dtor;
+	cachep->callback = callback;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2384,7 +2361,7 @@ oops:
	mutex_unlock(&cache_chain_mutex);
	return cachep;
 }
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(kmem_cache_new);
 
 #if DEBUG
 static void check_irq_off(void)
@@ -2621,7 +2598,7 @@ static inline kmem_bufctl_t *slab_bufctl
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
	int i;
 
@@ -2643,9 +2620,10 @@ static void cache_init_objs(struct kmem_
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
-		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+		if ((cachep->flags & SLAB_CALLBACK_CTOR)
+				&& !(cachep->flags & SLAB_POISON))
+			cachep->callback(objp + obj_offset(cachep), cachep,
+				     SLAB_CALLBACK_CTOR);
 
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2660,8 +2638,8 @@ static void cache_init_objs(struct kmem_
			kernel_map_pages(virt_to_page(objp),
					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
-		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+		if (cachep->flags & SLAB_CALLBACK_CTOR)
+			cachep->callback(objp, cachep, SLAB_CALLBACK_CTOR);
 #endif
		slab_bufctl(slabp)[i] = i + 1;
	}
@@ -2750,7 +2728,6 @@ static int cache_grow(struct kmem_cache
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
-	unsigned long ctor_flags;
	struct kmem_list3 *l3;
 
	/*
@@ -2759,7 +2736,6 @@ static int cache_grow(struct kmem_cache
	 */
	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
	local_flags = (flags & GFP_LEVEL_MASK);
	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
@@ -2804,7 +2780,7 @@ static int cache_grow(struct kmem_cache
	slabp->nodeid = nodeid;
	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
@@ -2890,11 +2866,11 @@ static void *cache_free_debugcheck(struc
		BUG_ON(objnr >= cachep->num);
		BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
+	if (cachep->flags & SLAB_POISON && cachep->flags & SLAB_CALLBACK_DTOR) {
		/* we want to cache poison the object,
		 * call the destruction callback
		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
+		cachep->callback(objp + obj_offset(cachep), cachep, SLAB_CALLBACK_DTOR);
	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
@@ -3094,8 +3070,8 @@ static void *cache_alloc_debugcheck_afte
	}
 #endif
	objp += obj_offset(cachep);
-	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+	if (cachep->flags & SLAB_CALLBACK_CTOR && cachep->flags & SLAB_POISON)
+		cachep->callback(objp, cachep, SLAB_CALLBACK_CTOR);
 #if ARCH_SLAB_MINALIGN
	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
Index: slub/mm/slob.c
===================================================================
--- slub.orig/mm/slob.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/mm/slob.c	2007-04-30 21:05:36.000000000 -0700
@@ -276,14 +276,14 @@ size_t ksize(const void *block)
 struct kmem_cache {
	unsigned int size, align;
	const char *name;
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
+	unsigned long flags;
+	int (*callback)(void *, struct kmem_cache *, unsigned long);
 };
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *kmem_cache_new(const char *name, size_t size,
	size_t align, unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	int (*callback)(void*, struct kmem_cache *, unsigned long))
 {
	struct kmem_cache *c;
 
@@ -292,8 +291,8 @@ struct kmem_cache *kmem_cache_create(con
	if (c) {
		c->name = name;
		c->size = size;
-		c->ctor = ctor;
-		c->dtor = dtor;
+		c->flags = flags;
+		c->callback = callback;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
@@ -320,8 +319,8 @@ void *kmem_cache_alloc(struct kmem_cache
	else
		b = (void *)__get_free_pages(flags, find_order(c->size));
 
-	if (c->ctor)
-		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+	if (c->flags & SLAB_CALLBACK_CTOR)
+		c->callback(b, c, SLAB_CALLBACK_CTOR);
 
	return b;
 }
@@ -339,8 +338,8 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
+	if (c->flags & SLAB_CALLBACK_DTOR)
+		c->callback(b, c, SLAB_CALLBACK_DTOR);
 
	if (c->size < PAGE_SIZE)
		slob_free(b, c->size);
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/mm/slub.c	2007-04-30 21:42:37.000000000 -0700
@@ -148,7 +148,8 @@
 * Set of flags that will prevent slab merging
 */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | \
+		SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)
@@ -809,8 +810,8 @@ static void setup_object(struct kmem_cac
		init_tracking(s, object);
	}
 
-	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+	if (unlikely(s->flags & SLAB_CALLBACK_CTOR))
+		s->callback(object, s, SLAB_CALLBACK_CTOR);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -868,15 +869,16 @@ static void __free_slab(struct kmem_cach
 {
	int pages = 1 << s->order;
 
-	if (unlikely(PageError(page) || s->dtor)) {
+	if (unlikely(PageError(page) ||
+			(s->flags & SLAB_CALLBACK_DTOR))) {
		void *start = page_address(page);
		void *end = start + (pages << PAGE_SHIFT);
		void *p;
 
		slab_pad_check(s, page);
		for (p = start; p <= end - s->size; p += s->size) {
-			if (s->dtor)
-				s->dtor(p, s, 0);
+			if (s->flags & SLAB_CALLBACK_DTOR)
+				s->callback(p, s, SLAB_CALLBACK_DTOR);
			check_object(s, page, p, 0);
		}
	}
@@ -1482,19 +1485,6 @@ static int calculate_order(int size)
 static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
 {
-	/*
-	 * If the user wants hardware cache aligned objects then
-	 * follow that suggestion if the object is sufficiently
-	 * large.
-	 *
-	 * The hardware cache alignment cannot override the
-	 * specified alignment though. If that is greater
-	 * then use it.
-	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > L1_CACHE_BYTES / 2)
-		return max_t(unsigned long, align, L1_CACHE_BYTES);
-
	if (align < ARCH_SLAB_MINALIGN)
		return ARCH_SLAB_MINALIGN;
@@ -1617,8 +1607,9 @@ static int calculate_sizes(struct kmem_c
	 * the slab may touch the object after free or before allocation
	 * then we should never poison the object itself.
	 */
-	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-			!s->ctor && !s->dtor)
+	if ((flags & SLAB_POISON) &&
+			!(flags & (SLAB_DESTROY_BY_RCU |
+				SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)))
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;
 
@@ -1646,8 +1637,8 @@ static int calculate_sizes(struct kmem_c
	 */
	s->inuse = size;
 
-	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-		s->ctor || s->dtor)) {
+	if (flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON |
+			SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR)) {
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
@@ -1731,17 +1722,18 @@ static int __init finish_bootstrap(void)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
		const char *name, size_t size,
		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		int (*callback)(void *, struct kmem_cache *, unsigned long))
 {
	memset(s, 0, kmem_size);
	s->name = name;
-	s->ctor = ctor;
-	s->dtor = dtor;
+	s->callback = callback;
	s->objsize = size;
	s->flags = flags;
	s->align = align;
 
+	if (!!callback ^ !!(s->flags & (SLAB_CALLBACK_CTOR|SLAB_CALLBACK_DTOR)))
+		goto error;
+
	/*
	 * The page->offset field is only 16 bit wide. This is an offset
	 * in units of words from the beginning of an object. If the slab
@@ -1751,14 +1743,13 @@ static int kmem_cache_open(struct kmem_c
	 * On 32 bit platforms the limit is 256k. On 64bit platforms
	 * the limit is 512k.
	 *
-	 * Debugging or ctor/dtors may create a need to move the free
+	 * Debugging or callbacks may create a need to move the free
	 * pointer. Fail if this happens.
	 */
-	if (s->size >= 65535 * sizeof(void *)) {
+	if (s->size >= 65535 * sizeof(void *))
		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(ctor || dtor);
-	}
+			SLAB_STORE_USER | SLAB_DESTROY_BY_RCU |
+			SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR));
	else
		/*
		 * Enable debugging if selected on the kernel commandline.
@@ -1880,7 +1871,7 @@ static int kmem_cache_close(struct kmem_
 
 /*
 * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
+ * (must be used for caches created using kmem_cache_new)
 */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
@@ -1992,7 +1983,7 @@ static struct kmem_cache *create_kmalloc
 
	down_write(&slub_lock);
	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL, NULL))
+			flags, NULL))
		goto panic;
 
	list_add(&s->list, &slab_caches);
@@ -2313,25 +2304,17 @@ static int slab_unmergeable(struct kmem_
	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
		return 1;
 
-	if (s->ctor || s->dtor)
-		return 1;
-
	return 0;
 }
 
 static struct kmem_cache *find_mergeable(size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		size_t align, unsigned long flags)
 {
	struct list_head *h;
 
	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
		return NULL;
 
-	if (ctor || dtor)
-		return NULL;
-
	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
@@ -2364,15 +2347,14 @@ static struct kmem_cache *find_mergeable
	return NULL;
 }
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *kmem_cache_new(const char *name, size_t size,
		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		int (*callback)(void *, struct kmem_cache *, unsigned long))
 {
	struct kmem_cache *s;
 
	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, dtor, ctor);
+	s = find_mergeable(size, align, flags);
	if (s) {
		s->refcount++;
		/*
@@ -2386,7 +2368,7 @@ struct kmem_cache *kmem_cache_create(con
	} else {
		s = kmalloc(kmem_size, GFP_KERNEL);
		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor, dtor)) {
+				size, align, flags, callback)) {
			if (sysfs_slab_add(s)) {
				kfree(s);
				goto err;
@@ -2406,7 +2388,7 @@ err:
		s = NULL;
	return s;
 }
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(kmem_cache_new);
 
 void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
@@ -2961,27 +2943,16 @@ static ssize_t order_show(struct kmem_ca
 }
 SLAB_ATTR_RO(order);
 
-static ssize_t ctor_show(struct kmem_cache *s, char *buf)
+static ssize_t callback_show(struct kmem_cache *s, char *buf)
 {
-	if (s->ctor) {
-		int n = sprint_symbol(buf, (unsigned long)s->ctor);
+	if (s->callback) {
+		int n = sprint_symbol(buf, (unsigned long)s->callback);
 
		return n + sprintf(buf + n, "\n");
	}
	return 0;
 }
-SLAB_ATTR_RO(ctor);
-
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->dtor) {
-		int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(dtor);
+SLAB_ATTR_RO(callback);
 
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
@@ -3058,12 +3029,6 @@ static ssize_t reclaim_account_store(str
 }
 SLAB_ATTR(reclaim_account);
 
-static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
-{
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
-}
-SLAB_ATTR_RO(hwcache_align);
-
 #ifdef CONFIG_ZONE_DMA
 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 {
@@ -3213,13 +3178,11 @@ static struct attribute * slab_attrs[] =
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
-	&ctor_attr.attr,
-	&dtor_attr.attr,
+	&callback_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
-	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
Index: slub/net/socket.c
===================================================================
--- slub.orig/net/socket.c	2007-04-30 16:37:08.000000000 -0700
+++ slub/net/socket.c	2007-04-30 21:05:36.000000000 -0700
@@ -257,12 +257,15 @@ static void sock_destroy_inode(struct in
			container_of(inode, struct socket_alloc, vfs_inode));
 }
 
-static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
+static int init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
	struct socket_alloc *ei = (struct socket_alloc *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CALLBACK_CTOR) {
		inode_init_once(&ei->vfs_inode);
+		return 0;
+	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
@@ -270,11 +273,11 @@ static int init_inodecache(void)
-	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
+	sock_inode_cachep = kmem_cache_new("sock_inode_cache",
			sizeof(struct socket_alloc),
			0,
			(SLAB_HWCACHE_ALIGN |
			 SLAB_RECLAIM_ACCOUNT |
-			 SLAB_MEM_SPREAD),
-			init_once,
-			NULL);
+			 SLAB_MEM_SPREAD |
+			 SLAB_CALLBACK_CTOR),
+			init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
Index: slub/fs/isofs/inode.c
===================================================================
--- slub.orig/fs/isofs/inode.c	2007-04-30 21:15:16.000000000 -0700
+++ slub/fs/isofs/inode.c	2007-04-30 21:16:32.000000000 -0700
@@ -77,18 +77,21 @@ static int init_once(void *foo, struct k
 {
	struct iso_inode_info *ei = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CALLBACK_CTOR) {
		inode_init_once(&ei->vfs_inode);
+		return 0;
	}
+	return -ENOSYS;
 }
 
 static int init_inodecache(void)
 {
-	isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
+	isofs_inode_cachep = kmem_cache_new("isofs_inode_cache",
					sizeof(struct iso_inode_info),
					0, (SLAB_RECLAIM_ACCOUNT|
-					SLAB_MEM_SPREAD),
-					init_once, NULL);
+					SLAB_MEM_SPREAD|
+					SLAB_CALLBACK_CTOR),
+					init_once);
	if (isofs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
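
For anyone converting out-of-tree callers, here is a minimal usage sketch of
the new interface. The "foo" names below are illustrative only and are not
part of this patch: a single callback serves as both constructor and
destructor, dispatching on the flags argument.

	struct foo {
		spinlock_t lock;
		struct list_head list;
	};

	static struct kmem_cache *foo_cachep;

	static int foo_callback(void *obj, struct kmem_cache *cachep,
				unsigned long flags)
	{
		struct foo *f = obj;

		if (flags & SLAB_CALLBACK_CTOR) {
			/* A new slab page is being populated with objects */
			spin_lock_init(&f->lock);
			INIT_LIST_HEAD(&f->list);
			return 0;
		}
		if (flags & SLAB_CALLBACK_DTOR) {
			/* The slab page is going back to the page allocator */
			WARN_ON(!list_empty(&f->list));
			return 0;
		}
		return -ENOSYS;
	}

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_new("foo_cache", sizeof(struct foo), 0,
					    SLAB_CALLBACK_CTOR | SLAB_CALLBACK_DTOR,
					    foo_callback);
		return foo_cachep ? 0 : -ENOMEM;
	}

Note that both allocators reject a mismatch between the callback argument and
the SLAB_CALLBACK_* flags (slab BUGs, slub fails cache creation), so the two
must be kept in sync, and that caches with callbacks are excluded from slub's
slab merging via SLUB_NEVER_MERGE.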