Index: linux-2.6.18-rc4/ipc/mqueue.c
===================================================================
--- linux-2.6.18-rc4.orig/ipc/mqueue.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/ipc/mqueue.c	2006-08-15 12:44:00.077106865 -0700
@@ -225,7 +225,7 @@ static struct inode *mqueue_alloc_inode(
 {
 	struct mqueue_inode_info *ei;
 
-	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
Index: linux-2.6.18-rc4/fs/locks.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/locks.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/locks.c	2006-08-15 12:44:47.325169244 -0700
@@ -147,7 +147,7 @@ static kmem_cache_t *filelock_cache __re
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
-	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
 static void locks_release_private(struct file_lock *fl)
Index: linux-2.6.18-rc4/mm/mempolicy.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/mempolicy.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/mm/mempolicy.c	2006-08-15 12:43:16.238013863 -0700
@@ -1309,7 +1309,7 @@ struct mempolicy *__mpol_copy(struct mem
 	atomic_set(&new->refcnt, 1);
 	if (new->policy == MPOL_BIND) {
 		int sz = ksize(old->v.zonelist);
-		new->v.zonelist = kmalloc(sz, SLAB_KERNEL);
+		new->v.zonelist = kmalloc(sz, GFP_KERNEL);
 		if (!new->v.zonelist) {
 			kmem_cache_free(policy_cache, new);
 			return ERR_PTR(-ENOMEM);
Index: linux-2.6.18-rc4/fs/cifs/misc.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/cifs/misc.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/cifs/misc.c	2006-08-15 13:03:13.751056712 -0700
@@ -153,7 +153,7 @@ cifs_buf_get(void)
 	   albeit slightly larger than necessary and maxbuffersize
 	   defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
 	/* clear the first few header bytes */
 	/* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@ cifs_small_buf_get(void)
 	   albeit slightly larger than necessary and maxbuffersize
 	   defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
 	if (ret_buf) {
 	/* No need to clear memory here, cleared in header assemble */
 	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
Index: linux-2.6.18-rc4/fs/block_dev.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/block_dev.c	2006-08-13 20:00:58.344727388 -0700
+++ linux-2.6.18-rc4/fs/block_dev.c	2006-08-15 12:42:37.262878636 -0700
@@ -237,7 +237,7 @@ static kmem_cache_t * bdev_cachep __read
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
Index: linux-2.6.18-rc4/kernel/fork.c
===================================================================
--- linux-2.6.18-rc4.orig/kernel/fork.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/kernel/fork.c	2006-08-15 12:38:09.091918281 -0700
@@ -227,7 +227,7 @@ static inline int dup_mmap(struct mm_str
 				goto fail_nomem;
 			charge = len;
 		}
-		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
@@ -309,7 +309,7 @@ static inline void mm_free_pgd(struct mm
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>
@@ -603,7 +603,7 @@ static struct files_struct *alloc_files(
 	struct files_struct *newf;
 	struct fdtable *fdt;
 
-	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 	if (!newf)
 		goto out;
Index: linux-2.6.18-rc4/include/linux/kmalloc.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/kmalloc.h	2006-08-15 13:19:58.611903801 -0700
+++ linux-2.6.18-rc4/include/linux/kmalloc.h	2006-08-15 14:13:32.041379422 -0700
@@ -11,8 +11,8 @@
 #include
 #include
 
-// #define kmalloc_allocator slabifier_allocator
-#define kmalloc_allocator numa_slab_allocator
+#define kmalloc_allocator slabifier_allocator
+// #define kmalloc_allocator numa_slab_allocator
 
 /*
  * We keep the general caches in an array of slab caches that are used for
Index: linux-2.6.18-rc4/mm/kmalloc.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/kmalloc.c	2006-08-15 12:00:33.757472840 -0700
+++ linux-2.6.18-rc4/mm/kmalloc.c	2006-08-15 14:14:01.181180806 -0700
@@ -78,7 +78,7 @@ struct slab_cache *kmalloc_generate_slab
 	if (dma)
 		a = dmaify_page_allocator(a);
 
-	s = kmalloc_allocator.create(&kmalloc_allocator, a,
+	s = kmalloc_allocator.create(&kmalloc_allocator, a, -1,
 		dma ? "kmalloc-DMA" : "kmalloc", realsize, L1_CACHE_BYTES,
 		order(realsize), realsize, realsize, 0);
 	if (!s)
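
[Illustration, not part of the patch: the new third argument to create() is the NUMA node the cache should draw pages from; the generic kmalloc caches above pass -1, i.e. no node preference. A node-bound cache would be created through the same interface; a sketch, where "nid" and the cache name are made up:

	/* Sketch only: node-bound variant of the kmalloc_generate_slab() call
	 * above, reusing the same page allocator 'a' and size 'realsize'. */
	s = kmalloc_allocator.create(&kmalloc_allocator, a, nid,
		"kmalloc-node", realsize, L1_CACHE_BYTES,
		order(realsize), realsize, realsize, 0);
]
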
"kmalloc-DMA" : "kmalloc", realsize, L1_CACHE_BYTES, order(realsize), realsize, realsize, 0); if (!s) Index: linux-2.6.18-rc4/mm/mmap.c =================================================================== --- linux-2.6.18-rc4.orig/mm/mmap.c 2006-08-06 11:20:11.000000000 -0700 +++ linux-2.6.18-rc4/mm/mmap.c 2006-08-15 12:41:36.671889011 -0700 @@ -1724,7 +1724,7 @@ int split_vma(struct mm_struct * mm, str if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; - new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) return -ENOMEM; @@ -2037,7 +2037,7 @@ struct vm_area_struct *copy_vma(struct v vma_start < new_vma->vm_end) *vmap = new_vma; } else { - new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (new_vma) { *new_vma = *vma; pol = mpol_copy(vma_policy(vma)); Index: linux-2.6.18-rc4/arch/ia64/mm/init.c =================================================================== --- linux-2.6.18-rc4.orig/arch/ia64/mm/init.c 2006-08-15 12:31:59.802206856 -0700 +++ linux-2.6.18-rc4/arch/ia64/mm/init.c 2006-08-15 12:36:35.941416758 -0700 @@ -175,7 +175,7 @@ ia64_init_addr_space (void) /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ if (!(current->personality & MMAP_PAGE_ZERO)) { - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (vma) { memset(vma, 0, sizeof(*vma)); vma->vm_mm = current->mm; Index: linux-2.6.18-rc4/fs/exec.c =================================================================== --- linux-2.6.18-rc4.orig/fs/exec.c 2006-08-06 11:20:11.000000000 -0700 +++ linux-2.6.18-rc4/fs/exec.c 2006-08-15 12:43:03.518095258 -0700 @@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm bprm->loader += stack_base; bprm->exec += stack_base; - mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!mpnt) return -ENOMEM; Index: linux-2.6.18-rc4/kernel/user.c =================================================================== --- linux-2.6.18-rc4.orig/kernel/user.c 2006-08-06 11:20:11.000000000 -0700 +++ linux-2.6.18-rc4/kernel/user.c 2006-08-15 12:39:49.937260669 -0700 @@ -132,7 +132,7 @@ struct user_struct * alloc_uid(uid_t uid if (!up) { struct user_struct *new; - new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL); + new = kmem_cache_alloc(uid_cachep, GFP_KERNEL); if (!new) return NULL; new->uid = uid; Index: linux-2.6.18-rc4/include/linux/slabulator.h =================================================================== --- linux-2.6.18-rc4.orig/include/linux/slabulator.h 2006-08-15 13:00:00.346972661 -0700 +++ linux-2.6.18-rc4/include/linux/slabulator.h 2006-08-15 14:34:36.416329783 -0700 @@ -23,6 +23,7 @@ /* We really should be getting rid of these */ #define SLAB_KERNEL GFP_KERNEL #define SLAB_ATOMIC GFP_ATOMIC +#define SLAB_NOFS GFP_NOFS /* No debug features for now */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */ Index: linux-2.6.18-rc4/include/linux/taskstats_kern.h =================================================================== --- linux-2.6.18-rc4.orig/include/linux/taskstats_kern.h 2006-08-06 11:20:11.000000000 -0700 +++ linux-2.6.18-rc4/include/linux/taskstats_kern.h 2006-08-15 12:35:26.166425715 -0700 @@ -32,7 +32,7 @@ static inline void taskstats_tgid_alloc( struct taskstats *stats; unsigned long flags; - stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); + stats = 
Index: linux-2.6.18-rc4/include/linux/taskstats_kern.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/taskstats_kern.h	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/include/linux/taskstats_kern.h	2006-08-15 12:35:26.166425715 -0700
@@ -32,7 +32,7 @@ static inline void taskstats_tgid_alloc(
 	struct taskstats *stats;
 	unsigned long flags;
 
-	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
+	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
 	if (!stats)
 		return;
Index: linux-2.6.18-rc4/fs/eventpoll.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/eventpoll.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/eventpoll.c	2006-08-15 12:45:35.067341713 -0700
@@ -910,7 +910,7 @@ static void ep_ptable_queue_proc(struct
 	struct epitem *epi = ep_item_from_epqueue(pt);
 	struct eppoll_entry *pwq;
 
-	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 		pwq->whead = whead;
 		pwq->base = epi;
@@ -953,7 +953,7 @@ static int ep_insert(struct eventpoll *e
 	struct ep_pqueue epq;
 
 	error = -ENOMEM;
-	if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		goto eexit_1;
 
 	/* Item initialization follow here ... */
Index: linux-2.6.18-rc4/include/linux/fs.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/fs.h	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/include/linux/fs.h	2006-08-15 12:35:10.695700718 -0700
@@ -1430,7 +1430,7 @@ extern char * getname(const char __user
 extern void __init vfs_caches_init_early(void);
 extern void __init vfs_caches_init(unsigned long);
 
-#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+#define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
 #define __putname(name)	kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)	__putname(name)
Index: linux-2.6.18-rc4/mm/numa_slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/numa_slab.c	2006-08-15 12:12:26.606030651 -0700
+++ linux-2.6.18-rc4/mm/numa_slab.c	2006-08-15 14:17:07.955775158 -0700
@@ -13,6 +13,7 @@
 #include
 #include
 
+#define NUMA_SLAB_DEBUG
 /*
  * Prelude : Numacontrol for allocators
 */
@@ -68,6 +69,8 @@ static int __numa_slab_destroy(struct nu
 {
	int node;
 
+	printk(KERN_CRIT "__numa_slab_destroy(%s)\n", n->sc.name);
+
	for_each_node(node) {
		base.free(NULL, n->node[node]);
		n->node[node] = NULL;
@@ -80,6 +83,7 @@ static struct slab_cache *bring_up_node(
	struct slab_cache *s = n->node[node];
	struct slab_cache *sc = &n->sc;
 
+	printk(KERN_CRIT "bring_up_node(%s, %d)\n", n->sc.name, node);
	if (s)
		return s;
 
@@ -89,8 +93,7 @@ static struct slab_cache *bring_up_node(
		spin_unlock(&n->lock);
		return s;
	}
-	s = n->node[node] = base.create(&base,
-		numactl_allocator(sc->page_alloc, node, 0 /* GFP_THISNODE */),
+	s = n->node[node] = base.create(&base, sc->page_alloc, node,
		sc->name, sc->size, sc->align, sc->order,
		sc->objsize, sc->inuse, sc->offset);
@@ -99,13 +102,17 @@
 }
 
 static struct slab_cache *numa_slab_create(const struct slab_allocator *slab_alloc,
-	const struct page_allocator *page_alloc,
+	const struct page_allocator *page_alloc, int node,
	const char *name, int size, int align, int order,
	int objsize, int inuse, int offset)
 {
	struct numa_slab *n;
 
+	printk(KERN_CRIT "numa_slab_create(%s, %s, %d, %s, %d, %d, %d ,%d ,%d ,%d)\n",
+		slab_alloc->name, page_alloc->name, node, name, size,
+		align, order, objsize, inuse, offset);
+
	if (!numa_cache) {
-		numa_cache = base.create(&base, page_alloc, "numa_cache",
+		numa_cache = base.create(&base, page_alloc, node, "numa_cache",
			ALIGN(sizeof(struct numa_slab), L1_CACHE_BYTES),
			L1_CACHE_BYTES, 0, sizeof(struct numa_slab),
			sizeof(struct numa_slab), 0);
@@ -115,6 +122,8 @@ static struct slab_cache *numa_slab_crea
	n = base.alloc(numa_cache, GFP_KERNEL);
	memset(n, 0, sizeof(struct numa_slab));
 
+	slab_allocator_fill(&n->sc, slab_alloc, page_alloc, node, name, size, align,
+		order, objsize, inuse, offset);
	spin_lock_init(&n->lock);
	atomic_set(&n->refcount, 1);
@@ -132,6 +141,8 @@ static void *numa_slab_alloc_node(struct
	struct numa_slab *n = (void *)sc;
	struct slab_cache *s;
 
+	printk(KERN_CRIT "numa_slab_alloc_node(%s, %x, %d)\n", sc->name, flags, node);
+
	if (n < 0)
		node = numa_node_id();
 
@@ -149,6 +160,8 @@ static void *numa_slab_alloc(struct slab
 {
	int node = numa_node_id();
 
+	printk(KERN_CRIT "numa_slab_alloc(%s, %x)\n", sc->name, flags);
+
	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)) &&
			!in_interrupt()) {
		if (cpuset_do_slab_mem_spread())
@@ -163,6 +176,8 @@ static int numa_slab_destroy(struct slab
 {
	struct numa_slab *n = (void *)sc;
 
+	printk(KERN_CRIT "numa_slab_destroy(%s)\n", sc->name);
+
	if (!atomic_dec_and_test(&n->refcount))
		return 0;
 
@@ -175,6 +190,8 @@ static int numa_slab_pointer_valid(struc
	struct numa_slab *n = (void *)sc;
	int node;
 
+	printk(KERN_CRIT "numa_slab_pointer_valid(%s, %p)\n", sc->name, object);
+
	/* We can deduct from the allocator which node this is. */
	node = ((struct numactl *)(sc->page_alloc))->node;
	return base.valid_pointer(n->node[node], object);
@@ -185,6 +202,8 @@ static unsigned long numa_slab_object_si
	struct numa_slab *n = (void *)sc;
	int node;
 
+	printk(KERN_CRIT "numa_slab_object_size(%s, %p)\n", sc->name, object);
+
	/* We can deduct from the allocator which node this is. */
	node = ((struct numactl *)(sc->page_alloc))->node;
	return base.object_size(n->node[node], object);
@@ -192,6 +211,7 @@ static unsigned long numa_slab_object_si
 
 static void numa_slab_free(struct slab_cache *sc, const void *object)
 {
+	printk(KERN_CRIT "numa_slab_free(%s, %p)\n", sc ? sc->name : "", object);
	base.free(NULL, object);
 }
 
@@ -199,6 +219,8 @@ static struct slab_cache *numa_slab_dup(
 {
	struct numa_slab *n = (void *)sc;
 
+	printk(KERN_CRIT "numa_slab_dup(%s)\n", sc->name);
+
	atomic_inc(&n->refcount);
	return sc;
 }
@@ -218,6 +240,8 @@ static int numa_slab_shrink(struct slab_
	int node;
	int count = 0;
 
+	printk(KERN_CRIT "numa_slab_shrink(%s, %p)\n", sc->name, move_object);
+
	/*
	 * FIXME: What you really want to do here is to
	 * run the shrinking on each node separately
@@ -238,6 +262,8 @@ static unsigned long numa_slab_objects(s
	unsigned long count_active = 0;
	unsigned long count_partial = 0;
 
+	printk(KERN_CRIT "numa_slab_objects(%s)\n", sc->name);
+
	for_each_node(node) {
		unsigned long nactive, npartial;
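
[Illustration, not part of the patch: the numa_slab.c changes drop the per-node numactl_allocator() wrapping and instead thread the node through create(); numa_slab_create() records it in the shared slab_cache via slab_allocator_fill(). The per-node bring-up therefore reduces to a call like this sketch ("example_bring_up" is a made-up name):

	/* Sketch only: per-node cache creation after this patch. */
	static struct slab_cache *example_bring_up(struct numa_slab *n, int node)
	{
		struct slab_cache *sc = &n->sc;

		return base.create(&base, sc->page_alloc, node, sc->name,
				sc->size, sc->align, sc->order,
				sc->objsize, sc->inuse, sc->offset);
	}
]
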
Index: linux-2.6.18-rc4/fs/inode.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/inode.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/inode.c	2006-08-15 12:45:49.115303354 -0700
@@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct
	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
	if (inode) {
		struct address_space * const mapping = &inode->i_data;
Index: linux-2.6.18-rc4/arch/ia64/kernel/perfmon.c
===================================================================
--- linux-2.6.18-rc4.orig/arch/ia64/kernel/perfmon.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/arch/ia64/kernel/perfmon.c	2006-08-15 12:37:00.470176498 -0700
@@ -2299,7 +2299,7 @@ pfm_smpl_buffer_alloc(struct task_struct
	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
Index: linux-2.6.18-rc4/fs/dnotify.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/dnotify.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/dnotify.c	2006-08-15 12:45:14.651608767 -0700
@@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file
	inode = filp->f_dentry->d_inode;
	if (!S_ISDIR(inode->i_mode))
		return -ENOTDIR;
-	dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
	if (dn == NULL)
		return -ENOMEM;
	spin_lock(&inode->i_lock);
Index: linux-2.6.18-rc4/fs/ext3/super.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/ext3/super.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/ext3/super.c	2006-08-15 12:55:40.368869391 -0700
@@ -444,7 +444,7 @@ static struct inode *ext3_alloc_inode(st
 {
	struct ext3_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
Index: linux-2.6.18-rc4/arch/ia64/ia32/binfmt_elf32.c
===================================================================
--- linux-2.6.18-rc4.orig/arch/ia64/ia32/binfmt_elf32.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/arch/ia64/ia32/binfmt_elf32.c	2006-08-15 12:37:30.691944278 -0700
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
	 * code is locked in specific gate page, which is pointed by pretcode
	 * when setup_frame_ia32
	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
	 * until a task modifies them via modify_ldt().
	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binpr
	bprm->loader += stack_base;
	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;
 
Index: linux-2.6.18-rc4/mm/util.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/util.c	2006-08-15 04:24:05.599795347 -0700
+++ linux-2.6.18-rc4/mm/util.c	2006-08-15 12:41:05.631811176 -0700
@@ -35,7 +35,7 @@ char *kstrdup(const char *s, gfp_t gfp)
		return NULL;
 
	len = strlen(s) + 1;
-	buf = ____kmalloc(len, gfp);
+	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
Index: linux-2.6.18-rc4/mm/shmem.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/shmem.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/mm/shmem.c	2006-08-15 12:43:41.454232041 -0700
@@ -2118,7 +2118,7 @@ static struct kmem_cache *shmem_inode_ca
 
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
	struct shmem_inode_info *p;
-	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
+	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
Index: linux-2.6.18-rc4/include/linux/allocator.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/allocator.h	2006-08-15 13:47:22.741805560 -0700
+++ linux-2.6.18-rc4/include/linux/allocator.h	2006-08-15 14:17:54.531022852 -0700
@@ -103,6 +103,7 @@ struct derived_page_allocator *derive_pa
 struct slab_cache {
	const struct slab_allocator *slab_alloc;
	const struct page_allocator *page_alloc;
+	int node;		/* Node passed to page allocator */
	const char *name;	/* Name (only for display!) */
	int size;		/* The size of a chunk on a slab */
	int align;		/* Alignment requirements */
@@ -117,8 +118,9 @@ struct slab_allocator {
	 * Create an actually usable slab cache from a slab allocator
	 */
	struct slab_cache *(*create)(const struct slab_allocator *,
-		const struct page_allocator *a, const char *name, int size, int align,
-		int order, int objsize, int inuse, int offset);
+		const struct page_allocator *a, int node, const char *name,
+		int size, int align, int order, int objsize, int inuse,
+		int offset);
 
	/* Allocation functions */
	void *(*alloc)(struct slab_cache *, gfp_t);
@@ -169,7 +171,7 @@ struct slab_allocator {
	struct slab_cache * (*dup)(struct slab_cache *);
	int (*destroy)(struct slab_cache *);
	void (*destructor)(struct slab_allocator *);
-	void *name;
+	const char *name;
 };
 
 /* Standard slab allocators */
@@ -196,12 +198,14 @@ struct slab_allocator *trace_slab(struct
 
 /* Tools to make your own slab allocators */
 static inline void slab_allocator_fill(struct slab_cache *sc,
-	const struct slab_allocator *slab_alloc, const struct page_allocator *page_alloc,
+	const struct slab_allocator *slab_alloc,
+	const struct page_allocator *page_alloc, int node,
	const char *name, int size, int align, int order,
	int objsize, int inuse, int offset)
 {
	sc->slab_alloc = slab_alloc;
	sc->page_alloc = page_alloc;
+	sc->node = node;
	sc->name = name;
	sc->size = size;
	sc->align = align;
Index: linux-2.6.18-rc4/include/linux/rmap.h
===================================================================
--- linux-2.6.18-rc4.orig/include/linux/rmap.h	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/include/linux/rmap.h	2006-08-15 12:31:35.115253616 -0700
@@ -34,7 +34,7 @@ extern kmem_cache_t *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
Index: linux-2.6.18-rc4/net/socket.c
===================================================================
--- linux-2.6.18-rc4.orig/net/socket.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/net/socket.c	2006-08-15 12:46:03.434732621 -0700
@@ -285,7 +285,7 @@ static kmem_cache_t * sock_inode_cachep
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
	struct socket_alloc *ei;
-	ei = (struct socket_alloc *)kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+	ei = (struct socket_alloc *)kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	init_waitqueue_head(&ei->socket.wait);
Index: linux-2.6.18-rc4/fs/fcntl.c
===================================================================
--- linux-2.6.18-rc4.orig/fs/fcntl.c	2006-08-06 11:20:11.000000000 -0700
+++ linux-2.6.18-rc4/fs/fcntl.c	2006-08-15 12:44:14.940447937 -0700
@@ -544,7 +544,7 @@ int fasync_helper(int fd, struct file *
	int result = 0;
 
	if (on) {
-		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
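
[Illustration, not part of the patch: with the allocator.h change, every slab_allocator caller now supplies the node at create() time, and slab_allocator_fill() stores it in slab_cache->node for the page allocator to use. A minimal sketch of a caller, where "pa", "example" and "size" are made up and slabifier_allocator is the default selected in include/linux/kmalloc.h:

	/* Sketch only: create a cache on the current node via the new API. */
	struct slab_cache *sc;

	sc = slabifier_allocator.create(&slabifier_allocator, pa,
			numa_node_id(), "example", size, L1_CACHE_BYTES,
			0, size, size, 0);
	if (!sc)
		return -ENOMEM;
]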