Index: linux-2.6.21-rc2-mm2/init/Kconfig
===================================================================
--- linux-2.6.21-rc2-mm2.orig/init/Kconfig	2007-03-06 14:08:17.000000000 -0800
+++ linux-2.6.21-rc2-mm2/init/Kconfig	2007-03-06 14:08:18.000000000 -0800
@@ -570,9 +570,10 @@ config SLUB
 
 config SLOB
 #
-# SLOB does not support SMP because SLAB_DESTROY_BY_RCU is not support.
+# SLOB cannot support SMP because SLAB_DESTROY_BY_RCU does not work
+# properly.
 #
-	depends on EMBEDDED && !SMP
+	depends on EMBEDDED && !SMP && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	  SLOB replaces the SLAB allocator with a drastically simpler
Index: linux-2.6.21-rc2-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc2-mm2.orig/mm/slub.c	2007-03-06 14:08:17.000000000 -0800
+++ linux-2.6.21-rc2-mm2/mm/slub.c	2007-03-06 14:08:31.000000000 -0800
@@ -407,8 +407,12 @@ static int check_slab(struct kmem_cache
 	if (page->offset * sizeof(void *) != s->offset) {
 		printk(KERN_CRIT "SLUB: %s Corrupted offset %lu in slab @%p"
 			" flags=%lx mapping=%p count=%d\n",
-			s->name, page->offset * sizeof(void *), page,
-			page->flags, page->mapping, page_count(page));
+			s->name,
+			(unsigned long)(page->offset * sizeof(void *)),
+			page,
+			page->flags,
+			page->mapping,
+			page_count(page));
 		return 0;
 	}
 	if (page->inuse > s->objects) {
@@ -1422,7 +1426,7 @@ static int kmem_cache_open(struct kmem_c
 	atomic_set(&s->cpu_slabs, 0);
 	INIT_DELAYED_WORK(&s->flush, flusher);
 #endif
-	if (init_kmem_cache_nodes(s, gfpflags)) {
+	if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) {
 		return 1;
 	}
 error:
@@ -1650,11 +1654,16 @@ static int __init setup_slub_debug(char
 __setup("slub_debug", setup_slub_debug);
 
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
-		const char *name, int size, gfp_t flags)
+		const char *name, int size, gfp_t gfp_flags)
 {
+	unsigned int flags = 0;
+
+	if (gfp_flags & SLUB_DMA)
+		flags = SLAB_CACHE_DMA;
+
 	down_write(&slub_lock);
-	if (!kmem_cache_open(s, flags, name, size, ARCH_KMALLOC_MINALIGN,
-			0, NULL, NULL))
+	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
+			flags, NULL, NULL))
 		panic("Creation of kmalloc slab %s size=%d failed.\n",
 			name, size);
 	list_add(&s->list, &slab_caches);
@@ -1682,11 +1691,8 @@ static struct kmem_cache *get_slab(size_
 	if (s)
 		return s;
 
-	/* The control structures do not have to be in the DMA zone */
-	flags &= ~__GFP_DMA;
-
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags);
+	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
 	if (!x)
 		panic("Unable to allocate memory for dma cache\n");
 
@@ -1704,7 +1710,7 @@ static struct kmem_cache *get_slab(size_
 	}
 #endif
 
-	text = kasprintf(flags, "kmalloc_dma-%d",
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			(unsigned int)realsize);
 	s = create_kmalloc_cache(x, text, realsize, flags);
 	kmalloc_caches_dma[index] = s;
@@ -1898,9 +1904,10 @@ struct kmem_cache *kmem_cache_create(con
 	if (s) {
 		s = kmem_cache_dup(s, GFP_KERNEL, name);
 		if (s) {
-			printk(KERN_INFO "SLUB: Merging slab_cache %s size %d"
-				" with slab_cache %s size %d\n",
-				name, (int)size, s->name, s->size);
+			if (slub_debug)
+				printk(KERN_INFO "SLUB: Merging slab_cache %s "
+					"size %d with slab_cache %s size %d\n",
+					name, (int)size, s->name, s->size);
 			goto out;
 		}