Index: linux-2.6.21-rc2-mm1/init/Kconfig
===================================================================
--- linux-2.6.21-rc2-mm1.orig/init/Kconfig	2007-03-06 16:17:17.000000000 -0800
+++ linux-2.6.21-rc2-mm1/init/Kconfig	2007-03-06 16:17:30.000000000 -0800
@@ -554,9 +554,10 @@ config SLUB
 
 config SLOB
 #
-# SLOB does not support SMP because SLAB_DESTROY_BY_RCU is not support.
+# SLOB cannot support SMP because SLAB_DESTROY_BY_RCU does not work
+# properly.
 #
-	depends on EMBEDDED && !SMP
+	depends on EMBEDDED && !SMP && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
Index: linux-2.6.21-rc2-mm1/mm/slub.c
===================================================================
--- linux-2.6.21-rc2-mm1.orig/mm/slub.c	2007-03-06 16:16:46.000000000 -0800
+++ linux-2.6.21-rc2-mm1/mm/slub.c	2007-03-06 17:56:36.000000000 -0800
@@ -407,8 +407,12 @@ static int check_slab(struct kmem_cache
 	if (page->offset * sizeof(void *) != s->offset) {
 		printk(KERN_CRIT "SLUB: %s Corrupted offset %lu in slab @%p"
 			" flags=%lx mapping=%p count=%d\n",
-			s->name, page->offset * sizeof(void *), page,
-			page->flags, page->mapping, page_count(page));
+			s->name,
+			(unsigned long)(page->offset * sizeof(void *)),
+			page,
+			page->flags,
+			page->mapping,
+			page_count(page));
 		return 0;
 	}
 	if (page->inuse > s->objects) {
@@ -1422,7 +1426,7 @@ static int kmem_cache_open(struct kmem_c
 	atomic_set(&s->cpu_slabs, 0);
 	INIT_DELAYED_WORK(&s->flush, flusher);
 #endif
-	if (init_kmem_cache_nodes(s, gfpflags)) {
+	if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) {
 		return 1;
 	}
 error:
@@ -1650,11 +1654,16 @@ static int __init setup_slub_debug(char
 __setup("slub_debug", setup_slub_debug);
 
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
-		const char *name, int size, gfp_t flags)
+		const char *name, int size, gfp_t gfp_flags)
 {
+	unsigned int flags = 0;
+
+	if (gfp_flags & SLUB_DMA)
+		flags = SLAB_CACHE_DMA;
+
 	down_write(&slub_lock);
-	if (!kmem_cache_open(s, flags, name, size, ARCH_KMALLOC_MINALIGN,
-			0, NULL, NULL))
+	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
+			flags, NULL, NULL))
 		panic("Creation of kmalloc slab %s size=%d failed.\n",
 			name, size);
 	list_add(&s->list, &slab_caches);
@@ -1682,11 +1691,8 @@ static struct kmem_cache *get_slab(size_
 	if (s)
 		return s;
 
-	/* The control structures do not have to be in the DMA zone */
-	flags &= ~__GFP_DMA;
-
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags);
+	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
 	if (!x)
 		panic("Unable to allocate memory for dma cache\n");
 
@@ -1704,7 +1710,7 @@ static struct kmem_cache *get_slab(size_
 	}
 #endif
 
-	text = kasprintf(flags, "kmalloc_dma-%d",
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			(unsigned int)realsize);
 	s = create_kmalloc_cache(x, text, realsize, flags);
 	kmalloc_caches_dma[index] = s;
@@ -1729,7 +1735,7 @@ void *__kmalloc_node(size_t size, gfp_t
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-unsigned int ksize(const void *object)
+size_t ksize(const void *object)
 {
 	struct page *page = get_object_page(object);
 	struct kmem_cache *s;
@@ -1751,6 +1757,58 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.  If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	struct kmem_cache *new_cache;
+	void *ret;
+	struct page *page;
+
+	if (unlikely(!p))
+		return kmalloc(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	page = virt_to_page(p);
+
+	if (unlikely(PageCompound(page)))
+		page = page->first_page;
+
+	new_cache = get_slab(new_size, flags);
+
+	/*
+	 * If new size fits in the current cache, bail out.
+	 */
+	if (likely(page->slab == new_cache))
+		return (void *)p;
+
+	/*
+	 * We are on the slow-path here so do not use __cache_alloc
+	 * because it bloats kernel text.
+	 */
+	ret = kmalloc(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 /********************************************************************
  *			Basic setup of slabs
  *******************************************************************/
@@ -1898,9 +1956,10 @@ struct kmem_cache *kmem_cache_create(con
 	if (s) {
 		s = kmem_cache_dup(s, GFP_KERNEL, name);
 		if (s) {
-			printk(KERN_INFO "SLUB: Merging slab_cache %s size %d"
-				" with slab_cache %s size %d\n",
-				name, (int)size, s->name, s->size);
+			if (slub_debug)
+				printk(KERN_INFO "SLUB: Merging slab_cache %s "
+					"size %d with slab_cache %s size %d\n",
+					name, (int)size, s->name, s->size);
 			goto out;
 		}