Index: linux-2.6.21-rc2-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc2-mm2.orig/mm/slub.c	2007-03-06 14:13:55.000000000 -0800
+++ linux-2.6.21-rc2-mm2/mm/slub.c	2007-03-06 14:31:31.000000000 -0800
@@ -1775,7 +1775,7 @@ void *__kmalloc_node(size_t size, gfp_t
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
-unsigned int ksize(const void *object)
+size_t ksize(const void *object)
 {
 	struct page *page = get_object_page(object);
 	struct kmem_cache *s;
@@ -1797,6 +1797,58 @@ void kfree(const void *object)
 }
 EXPORT_SYMBOL(kfree);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	struct kmem_cache *new_cache;
+	void *ret;
+	struct page *page;
+
+	if (unlikely(!p))
+		return kmalloc(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	page = virt_to_page(p);
+
+	if (unlikely(PageCompound(page)))
+		page = page->first_page;
+
+	new_cache = get_slab(new_size, flags);
+
+	/*
+	 * If new size fits in the current cache, bail out.
+	 */
+	if (likely(page->slab == new_cache))
+		return (void *)p;
+
+	/*
+	 * We are on the slow-path here so do not use __cache_alloc
+	 * because it bloats kernel text.
+	 */
+	ret = kmalloc(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 /********************************************************************
  *			Basic setup of slabs
  *******************************************************************/
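
For reference, a minimal usage sketch of the new interface (not part of the
patch; my_grow_buf() is a hypothetical caller). On failure krealloc() returns
NULL without freeing the old object, so callers should assign through a
temporary to avoid losing their only reference to the original allocation:

#include <linux/errno.h>
#include <linux/slab.h>

/* Grow @*buf to @new_len bytes, preserving its contents. */
static int my_grow_buf(char **buf, size_t new_len)
{
	char *tmp;

	/*
	 * krealloc() returns NULL on failure and leaves the old
	 * object allocated, so do not overwrite *buf until the
	 * call has succeeded.
	 */
	tmp = krealloc(*buf, new_len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*buf = tmp;
	return 0;
}

As the kerneldoc above describes, the boundary cases mirror userspace
realloc(): krealloc(NULL, size, flags) behaves exactly like
kmalloc(size, flags), and krealloc(p, 0, flags) frees @p and returns NULL.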