---
 mm/slub.c |   32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-01-20 21:49:42.989529266 -0800
+++ linux-2.6/mm/slub.c	2008-01-20 21:52:51.808799557 -0800
@@ -1465,7 +1465,7 @@ static inline int node_match(struct kmem
  * we need to allocate a new slab. This is slowest path since we may sleep.
  */
 static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+		gfp_t gfpflags, int node, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1521,7 +1521,7 @@ new_slab:
 	return NULL;
 debug:
 	object = c->page->freelist;
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, c->page, object, NULL))
 		goto another_slab;
 
 	c->page->inuse++;
@@ -1542,7 +1542,7 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node)
 {
 	void **object;
 	unsigned long flags;
@@ -1552,7 +1552,7 @@ static __always_inline void *slab_alloc(
 	c = get_cpu_slab(s, smp_processor_id());
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+		object = __slab_alloc(s, gfpflags, node, c);
 
 	else {
 		object = c->freelist;
@@ -1568,14 +1568,14 @@ static __always_inline void *slab_alloc(
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1589,7 +1589,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+				void *x, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1633,7 +1633,7 @@ slab_empty:
 	return;
 
 debug:
-	if (!free_debug_processing(s, page, x, addr))
+	if (!free_debug_processing(s, page, x, NULL))
 		goto out_unlock;
 	goto checks_ok;
 }
@@ -1650,7 +1650,7 @@ debug:
 * with all sorts of special processing.
 */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x)
 {
 	void **object = (void *)x;
 	unsigned long flags;
@@ -1663,7 +1663,7 @@ static __always_inline void slab_free(st
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, c->offset);
 
 	local_irq_restore(flags);
 }
@@ -1674,7 +1674,7 @@ void kmem_cache_free(struct kmem_cache *
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2535,7 +2535,7 @@ void *__kmalloc(size_t size, gfp_t flags
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2553,7 +2553,7 @@ void *__kmalloc_node(size_t size, gfp_t
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2610,7 +2610,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
+	slab_free(page->slab, page, (void *)x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3080,7 +3080,7 @@ void *__kmalloc_track_caller(size_t size
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	return slab_alloc(s, gfpflags, -1);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -3096,7 +3096,7 @@ void *__kmalloc_node_track_caller(size_t
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	return slab_alloc(s, gfpflags, node);
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
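
For readers unfamiliar with the idiom this patch removes: the dropped addr
argument carried the call site captured by gcc's __builtin_return_address(0)
at each exported entry point, which the debug path could record per object.
Below is a minimal user-space sketch of that mechanism, not part of the
patch; my_alloc(), my_alloc_entry() and record_caller() are hypothetical
stand-ins for the kernel functions, kept only to show how the builtin
threads the caller address down the call chain.

/* sketch.c -- illustrative only; build with: gcc -O2 sketch.c */
#include <stdio.h>
#include <stdlib.h>

static void record_caller(void *addr)
{
	/* SLUB's debug path would store addr in its tracking data;
	 * here we just print it. */
	printf("allocation from call site %p\n", addr);
}

static void *my_alloc(size_t size, void *addr)
{
	record_caller(addr);	/* roughly what the debug path used addr for */
	return malloc(size);
}

/* noinline so the builtin sees a real call frame, as an exported symbol does */
__attribute__((noinline)) void *my_alloc_entry(size_t size)
{
	/* the address this function will return to, i.e. its caller's call site */
	return my_alloc(size, __builtin_return_address(0));
}

int main(void)
{
	void *p = my_alloc_entry(32);	/* prints an address inside main() */
	free(p);
	return 0;
}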