From akpm@linux-foundation.org Fri Apr  6 22:24:55 2007
Date: Fri, 06 Apr 2007 22:24:53 -0700
From: akpm@linux-foundation.org
To: mm-commits@vger.kernel.org
Cc: clameter@sgi.com
Subject: + add-virt_to_head_page-and-consolidate-code-in-slab-and-slub.patch added to -mm tree


The patch titled
     Add virt_to_head_page and consolidate code in slab and slub
has been added to the -mm tree.  Its filename is
     add-virt_to_head_page-and-consolidate-code-in-slab-and-slub.patch

---

 include/linux/mm.h |    6 ++++++
 mm/slab.c          |    9 ++++-----
 mm/slub.c          |   10 ++++------
 3 files changed, 14 insertions(+), 11 deletions(-)

Index: linux-2.6.21-rc5-mm4/include/linux/mm.h
===================================================================
--- linux-2.6.21-rc5-mm4.orig/include/linux/mm.h	2007-04-09 11:04:10.000000000 -0700
+++ linux-2.6.21-rc5-mm4/include/linux/mm.h	2007-04-09 11:04:15.000000000 -0700
@@ -323,6 +323,12 @@ static inline void get_page(struct page
 	atomic_inc(&page->_count);
 }
 
+static inline struct page *virt_to_head_page(const void *x)
+{
+	struct page *page = virt_to_page(x);
+	return compound_head(page);
+}
+
 /*
  * Setup the page count before being freed into the page allocator for
  * the first time (boot or memory hotplug)
Index: linux-2.6.21-rc5-mm4/mm/slab.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slab.c	2007-04-08 17:17:18.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slab.c	2007-04-09 11:04:15.000000000 -0700
@@ -614,20 +614,19 @@ static inline void page_set_slab(struct
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_cache(page);
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
 	return page_get_slab(page);
 }
 
@@ -2884,7 +2883,7 @@ static void *cache_free_debugcheck(struc
 
 	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
-	page = virt_to_page(objp);
+	page = virt_to_head_page(objp);
 
 	slabp = page_get_slab(page);
 
@@ -3108,7 +3107,7 @@ static void *cache_alloc_debugcheck_afte
 		struct slab *slabp;
 		unsigned objnr;
 
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = page_get_slab(virt_to_head_page(objp));
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-09 10:57:50.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-09 11:04:15.000000000 -0700
@@ -1271,9 +1271,7 @@ void kmem_cache_free(struct kmem_cache *
 {
 	struct page * page;
 
-	page = virt_to_page(x);
-
-	page = compound_head(page);
+	page = virt_to_head_page(x);
 
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, 1);
@@ -1284,7 +1282,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = compound_head(virt_to_page(x));
+	struct page *page = virt_to_head_page(x);
 
 	if (!PageSlab(page))
 		return NULL;
@@ -1891,7 +1889,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
 
-	page = compound_head(virt_to_page(x));
+	page = virt_to_head_page(x);
 
 	s = page->slab;
 
@@ -1927,7 +1925,7 @@ void *krealloc(const void *p, size_t new
 		return NULL;
 	}
 
-	page = compound_head(virt_to_page(p));
+	page = virt_to_head_page(p);
 
 	new_cache = get_slab(new_size, flags);
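
For reference: the new helper simply folds the virt_to_page() +
compound_head() pair that each call site above used to open-code into one
call, so an object living in a tail page of a multi-page slab always
resolves to the head page that carries the slab metadata.  Below is a
minimal userspace sketch of that behaviour; the toy_-prefixed functions,
the simplified struct page, mem_map and PAGE_SHIFT are stand-ins invented
for illustration, not the kernel's real definitions and not part of the
patch:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct page {
	struct page *first_page;	/* NULL on head/order-0 pages */
};

static struct page mem_map[4];		/* toy memory map: four page frames */

/* toy virt_to_page(): index the memory map by page frame number */
static struct page *toy_virt_to_page(const void *x)
{
	return &mem_map[(uintptr_t)x >> PAGE_SHIFT];
}

/* toy compound_head(): a tail page resolves to its head page */
static struct page *toy_compound_head(struct page *page)
{
	return page->first_page ? page->first_page : page;
}

/* the consolidated helper the patch introduces */
static struct page *toy_virt_to_head_page(const void *x)
{
	return toy_compound_head(toy_virt_to_page(x));
}

int main(void)
{
	void *obj;

	/* frames 1-3 form one compound allocation headed by frame 1 */
	mem_map[2].first_page = &mem_map[1];
	mem_map[3].first_page = &mem_map[1];

	/* an object in frame 3 maps back to the head page, where
	   slab/slub keep their per-slab metadata */
	obj = (void *)((uintptr_t)3 << PAGE_SHIFT);
	assert(toy_virt_to_head_page(obj) == &mem_map[1]);
	return 0;
}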