SLUB: Use mapping for the address of the slab's memory

Bring back the use of the mapping field by SLUB. We now have to call a
relatively expensive function, page_address(), in the half-hot path of
__slab_alloc(). Use of the field allows us to avoid that call and lets the
various scanning functions (and, in the future, slab defrag) avoid calling
page_address().

Signed-off-by: Christoph Lameter
---
 include/linux/mm.h       |    4 ++--
 include/linux/mm_types.h |    5 ++++-
 mm/slub.c                |   21 ++++++++++-----------
 3 files changed, 16 insertions(+), 14 deletions(-)

Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h	2007-10-19 16:37:32.000000000 -0700
+++ linux-2.6/include/linux/mm.h	2007-10-19 16:38:40.000000000 -0700
@@ -563,10 +563,10 @@ static inline struct address_space *page
 {
 	struct address_space *mapping = page->mapping;
 
-	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)
+			|| PageSlab(page))
 		mapping = NULL;
 	return mapping;
 }
Index: linux-2.6/include/linux/mm_types.h
===================================================================
--- linux-2.6.orig/include/linux/mm_types.h	2007-10-19 16:36:13.000000000 -0700
+++ linux-2.6/include/linux/mm_types.h	2007-10-19 16:42:04.000000000 -0700
@@ -64,7 +64,10 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct {
+			struct kmem_cache *slab; /* SLUB: Pointer to slab */
+			void *address;		 /* SLUB: Page address */
+		};
 		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-19 16:38:43.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-19 17:00:32.000000000 -0700
@@ -292,7 +292,7 @@ static inline int is_end(const void *p)
 
 static inline void **end(struct page *page)
 {
-	return make_end(page_address(page));
+	return make_end(page->address);
 }
 
 static inline int check_valid_pointer(struct kmem_cache *s,
@@ -303,7 +303,7 @@ static inline int check_valid_pointer(st
 	if (is_end(object))
 		return 1;
 
-	base = page_address(page);
+	base = page->address;
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
@@ -488,7 +488,7 @@ static void slab_fix(struct kmem_cache *
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = page_address(page);
+	u8 *addr = page->address;
 
 	print_tracking(s, p);
 
@@ -666,7 +666,7 @@ static int slab_pad_check(struct kmem_ca
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
+	start = page->address;
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -1098,7 +1098,6 @@ static struct page *new_slab(struct kmem
 	struct page *page;
 	struct kmem_cache_node *n;
 	void *start;
-	void *end;
 	void *last;
 	void *p;
 
@@ -1118,8 +1117,7 @@ static struct page *new_slab(struct kmem
 			SLAB_STORE_USER | SLAB_TRACE))
 		SetSlabDebug(page);
 
-	start = page_address(page);
-	end = start + s->objects * s->size;
+	page->address = start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1131,7 +1129,7 @@ static struct page *new_slab(struct kmem
 		last = p;
 	}
 	setup_object(s, page, last);
-	set_freepointer(s, last, make_end(start));
+	set_freepointer(s, last, end(page));
 
 	page->freelist = start;
 	page->inuse = 0;
@@ -1147,7 +1145,7 @@ static void __free_slab(struct kmem_cach
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, page->address)
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1157,6 +1155,7 @@ static void __free_slab(struct kmem_cach
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		- pages);
 
+	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 
@@ -3003,7 +3002,7 @@ static int validate_slab(struct kmem_cac
 						unsigned long *map)
 {
 	void *p;
-	void *addr = page_address(page);
+	void *addr = page->address;
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -3283,7 +3282,7 @@ static int add_location(struct loc_track
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = page_address(page);
+	void *addr = page->address;
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
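
For readers skimming the diff, below is a minimal userspace sketch of the layout trick the patch relies on. Every name in it (toy_page, toy_kmem_cache, toy_page_mapping(), and so on) is a simplified stand-in invented for illustration, not the kernel's real definition. It only shows the shape of the idea: an anonymous struct sharing a union with mapping lets the slab's base address be cached once at slab creation, read cheaply by the hot path and the scanners, reported as a NULL mapping for slab pages, and cleared again before the page goes back to the page allocator.

/*
 * Toy model (assumed, simplified names) of the field overlay used by
 * this patch.  Not the kernel's struct page or SLUB code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct toy_kmem_cache {
	size_t size;		/* object size */
	unsigned int objects;	/* objects per slab */
};

struct toy_page {
	bool slab_page;				/* models PageSlab() */
	union {
		struct {
			unsigned long private;
			void *mapping;			/* page cache use */
		};
		struct {
			struct toy_kmem_cache *slab;	/* SLUB: cache */
			void *address;			/* SLUB: base address */
		};
	};
};

/* Models the mm.h hunk: a slab page must report no mapping, because the
 * field now holds the slab's base address, not an address_space. */
static void *toy_page_mapping(struct toy_page *page)
{
	if (page->slab_page)
		return NULL;
	return page->mapping;
}

/* Models new_slab(): compute the base address once and cache it. */
static void toy_new_slab(struct toy_page *page, struct toy_kmem_cache *s,
			 void *mem)
{
	page->slab_page = true;
	page->slab = s;
	page->address = mem;	/* what page_address() would have returned */
}

/* Models a scanning helper: reads the cached base address in the loop
 * instead of recomputing it. */
static void toy_scan_slab(struct toy_page *page)
{
	struct toy_kmem_cache *s = page->slab;
	char *base = page->address;
	unsigned int i;

	for (i = 0; i < s->objects; i++)
		printf("object %u at %p\n", i, (void *)(base + i * s->size));
}

/* Models the __free_slab() hunk: clear the overlaid field before the
 * page is handed back. */
static void toy_free_slab(struct toy_page *page, void *mem)
{
	page->mapping = NULL;
	page->slab_page = false;
	free(mem);
}

int main(void)
{
	struct toy_kmem_cache cache = { .size = 64, .objects = 4 };
	struct toy_page page = { 0 };
	void *mem = malloc(cache.size * cache.objects);

	toy_new_slab(&page, &cache, mem);
	printf("mapping reported for slab page: %p\n", toy_page_mapping(&page));
	toy_scan_slab(&page);
	toy_free_slab(&page, mem);
	return 0;
}

Note that in the real patch page_address() is still called once per slab in new_slab(); only the repeated lookups in the allocation path and the scanning/debug functions are replaced by reads of page->address.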