---
 include/linux/mm_types.h |    1 
 include/linux/slub_def.h |    2 +
 mm/slub.c                |   50 ++++++++++++++++++++++++++++-------------------
 3 files changed, 33 insertions(+), 20 deletions(-)

Index: linux-2.6/include/linux/mm_types.h
===================================================================
--- linux-2.6.orig/include/linux/mm_types.h	2008-02-06 18:11:57.145912950 -0800
+++ linux-2.6/include/linux/mm_types.h	2008-02-06 18:12:24.978616203 -0800
@@ -54,6 +54,7 @@ struct page {
 					 * it points to anon_vma object:
 					 * see PAGE_MAPPING_ANON below.
 					 */
+	void *address;			/* SLUB page address */
 	};
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 	spinlock_t ptl;

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-06 18:55:29.666210183 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-06 18:56:05.006727261 -0800
@@ -14,6 +14,8 @@ struct kmem_cache_cpu {
 	unsigned long freemap;	/* Bitmap of free objects */
 	struct page *page;	/* The slab from which we are allocating */
+	void *address;		/* Page address */
+	unsigned int size;	/* Size of an object including metadata */
 	int node;		/* The node of the page (or -1 for debug) */
 	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 };
 
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-06 18:12:29.450730970 -0800
+++ linux-2.6/mm/slub.c	2008-02-06 19:03:14.234529774 -0800
@@ -288,7 +288,7 @@ static inline int check_valid_pointer(st
 	if (!object)
 		return 1;
 
-	base = page_address(page);
+	base = page->address;
 	if (object < base || object >= base + s->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
 	}
@@ -300,9 +300,11 @@ static inline int check_valid_pointer(st
 static inline int object_index(struct kmem_cache *s, struct page *page,
 						const void *addr)
 {
-	VM_BUG_ON(addr - page_address(page) >= (PAGE_SIZE << s->order));
+	unsigned long offset = addr - page->address;
 
-	return (addr - page_address(page)) / s->size;
+	VM_BUG_ON(offset >= (PAGE_SIZE << s->order) || offset % s->size);
+
+	return offset / s->size;
 }
 
 static inline void *object_addr(struct kmem_cache *s, struct page *page,
@@ -310,7 +312,7 @@ static inline void *object_addr(struct k
 {
 	VM_BUG_ON(index < 0 || index >= s->objects);
 
-	return page_address(page) + index * s->size;
+	return page->address + index * s->size;
 }
 
 /* Loop over all objects in a slab */
@@ -468,7 +470,7 @@ static void slab_fix(struct kmem_cache *
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
-	u8 *addr = page_address(page);
+	u8 *addr = page->address;
 
 	print_tracking(s, p);
@@ -639,7 +641,7 @@ static int slab_pad_check(struct kmem_ca
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
+	start = page->address;
 	end = start + (PAGE_SIZE << s->order);
 	length = s->objects * s->size;
 	remainder = end - (start + length);
@@ -1076,6 +1078,7 @@ static struct page *new_slab(struct kmem
 	if (n)
 		atomic_long_inc(&n->nr_slabs);
 	page->slab = s;
+	page->address = page_address(page);
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
 			SLAB_STORE_USER | SLAB_TRACE))
@@ -1085,7 +1088,7 @@ static struct page *new_slab(struct kmem
 	BUG_ON(!page->freemap);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(page_address(page), POISON_INUSE,
+		memset(page->address, POISON_INUSE,
 			PAGE_SIZE << s->order);
 
 	if (SlabDebug(page) || s->ctor) {
@@ -1116,6 +1119,7 @@ static void __free_slab(struct kmem_cach
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	page->address = NULL;
 	__free_pages(page, s->order);
 }
@@ -1349,12 +1353,12 @@ static void unfreeze_slab(struct kmem_ca
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = c->freemap != 0;
 
-	printk("Deactivate slab %s page freemap=%lx cpu freemap=%lx\n",
-		s->name, page->freemap, c->freemap);
 	page->freemap |= c->freemap;
+	c->freemap = 0;
 	c->page = NULL;
-	unfreeze_slab(s, page, c->freemap != 0);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -1449,7 +1453,6 @@ load_freelist:
 	index = __ffs(map);
 	__clear_bit(index, &map);
 	c->freemap = map;
-	c->node = page_to_nid(c->page);
 unlock_out:
 	slab_unlock(c->page);
 	return object_addr(s, c->page, index);
@@ -1461,6 +1464,8 @@ new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
 		c->page = new;
+		c->address = new->address;
+		c->node = page_to_nid(new);
 		goto load_freelist;
 	}
 
@@ -1479,6 +1484,8 @@ new_slab:
 		slab_lock(new);
 		SetSlabFrozen(new);
 		c->page = new;
+		c->address = new->address;
+		c->node = page_to_nid(new);
 		goto load_freelist;
 	}
 	return NULL;
 }
@@ -1521,7 +1528,7 @@ static __always_inline void *slab_alloc(
 		int index;
 
 		index = __ffs(map);
-		object = object_addr(s, c->page, index);
+		object = c->address + index * c->size;
 		__clear_bit(index, &c->freemap);
 	}
 	local_irq_restore(flags);
@@ -1614,14 +1621,18 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-	struct page *page, int index, void *addr)
+	struct page *page, const void *object, void *addr)
 {
 	unsigned long flags;
 	struct kmem_cache_cpu *c;
+	int index;
+	unsigned long offset;
 
 	local_irq_save(flags);
-// debug_check_no_locks_freed(x, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
+	offset = object - c->address;
+	index = offset / c->size;
+// debug_check_no_locks_freed(object, s->objsize);
 	if (likely(page == c->page && c->node >= 0))
 		__set_bit(index, &c->freemap);
 	else
@@ -1636,8 +1647,7 @@ void kmem_cache_free(struct kmem_cache *
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, object_index(s, page, x),
-		__builtin_return_address(0));
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -1808,6 +1818,7 @@ static void init_kmem_cache_cpu(struct k
 	c->freemap = 0;
 	c->node = 0;
 	c->objsize = s->objsize;
+	c->size = s->size;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -2560,8 +2571,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object_index(page->slab, page, x),
-		__builtin_return_address(0));
+	slab_free(page->slab, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
@@ -3057,7 +3067,7 @@ static int validate_slab(struct kmem_cac
 		unsigned long *map)
 {
 	void *p;
-	void *addr = page_address(page);
+	void *addr = page->address;
 	int index;
 
 	if (!check_slab(s, page) ||
@@ -3338,7 +3348,7 @@ static int add_location(struct loc_track
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		struct page *page, enum track_item alloc)
 {
-	void *addr = page_address(page);
+	void *addr = page->address;
 	DECLARE_BITMAP(map, s->objects);
 	void *p;
 	int index;
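
For review convenience, here is a minimal user-space sketch (not part of the patch) of the freemap fast paths above: allocation is find-first-set plus clear-bit against the cached page address, and free recovers the object index as (object - address) / size. The toy_* names are illustrative and do not exist in slub.c, and __ffs() is approximated with __builtin_ctzl():

/* Illustrative only -- mirrors the slab_alloc()/slab_free() fast paths. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_OBJECTS	16	/* objects per slab (s->objects) */
#define TOY_SIZE	64	/* object size incl. metadata (s->size) */

struct toy_slab {
	unsigned long freemap;	/* bit set => object is free */
	void *address;		/* cached base, like page->address */
};

static void toy_init(struct toy_slab *slab, void *base)
{
	slab->address = base;
	slab->freemap = (1UL << TOY_OBJECTS) - 1;	/* all objects free */
}

static void *toy_alloc(struct toy_slab *slab)
{
	unsigned int index;

	if (!slab->freemap)
		return NULL;				/* slab exhausted */
	index = __builtin_ctzl(slab->freemap);		/* __ffs(map) */
	slab->freemap &= ~(1UL << index);		/* __clear_bit() */
	return (char *)slab->address + index * TOY_SIZE;
}

static void toy_free(struct toy_slab *slab, const void *object)
{
	unsigned long offset = (const char *)object - (const char *)slab->address;
	unsigned int index = offset / TOY_SIZE;

	assert(index < TOY_OBJECTS && offset % TOY_SIZE == 0);
	slab->freemap |= 1UL << index;			/* __set_bit() */
}

int main(void)
{
	struct toy_slab slab;
	void *base = malloc(TOY_OBJECTS * TOY_SIZE);
	void *a, *b;

	toy_init(&slab, base);
	a = toy_alloc(&slab);
	b = toy_alloc(&slab);
	printf("a=%p b=%p delta=%td\n", a, b, (char *)b - (char *)a);
	toy_free(&slab, a);
	printf("reused: %d\n", toy_alloc(&slab) == a);	/* prints 1 */
	free(base);
	return 0;
}

The free fast path can then recover the index with two loads from kmem_cache_cpu (c->address and c->size) instead of calling object_index() against struct page, which is why both are cached in kmem_cache_cpu by this patch.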