Index: linux-2.6.21-rc2/mm/slub.c
===================================================================
--- linux-2.6.21-rc2.orig/mm/slub.c	2007-02-28 17:35:26.000000000 -0800
+++ linux-2.6.21-rc2/mm/slub.c	2007-02-28 18:46:29.000000000 -0800
@@ -165,16 +165,12 @@ static void print_section(char *text, u8
  */
 static void *get_freepointer(struct kmem_cache *s, void *object)
 {
-	void **p = object;
-
-	return p[s->offset];
+	return *(void **)(object + s->offset);
 }
 
 static void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 {
-	void **p = object;
-
-	p[s->offset] = fp;
+	*(void **)(object + s->offset) = fp;
 }
 
 /*
@@ -208,10 +204,10 @@ static void init_tracking(struct kmem_ca
 
 static void print_trailer(struct kmem_cache *s, u8 *p)
 {
-	unsigned int off;
+	unsigned int off;	/* Offset of last byte */
 
 	if (s->offset)
-		off = (s->offset + 1) * sizeof(void *);
+		off = s->offset + sizeof(void *);
 	else
 		off = s->inuse;
 
@@ -220,7 +216,7 @@ static void print_trailer(struct kmem_ca
 			s->inuse - s->objsize);
 
 	printk(KERN_ERR "FreePointer %p: %p\n",
-			p + s->offset * sizeof(void *),
+			p + s->offset,
 			get_freepointer(s, p));
 
 	if (s->flags & SLAB_STORE_USER) {
@@ -326,21 +322,35 @@ static int check_valid_pointer(struct km
  * And therefore no slab options that rely on these boundaries may be used with
  * merged slabcaches.
  */
+
+static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
+{
+	unsigned long off = s->inuse;	/* The end of info */
+
+	if (s->offset)
+		/* Freepointer is placed after the object. */
+		off += sizeof(void *);
+
+	if (s->flags & SLAB_STORE_USER)
+		/* We also have user information there */
+		off += 2 * sizeof(void *);
+
+	if (s->size == off)
+		return 1;
+
+	if (check_bytes(p + off, POISON_INUSE, s->size - off))
+		return 1;
+
+	object_err(s, page, p, "Padding check fails");
+	return 0;
+}
+
 static int check_object(struct kmem_cache *s, struct page *page,
 					void *object, int active)
 {
 	u8 *p = object;
 	u8 *endobject = object + s->objsize;
 
-	/* Offset of first byte after free pointer */
-	unsigned long off = s->inuse;
-
-	if (s->offset) {
-		off += sizeof(void *);
-		if (s->flags & SLAB_STORE_USER)
-			off += 2 * sizeof(void *);
-	}
-
 	/* Single object slabs do not get policed */
 	if (s->objects == 1)
 		return 1;
@@ -358,7 +368,7 @@ static int check_object(struct kmem_cach
 	if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
 		!check_bytes(endobject, POISON_INUSE,
 			s->inuse - s->objsize))
-		object_err(s, page, p, "Alignment Filler check fails");
+		object_err(s, page, p, "Alignment Padding check fails");
 
 	if (s->flags & SLAB_POISON) {
 		if (!active && (!check_bytes(p, POISON_FREE, s->objsize - 1) ||
@@ -366,10 +376,8 @@ static int check_object(struct kmem_cach
 			object_err(s, page, p, "Poison");
 			return 0;
 		}
-		if (s->size > off && !check_bytes(p + off,
-				POISON_INUSE, s->size - off))
-			object_err(s, page, p,
-				"Interobject Filler check fails");
+		if (!check_pad_bytes(s, page, p))
+			return 0;
 	}
 
 	if (s->offset == 0 && active)
@@ -402,7 +410,7 @@ static int check_slab(struct kmem_cache
 			page_count(page));
 		return 0;
 	}
-	if (page->offset != s->offset) {
+	if (page->offset * sizeof(void *) != s->offset) {
 		printk(KERN_CRIT "SLUB: %s Corrupted offset %lu in slab @%p"
 			" flags=%lx mapping=%p count=%d\n",
 			s->name, page->offset * sizeof(void *), page,
@@ -621,7 +629,7 @@ static struct page *new_slab(struct kmem
 	n = get_node(s, page_to_nid(page));
 	if (n)
 		atomic_long_inc(&n->nr_slabs);
-	page->offset = s->offset;
+	page->offset = s->offset / sizeof(void *);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -632,7 +640,7 @@ static struct page *new_slab(struct kmem
 	if (s->objects > 1) {
 		void *start = page_address(page);
 		void *end = start + s->objects * s->size;
-		void **last = start;
+		void *last = start;
 		void *p = start + s->size;
 
 		if (unlikely(s->flags & SLAB_POISON))
@@ -642,11 +650,11 @@ static struct page *new_slab(struct kmem
 				init_object(s, last, 0);
 				init_tracking(s, last);
 			}
-			last[s->offset] = p;
+			set_freepointer(s, last, p);
 			last = p;
 			p += s->size;
 		}
-		last[s->offset] = NULL;
+		set_freepointer(s, last, NULL);
 		page->freelist = start;
 		page->inuse = 0;
 	if (PageError(page)) {
@@ -1389,7 +1397,7 @@ int kmem_cache_open(struct kmem_cache *s
 		 * This is the case if we do RCU, have a constructor or
 		 * destructor.
 		 */
-		s->offset = size / sizeof(void *);
+		s->offset = size;
 		size += sizeof(void *);
 	}
 
@@ -1423,9 +1431,9 @@ int kmem_cache_open(struct kmem_cache *s
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
-			"order=%u offset=%lu flags=%lx\n",
+			"order=%u offset=%u flags=%lx\n",
 			s->name, (unsigned long)size, s->size, s->order,
-			s->offset * sizeof(void *), flags);
+			s->offset, flags);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_open);
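
Not part of the patch: below is a minimal, self-contained userspace sketch of the byte-offset
free pointer scheme the patch converts SLUB to (the free pointer now lives s->offset bytes
into each object instead of being indexed in units of sizeof(void *)). struct fake_cache,
get_fp() and set_fp() are illustrative stand-ins for struct kmem_cache, get_freepointer()
and set_freepointer(); they are not kernel identifiers.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct kmem_cache: only the fields needed here. */
struct fake_cache {
	size_t size;	/* total bytes consumed by one object slot */
	size_t offset;	/* byte offset of the free pointer within a slot */
};

/* Mirrors the patched get_freepointer(): plain byte arithmetic, no index. */
static void *get_fp(struct fake_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

/* Mirrors the patched set_freepointer(). */
static void set_fp(struct fake_cache *s, void *object, void *fp)
{
	*(void **)((char *)object + s->offset) = fp;
}

int main(void)
{
	struct fake_cache s = { .size = 64, .offset = 32 };
	char *slab = malloc(4 * s.size);	/* a pretend slab holding 4 objects */
	void *p, *last;
	int i;

	if (!slab)
		return 1;

	/* Chain the slots into a freelist, as the patched new_slab() does. */
	last = slab;
	for (i = 1; i < 4; i++) {
		p = slab + i * s.size;
		set_fp(&s, last, p);
		last = p;
	}
	set_fp(&s, last, NULL);

	/* Walk the freelist through the byte-offset free pointers. */
	for (p = slab; p; p = get_fp(&s, p))
		printf("object %p -> next free %p\n", p, get_fp(&s, p));

	free(slab);
	return 0;
}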