Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-26 12:48:17.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-26 13:49:57.000000000 -0800
@@ -42,8 +42,9 @@
  * to push back per cpu slabs if they are unused
  * for a longer time period.
  *
- * PageError		Slab requires special handling due to debut
- *			options set or a single page slab
+ * PageError		Slab requires special handling due to debug
+ *			options set or a single page slab. This moves
+ *			slab handling out of the fast path.
  */
 
 /*
@@ -52,6 +53,8 @@
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL | \
 		SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
+#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
+		SLAB_STORE_USER | SLAB_POISON)
 /*
  * Set of flags that will prevent slab merging
  */
@@ -200,6 +203,103 @@
 }
 
 /*
+ * Object debugging
+ */
+static void print_section(char *text, u8 *addr, unsigned int length)
+{
+	int i;
+	int newline = 1;
+
+	for (i = 0; i < length; i++) {
+		if (newline) {
+			printk(KERN_ERR "%s %p: ", text, addr + i);
+			newline = 0;
+		}
+		printk(" %2x", addr[i]);
+		if ((i % 16) == 15) {
+			printk("\n");
+			newline = 1;
+		}
+	}
+}
+
+static void print_trailer(struct kmem_cache *s, u8 *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		print_section("Redzone", p + s->objsize, s->inuse - s->objsize);
+	if (s->inuse != s->size)
+		print_section("Filler", p + s->inuse, s->size - s->inuse);
+}
+
+static void object_err(struct kmem_cache *s, struct page *page,
+			u8 *object, char *reason)
+{
+	u8 *addr = page_address(page);
+
+	printk(KERN_ERR "SLUB: %s failure in %s@%p offset=%ld flags=%lx inuse=%d freelist=%p\n",
+		reason, s->name, object, object - addr, page->flags, page->inuse, page->freelist);
+
+	print_section("Object", object, min(s->objsize, 128));
+	print_trailer(s, object);
+	if (object != addr) {
+		printk(KERN_ERR "Prior object trailer:\n");
+		print_trailer(s, object - s->size);
+	}
+}
+
+static void init_object(struct kmem_cache *s, void *object, int active)
+{
+#if 0
+	u8 *p = object;
+
+	if (s->flags & SLAB_POISON) {
+		memset(p, POISON_FREE, s->objsize -1);
+		p[s->objsize -1] = POISON_END;
+	}
+
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p + s->objsize,
+			active ? RED_ACTIVE : RED_INACTIVE,
+			s->inuse - s->objsize);
+#endif
+}
+
+static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+{
+	while (bytes) {
+		if (*start != (u8)value)
+			return 0;
+		start++;
+		bytes--;
+	}
+	return 1;
+}
+
+static int check_object(struct kmem_cache *s, struct page *page,
+					void *object, int active)
+{
+#if 0
+	u8 *p = object;
+
+	if (s->flags & SLAB_RED_ZONE)
+		if (!check_bytes(p + s->objsize, active ? RED_ACTIVE : RED_INACTIVE,
+			s->inuse - s->objsize)) {
+			object_err(s, page, object,
+			active ? "Redzone Active" : "Redzone Inactive");
+			return 0;
+		}
+	if ((s->flags & SLAB_POISON) && !active)
+		if (!check_bytes(p, POISON_FREE, s->objsize -1) ||
+			p[s->objsize -1] != POISON_END) {
+			object_err(s, page, p, "Poison");
+			return 0;
+		}
+#endif
+	return 1;
+}
+
+
+/*
  * Locking for each individual slab using the pagelock
  */
 static __always_inline void slab_lock(struct page *page)
@@ -466,14 +566,23 @@
 		void *p = start + s->size;
 
 		while (p < end) {
+			init_object(s, last, 0);
 			last[s->offset] = p;
 			last = p;
 			p += s->size;
 		}
+		init_object(s, last, 0);
 		last[s->offset] = NULL;
 		page->freelist = start;
 		page->inuse = 0;
 		check_free_chain(s, page);
+		if (s->flags & SLAB_POISON) {
+			unsigned long leftover = start +
+				(PAGE_SIZE << s->order) - end;
+
+			if (leftover)
+				memset(end, POISON_INUSE, leftover);
+		}
 	}
 
 out:
@@ -626,6 +735,11 @@
 	SetPageReferenced(page);
 	slab_unlock(page);
 	local_irq_restore(flags);
+	if (unlikely(PageError(page))) {
+		if (!check_object(s, page, object, 0))
+			dump_stack();
+		init_object(s, object, 1);
+	}
 	return object;
 
 another_slab:
@@ -712,8 +826,11 @@
 			goto slab_mismatch;
 		if (s->objects == 1)
 			goto single_object_slab;
-		if (unlikely(!check_valid_pointer(s, page, object, NULL)))
+		if (!check_valid_pointer(s, page, object, NULL))
 			goto dumpret;
+		if (!check_object(s, page, object, 1))
+			goto dumpret;
+		init_object(s, object, 0);
 	}
 
 	slab_lock(page);
@@ -977,21 +1094,30 @@
 				strlen(slub_debug_slabs) == 0)))
 			flags |= slub_debug;
 
+	printk("kmem_cache_open %s slub_debug=%lx slub_debug_slabs=%s flags=%lx\n", name,
+		slub_debug, slub_debug_slabs, flags);
+
 	s->name = name;
 	s->ctor = ctor;
 	s->dtor = dtor;
 	s->objsize = size;
 	s->flags = flags;
+	size = ALIGN(size, sizeof(void *));
+
 	/*
-	 * Here is the place to add other management type information
-	 * to the end of the object F.e. debug info
+	 * If we redzone then check if we have space through above
+	 * alignment. If not then add an additional word, so
+	 * that we have a guard value to check for overwrites.
 	 */
-	size = ALIGN(size, sizeof(void *));
+	if ((s->flags & SLAB_RED_ZONE) && size == s->objsize)
+		size += sizeof(void *);
+
 	s->inuse = size;
 
 	if (size * 2 < (PAGE_SIZE << calculate_order(size)) &&
-		((flags & SLAB_DESTROY_BY_RCU) || ctor || dtor)) {
+		((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+			ctor || dtor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
 		 * permitted to overwrite the first word of the object on
@@ -1116,7 +1242,7 @@
 
 	/* Attempt to free all objects */
 	for_each_online_node(node) {
-		struct kmem_cache_node *n = s->node[node];
+		struct kmem_cache_node *n = get_node(s, node);
 
 		free_list(s, n, &n->partial);
 		if (atomic_long_read(&n->nr_slabs))
@@ -1223,12 +1349,14 @@
 
 static int __init setup_slub_debug(char *str)
 {
-	printk(KERN_CRIT "setup_slub_debug=%s\n", str);
-
-	if (!str || *str == ',')
-		slub_debug = SLAB_DEBUG_FREE | SLAB_RED_ZONE |
-			SLAB_STORE_USER | SLAB_POISON;
+	if (!str)
+		slub_debug = DEBUG_DEFAULT_FLAGS;
 	else
+	if (*str == '=') {
+		str++;
+		if (!*str || *str == ',')
+			slub_debug = DEBUG_DEFAULT_FLAGS;
+		else
 	for( ;str && *str != ','; str++)
 		switch (*str) {
 		case 'f' : case 'F' : slub_debug |= SLAB_DEBUG_FREE;break;
@@ -1238,9 +1366,10 @@
 		default: printk(KERN_CRIT "slub_debug option '%c' unknown. skipped\n",*str);
 		}
-
+	}
 	if (*str == ',')
 		slub_debug_slabs = str + 1;
+	printk("setup_slub_debug slub_debug=%lx slub_debug_slabs=%s\n", slub_debug, slub_debug_slabs);
 	return 1;
 }
 
@@ -1596,7 +1725,7 @@
 	unsigned long cpu_slabs;
 	unsigned long partial_slabs;
 	unsigned long objects;
-	unsigned char options[14];
+	unsigned char options[15];
 	char *d = options;
 	char *x;
 	unsigned long nodes[nr_node_ids];