Index: linux-2.6.21-rc6/mm/slub.c
===================================================================
--- linux-2.6.21-rc6.orig/mm/slub.c	2007-04-12 15:35:26.000000000 -0700
+++ linux-2.6.21-rc6/mm/slub.c	2007-04-12 16:39:12.000000000 -0700
@@ -355,9 +355,16 @@ static void init_object(struct kmem_cach
 		s->inuse - s->objsize);
 }
 
+int check_byte_log;
+
 static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
 {
 	int rc=0;
+	unsigned int savebytes=bytes;
+	u8 *savestart = start;
+
+	BUG_ON(!bytes);
+
 	while (bytes) {
 		if (*start != (u8)value) {
 			rc = 0;
@@ -366,8 +373,11 @@ static int check_bytes(u8 *start, unsign
 		start++;
 		bytes--;
 	}
+	rc = 1;
 out:
-	printk(KERN_ERR "check_bytes(%p %x %d) result=%d\n", start, value, bytes, rc);
+	if (check_byte_log)
+		printk(KERN_ERR "check_bytes(%p %x %d) result=%d\n",
+			savestart, value, savebytes, rc);
 	return rc;
 }
 
@@ -491,7 +501,7 @@ static int check_object(struct kmem_cach
 			s->inuse - s->objsize)) {
 		object_err(s, page, object, active ?
 			"Redzone Active check fails" :
-			"Redzone Inactive check fails");\
+			"Redzone Inactive check fails");
 		/*
 		 * No need to fix it up. The next init_object
 		 * will clear it up
@@ -1444,7 +1454,7 @@ static int slub_nomerge;
 
 /*
  * Debug settings:
  */
 static int slub_debug;
-
+static char *slub_parameter;
 static char *slub_debug_slabs;
 
 /*
@@ -1677,6 +1687,15 @@ static int calculate_sizes(struct kmem_c
 	 */
 	size += 2 * sizeof(struct track);
 
+	if (flags & DEBUG_DEFAULT_FLAGS)
+		/*
+		 * Add some empty padding so that we can catch
+		 * overwrites from earlier objects rather than let
+		 * tracking information or the free pointer be
+		 * corrupted if a user writes before the start
+		 * of the object.
+		 */
+		size += sizeof(void *);
 	/*
 	 * Determine the alignment based on various parameters that the
 	 * user specified (this is unecessarily complex due to the attempt
@@ -1936,6 +1955,7 @@ __setup("slub_nomerge", setup_slub_nomer
 
 static int __init setup_slub_debug(char *str)
 {
+	slub_parameter = str;
 	if (!str || *str != '=')
 		slub_debug = DEBUG_DEFAULT_FLAGS;
 	else {
@@ -1982,7 +2002,7 @@ static struct kmem_cache *create_kmalloc
 		flags = SLAB_CACHE_DMA;
 
 	down_write(&slub_lock);
-	if (!kmem_cache_open(s, gfp_flags, name, max(32, size), ARCH_KMALLOC_MINALIGN,
+	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
 			flags, NULL, NULL))
 		goto panic;
 
@@ -2202,9 +2222,10 @@ void __init kmem_cache_init(void)
 		+ nr_cpu_ids * sizeof(struct page *);
 
 	printk(KERN_INFO "SLUB: General Slabs=%d, HW alignment=%d, "
-		"Processors=%d, Nodes=%d\n",
+		"Processors=%d, Nodes=%d Debug=%s\n",
 		KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES,
-		nr_cpu_ids, nr_node_ids);
+		nr_cpu_ids, nr_node_ids,
+		slub_parameter ? slub_parameter : "off");
 }
 
 /*
@@ -2474,11 +2495,15 @@ void *__kmalloc_node_track_caller(size_t
 
 #ifdef CONFIG_SYSFS
 
+struct page *corrupt_page[6];
+
 static int validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
 	unsigned long map[BITS_TO_LONGS(s->objects)];
+	int corrupt = 0;
+	int i;
 
 	printk(KERN_ERR "Validate slab %s %p->%p inuse=%d\n",
 		s->name, page, addr, page->inuse);
@@ -2491,8 +2516,14 @@ static int validate_slab(struct kmem_cac
 	/* Now we know that a valid freelist exists */
 	bitmap_zero(map, s->objects);
 
+	for (i = 0; i < 6; i++)
+		if (page == corrupt_page[i])
+			corrupt = 1;
+
+	check_byte_log = corrupt;
 	for(p = page->freelist; p; p = get_freepointer(s, p)) {
-		printk(KERN_ERR "%td free\n", (p-addr)/s->size);
+		if (corrupt)
+			printk(KERN_ERR "%td free\n", (p-addr)/s->size);
 		set_bit((p - addr) / s->size, map);
 		if (!check_object(s, page, p, 0)) {
 			printk(KERN_ERR "slab fails free object checks\n");
@@ -2502,12 +2533,14 @@ static int validate_slab(struct kmem_cac
 
 	for(p = addr; p < addr + s->objects * s->size; p += s->size)
 		if (!test_bit((p - addr) / s->size, map)) {
-			printk(KERN_ERR "%td used\n", (p-addr)/s->size);
+			if (corrupt)
+				printk(KERN_ERR "%td used\n", (p-addr)/s->size);
 			if (!check_object(s, page, p, 1)) {
 				printk(KERN_ERR "slab fails used object checks\n");
 				return 0;
 			}
 		}
+	check_byte_log = 0;
 	return 1;
 }
 
@@ -3307,27 +3340,53 @@ int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
-#if 1
+#define SLUB_SLAB_CORRUPTION_TEST
+#ifdef SLUB_SLAB_CORRUPTION_TEST
 	/* Some test code to verify that the detection of corrupt slabs works */
+	printk(KERN_ERR "SLUB resiliency testing\n");
+	printk(KERN_ERR "-----------------------\n");
+	printk(KERN_ERR "1. Corruption after allocation\n");
 
 	/* Overwrite redzone or next object pointer */
 	p = kzalloc(16, GFP_KERNEL);
 	p[16] = 0x12;
-	printk(KERN_CRIT "XXXXX SLUB TESTING: kmalloc-16 Corrupting with 0x12 at offset 16 after "
-		"object at %p\n", p);
+	printk(KERN_CRIT "kmalloc-16: Corrupting redzone/next pointer by writing 0x12 to %p\n", p + 16);
+
+	corrupt_page[0] = virt_to_head_page(p);
 
 	/* Overwrite next pointer (debug) or the following object (debug off) */
 	p = kzalloc(32, GFP_KERNEL);
 	p[32 + sizeof(void *)] = 0x34;
-	printk(KERN_CRIT "XXXXX SLUB TESTING: kmalloc-32 Corrupting with 0x23 at offset %td after "
-		"object at %p\n", 32 + sizeof(void *), p);
+	printk(KERN_CRIT "SLUB TESTING: Corrupting next pointer/following object in kmalloc-32 "
+		"by writing 0x34 to %p\n", p + 32 + sizeof(void *));
+	corrupt_page[1] = virt_to_head_page(p);
 
 	/* Random madness after the object */
 	obj = p = kzalloc(64, GFP_KERNEL);
 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
-	printk(KERN_CRIT "XXXXX SLUB TESTING: kmalloc-64 Corrupting with 0x56 at offset %td after "
-		"object at %p\n", p - obj, p);
+	printk(KERN_CRIT "SLUB TESTING: Corrupting random byte in kmalloc-64 with 0x56 at "
+		"%p\n", p);
 	*p = 0x56;
+	corrupt_page[2] = virt_to_head_page(p);
+
+	/* Corrupt after free */
+	/* Overwrite free pointer in non debug */
+	p = kzalloc(128, GFP_KERNEL);
+	kfree(p);
+	*p = 0x78;
+	corrupt_page[3] = virt_to_head_page(p);
+
+	/* Overwrite arbitrary byte in object */
+	p = kzalloc(256, GFP_KERNEL);
+	kfree(p);
+	p[50] = 0x9a;
+	corrupt_page[4] = virt_to_head_page(p);
+
+	/* Overwrite redzone */
+	p = kzalloc(512, GFP_KERNEL);
+	kfree(p);
+	p[512] = 0xab;
+	corrupt_page[5] = virt_to_head_page(p);
 #endif
 
 	return 0;