---
 include/linux/slub_def.h |    2 ++
 init/Kconfig             |    7 +++++++
 mm/slab.c                |    2 ++
 mm/slub.c                |   32 ++++++++++++++++++++++++++------
 4 files changed, 37 insertions(+), 6 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-29 18:39:54.000000000 -0700
+++ slub/mm/slub.c	2007-05-29 18:39:55.000000000 -0700
@@ -374,7 +374,7 @@ static struct track *get_track(struct km
 {
 	struct track *p;
 
-	if (s->offset)
+	if (s->offset > s->objsize)
 		p = object + s->offset + sizeof(void *);
 	else
 		p = object + s->inuse;
@@ -387,7 +387,7 @@ static void set_track(struct kmem_cache
 {
 	struct track *p;
 
-	if (s->offset)
+	if (s->offset > s->objsize)
 		p = object + s->offset + sizeof(void *);
 	else
 		p = object + s->inuse;
@@ -452,7 +452,7 @@ static void print_trailer(struct kmem_ca
 		print_section(KERN_INFO " Redzone ", p + s->objsize,
 			s->inuse - s->objsize);
 
-	if (s->offset)
+	if (s->offset > s->objsize)
 		off = s->offset + sizeof(void *);
 	else
 		off = s->inuse;
@@ -586,7 +586,7 @@ static int check_pad_bytes(struct kmem_c
 {
 	unsigned long off = s->inuse;	/* The end of info */
 
-	if (s->offset)
+	if (s->offset > s->objsize)
 		/* Freepointer is placed after the object. */
 		off += sizeof(void *);
 
@@ -665,7 +665,7 @@ static int check_object(struct kmem_cach
 		check_pad_bytes(s, page, p);
 	}
 
-	if (!s->offset && active)
+	if (s->offset < s->objsize && active)
 		/*
 		 * Object and freepointer overlap. Cannot check
 		 * freepointer while object is allocated.
@@ -1931,6 +1931,25 @@ static int calculate_sizes(struct kmem_c
 	 */
 	size = ALIGN(size, sizeof(void *));
+
+#ifdef CONFIG_STABLE
+	if (size >= 2*sizeof(void *)) {
+		/*
+		 * For SLUB robustness we use the second word. The first word
+		 * is likely to be corrupted by a write after the object end
+		 * or a write after free. This means we do not fail because
+		 * of a corrupted free pointer. We continue with the
+		 * corrupted object like SLAB.
+		 */
+		s->offset = sizeof(void *);
+	} else
+#endif
+	/*
+	 * Object is too small to push the free pointer back a word, or this
+	 * is not a release kernel. We prefer failures over object corruption.
+	 */
+	s->offset = 0;
+
 #ifdef CONFIG_SLUB_DEBUG
 	/*
 	 * If we are Redzoning then check if there is some space between the
@@ -2502,12 +2521,13 @@ void __init kmem_cache_init(void)
 
 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
 				nr_cpu_ids * sizeof(struct page *);
-
+#ifndef CONFIG_STABLE
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" Processors=%d, Nodes=%d\n",
 		KMALLOC_SHIFT_HIGH, cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
+#endif
 }
 
 /*
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-29 18:39:30.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-29 18:39:55.000000000 -0700
@@ -74,6 +74,7 @@ extern struct kmem_cache kmalloc_caches[
  */
 static inline int kmalloc_index(size_t size)
 {
+#ifndef CONFIG_STABLE
 	/*
	 * We should return 0 if size == 0 (which would result in the
	 * kmalloc caller to get NULL) but we use the smallest object
@@ -81,6 +82,7 @@ static inline int kmalloc_index(size_t s
	 * we can discover locations where we do 0 sized allocations.
	 */
 	WARN_ON_ONCE(size == 0);
+#endif
 
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
Index: slub/init/Kconfig
===================================================================
--- slub.orig/init/Kconfig	2007-05-29 18:39:30.000000000 -0700
+++ slub/init/Kconfig	2007-05-29 18:39:55.000000000 -0700
@@ -65,6 +65,13 @@ endmenu
 
 menu "General setup"
 
+config STABLE
+	bool "Stable kernel"
+	help
+	  If the kernel is configured to be a stable kernel then various
+	  checks that are only of interest to kernel development will be
+	  omitted.
+
 config LOCALVERSION
 	string "Local version - append to kernel release"
 	help
Index: slub/mm/slab.c
===================================================================
--- slub.orig/mm/slab.c	2007-05-29 18:39:30.000000000 -0700
+++ slub/mm/slab.c	2007-05-29 18:39:55.000000000 -0700
@@ -774,7 +774,9 @@ static inline struct kmem_cache *__find_
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+#ifndef CONFIG_STABLE
 	WARN_ON_ONCE(size == 0);
+#endif
 	while (size > csizep->cs_size)
 		csizep++;
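
Illustration only, not part of the patch: a rough userspace sketch of the layout choice the s->offset > s->objsize test distinguishes. With CONFIG_STABLE the free pointer sits in the second word inside the object, so the tracking data stays at s->inuse; when debugging relocates the free pointer past the object, the tracking data follows the free pointer, as in the patched get_track()/set_track(). The struct and the example offsets below are simplified stand-ins, not the real struct kmem_cache.

/*
 * Sketch of the two free pointer layouts. "struct layout" is a
 * stand-in for the few struct kmem_cache fields involved.
 */
#include <stdio.h>
#include <stddef.h>

struct layout {
	size_t objsize;		/* size of the object itself */
	size_t offset;		/* offset of the free pointer */
	size_t inuse;		/* end of the object data in use */
};

/* Mirrors the placement logic used by the patched get_track()/set_track(). */
static size_t track_offset(const struct layout *s)
{
	if (s->offset > s->objsize)
		/* Free pointer lives after the object; track follows it. */
		return s->offset + sizeof(void *);
	/* Free pointer overlaps the object; track starts at s->inuse. */
	return s->inuse;
}

int main(void)
{
	/* CONFIG_STABLE layout: free pointer in the second word of the object. */
	struct layout stable = { .objsize = 32, .offset = sizeof(void *), .inuse = 32 };

	/* Debug layout: free pointer relocated after the object (made-up numbers). */
	struct layout debug = { .objsize = 32, .offset = 40, .inuse = 48 };

	printf("stable: freeptr at %zu, track at %zu\n", stable.offset, track_offset(&stable));
	printf("debug:  freeptr at %zu, track at %zu\n", debug.offset, track_offset(&debug));
	return 0;
}

The point of the stable layout, per the comment added to calculate_sizes(), is that the first word of an object is the most likely victim of a write past the end of the previous object or of a write after free, so keeping the free pointer out of that word means such corruption damages the object rather than the allocator's free list.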