Index: linux-2.6.21-rc1/mm/slub.c
===================================================================
--- linux-2.6.21-rc1.orig/mm/slub.c	2007-02-25 22:08:20.000000000 -0800
+++ linux-2.6.21-rc1/mm/slub.c	2007-02-25 23:30:30.000000000 -0800
@@ -50,8 +50,8 @@
 /*
  * Flags from the regular SLAB that we have not implemented:
  */
-#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_FREE | SLAB_DEBUG_INITIAL | \
-		SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+		SLAB_STORE_USER)
 
 /*
  * Enabling SLUB_DEBUG results in internal consistency checks.
@@ -66,6 +66,11 @@
 #ifdef SLUB_DEBUG
 #define SLUB_DEBUG_KFREE
 #endif
+#ifdef SLUB_DEBUG
+#define SLUB_DEFAULT_FLAGS (SLAB_POISON | SLAB_DEBUG_FREE)
+#else
+#define SLUB_DEFAULT_FLAGS 0
+#endif
 
 /*
  * Set of flags that will prevent slab merging
@@ -130,10 +135,35 @@ LIST_HEAD(slab_caches);
  * fast frees and allocs.
  */
 
+void printk_obj(struct kmem_cache *s, struct page *page, void *object)
+{
+	u8 *p = object;
+	int eol = 0;
+	int i;
+
+	printk(KERN_ERR "---------------------------------------------------------------\n");
+	printk(KERN_ERR "Slabcache:%s slab:%p flags:%lx object:%p\n", s->name,
+		page, page->flags, object);
+	for (i = 0; i < s->size; i++) {
+		if (!eol) {
+			printk(KERN_ERR "%p: ", p);
+			eol = 1;
+		}
+		printk(" %02x", *p++);
+		if ((i % 16) == 15) {
+			printk("\n");
+			eol = 0;
+		}
+	}
+	printk("\n");
+	printk(KERN_ERR "---------------------------------------------------------------\n");
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page * page;
 	int pages = 1 << s->order;
+	void *start;
 
 	if (s->order)
 		flags |= __GFP_COMP;
@@ -149,13 +179,16 @@ static struct page *allocate_slab(struct
 	if (!page)
 		return NULL;
 
+	start = page_address(page);
+	if (s->flags & SLAB_POISON)
+		memset(start, POISON_END, PAGE_SIZE << s->order);
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		pages);
 
 	if (unlikely(s->ctor)) {
-		void *start = page_address(page);
 		void *end = start + (pages << PAGE_SHIFT);
 		void *p;
 		int mode = 1;
@@ -203,6 +236,8 @@ static void rcu_free_slab(struct rcu_hea
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
+	/* check_object(s, page, object, SLAB_INACTIVE); */
+
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
 		/*
 		 * RCU free overloads the RCU head over the LRU
@@ -369,6 +404,7 @@ static int check_valid_pointer(struct km
 			" range (%p-%p) in page %p\n", s->name, s->size,
 			origin, object, base, base + s->objects * s->size,
 			page);
+		printk_obj(s, page, object);
 		return 0;
 	}
 
@@ -377,6 +413,7 @@ static int check_valid_pointer(struct km
 			"does not properly point "
 			"to an object in page %p\n",
 			s->name, s->size, origin, object, page);
+		printk_obj(s, page, object);
 		return 0;
 	}
 	return 1;
@@ -457,6 +494,35 @@ static void discard_slab(struct kmem_cac
 	free_slab(s, page);
 }
 
+static void init_object(struct kmem_cache *s, void *object, int poison)
+{
+	if (s->flags & SLAB_POISON)
+		memset(object, POISON_FREE, s->objsize);
+
+	if (s->flags & SLAB_RED_ZONE)
+		memset(object + s->objsize, poison,
+			s->inuse - s->objsize);
+
+	if (s->flags & SLAB_STORE_USER)
+		memset(object + s->inuse, 0, sizeof(void *));
+}
+
+static int check_object(struct kmem_cache *s, struct page *page,
+					void *object, int poison)
+{
+	u8 *p;
+
+	if (!(s->flags & SLAB_RED_ZONE))
+		return 1;
+
+	for (p = object + s->objsize; p < (u8 *)object + s->inuse; p++)
+		if (*p != poison) {
+			printk_obj(s, page, object);
+			return 0;
+		}
+	return 1;
+}
+
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
@@ -487,10 +553,12 @@ static struct page *new_slab(struct kmem
 		void *p = start + s->size;
 
 		while (p < end) {
+			init_object(s, last, RED_INACTIVE);
 			last[s->offset] = p;
 			last = p;
 			p += s->size;
 		}
+		init_object(s, last, RED_INACTIVE);
 		last[s->offset] = NULL;
 		page->freelist = start;
 		page->inuse = 0;
@@ -730,17 +798,19 @@ void kmem_cache_free(struct kmem_cache *
 #ifdef SLUB_DEBUG
 	if (unlikely(s != page->slab))
 		goto slab_mismatch;
-	if (unlikely(!check_valid_pointer(s, page, object, NULL)))
+	if (unlikely(!check_valid_pointer(s, page, object, NULL) ||
+			!check_object(s, page, x, RED_ACTIVE)))
 		goto dumpret;
-#endif
+	init_object(s, x, RED_INACTIVE);
+#endif
 	local_irq_save(flags);
 	if (unlikely(PageError(page)))
 		goto single_object_slab;
 	slab_lock(page);
 
-#ifdef SLUB_DEBUG_KFREE
-	if (on_freelist(s, page, object))
+#ifdef SLUB_DEBUG
+	if ((s->flags & SLAB_DEBUG_FREE) && on_freelist(s, page, object))
 		goto double_free;
 #endif
 
@@ -992,15 +1062,27 @@ int kmem_cache_open(struct kmem_cache *s
 	s->ctor = ctor;
 	s->dtor = dtor;
 	s->objsize = size;
-	s->flags = flags;
+	s->flags = flags | SLUB_DEFAULT_FLAGS;
 
 	/*
 	 * Here is the place to add other management type information
 	 * to the end of the object F.e. debug info
 	 */
 	size = ALIGN(size, sizeof(void *));
+
+	/*
+	 * If we redzone then check if we have space through the above
+	 * alignment. If not then add an additional word.
	 */
+	if ((s->flags & SLAB_RED_ZONE) && size == s->objsize)
+		size += sizeof(void *);
 	s->inuse = size;
+
+	/*
+	 * Maybe we need space to store the last user.
+	 */
+	if (s->flags & SLAB_STORE_USER)
+		size += sizeof(void *);
 	if (size * 2 < (PAGE_SIZE << calculate_order(size)) &&
		((flags & SLAB_DESTROY_BY_RCU) || ctor || dtor)) {
		/*
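
Note on the object layout the patch establishes (a sketch of the idea, not part of the patch): with SLUB_DEFAULT_FLAGS in effect, each object becomes objsize bytes of payload (filled with POISON_FREE while the object is free), a red zone covering the word-alignment gap up to inuse (painted with the active pattern while allocated and the inactive pattern while free; kmem_cache_open() adds an extra word only when the payload is already word aligned), and optionally one further word for SLAB_STORE_USER. init_object() paints this layout, check_object() rescans the red zone, and printk_obj() hex-dumps the object when a check fails.

The standalone userspace C program below demonstrates the same paint-and-scan technique. The demo_cache structure and the single-byte values chosen here for POISON_FREE, RED_ACTIVE and RED_INACTIVE are illustrative assumptions for this demo only; the kernel's real poison constants live in include/linux/poison.h.

/*
 * Minimal userspace sketch of the poison + red-zone technique above.
 * The byte values are assumed for illustration, not the kernel's.
 */
#include <stdio.h>
#include <string.h>

#define POISON_FREE	0x6b	/* payload fill while object is free */
#define RED_ACTIVE	0xcc	/* red zone fill while allocated */
#define RED_INACTIVE	0xbb	/* red zone fill while free */

struct demo_cache {
	size_t objsize;		/* usable payload bytes */
	size_t inuse;		/* objsize + red zone, word aligned */
};

/* Counterpart of the patch's init_object(): paint payload and red zone. */
static void demo_init_object(struct demo_cache *s, unsigned char *object,
			     int red)
{
	memset(object, POISON_FREE, s->objsize);
	memset(object + s->objsize, red, s->inuse - s->objsize);
}

/* Counterpart of check_object(): scan the red zone for overwrites. */
static int demo_check_object(struct demo_cache *s, unsigned char *object,
			     int red)
{
	unsigned char *p;

	for (p = object + s->objsize; p < object + s->inuse; p++)
		if (*p != red) {
			fprintf(stderr, "red zone overwritten at offset %zu\n",
				(size_t)(p - object));
			return 0;
		}
	return 1;
}

int main(void)
{
	struct demo_cache s = { .objsize = 13, .inuse = 16 };
	unsigned char obj[16];

	demo_init_object(&s, obj, RED_ACTIVE);	/* object handed out */
	obj[14] = 0;		/* one byte past objsize: simulated overrun */
	return demo_check_object(&s, obj, RED_ACTIVE) ? 0 : 1;
}

Running this prints "red zone overwritten at offset 14" and exits nonzero, which is the failure mode the patched kmem_cache_free() catches via check_object(..., RED_ACTIVE) before repainting the object with RED_INACTIVE.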