Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-30 21:51:23.937081429 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-31 15:10:29.622023594 -0700
@@ -12,6 +12,8 @@
 #include 
 #include 
 
+#define SLABIFIER_DEBUG
+
 #ifdef SLABIFIER_DEBUG
 #define DBUG_ON(_x) BUG_ON(_x)
 #else
@@ -607,7 +609,11 @@
 static void *slab_alloc_node(struct slab_cache *sc, gfp_t gfpflags,
 		int node)
 {
+#ifdef CONFIG_NUMA
 	return __slab_alloc(sc, gfpflags, node);
+#else
+	return slab_alloc(sc, gfpflags);
+#endif
 }
 
 /* Figure out on which slab object the object resides */
@@ -636,84 +642,98 @@
 		return;
 
 	page = get_object_page(object);
-	if (unlikely(!page)) {
-		printk(KERN_CRIT "slab_free %s size %d: attempt to free object"
-			"(%p) outside of slab.\n", s->sc.name, s->size, object);
-		goto dumpret;
-	}
-
-	if (!s) {
-		if (unlikely(!page->slab)) {
-			printk(KERN_CRIT
-				"slab_free : no slab(NULL) for object %p.\n",
-				object);
-			goto dumpret;
-		}
-		sc = page->slab;
-		s = (void *) sc;
-	} else
-		if (unlikely(sc != page->slab)) {
-			printk(KERN_CRIT "slab_free %s: object at %p"
-				" belongs to slab %p\n",
-				s->sc.name, object, page->slab);
-			dump_stack();
-			sc = page->slab;
-			s = (void *) sc;
-		}
+	if (unlikely(!page))
+		goto bad_slab;
 
-	if (unlikely(!check_valid_pointer(s, page, object, NULL))) {
-dumpret:
-		dump_stack();
-		printk(KERN_CRIT "***** Trying to continue by not"
-			"freeing object.\n");
-		return;
-	}
+	if (unlikely(sc != page->slab))
+		goto slab_mismatch;
+redo:
+	if (unlikely(s->objects == 1))
+		goto single_object_slab;
 
-	if (unlikely(s->objects == 1)) {
-		discard_slab(s, page);
-		return;
-	}
+	if (unlikely(!check_valid_pointer(s, page, object, NULL)))
+		goto dumpret;
 
 	local_irq_save(flags);
 	slab_lock(page);
 
 #ifdef SLABIFIER_DEBUG
-	if (on_freelist(s, page, object)) {
-		printk(KERN_CRIT "slab_free %s: object %p already free.\n",
-			s->sc.name, object);
-		dump_stack();
-		goto out_unlock;
-	}
+	if (on_freelist(s, page, object))
+		goto double_free;
 #endif
 
-	prior = page->freelist;
-	object[s->offset] = prior;
-
+	prior = object[s->offset] = page->freelist;
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(PageActive(page)))
-		goto out_unlock;
-
-	if (unlikely(page->inuse == 0)) {
-		remove_partial(s, page);
-		check_free_chain(s, page);
+	if (likely(PageActive(page) || (page->inuse && prior))) {
+out_unlock:
 		slab_unlock(page);
-		discard_slab(s, page);
-		goto out;
+		local_irq_restore(flags);
+		return;
 	}
 
-	if (unlikely(!prior))
+	if (!prior) {
 		/*
 		 * Page was fully used before. It will only have one free
 		 * object now. So move to the partial list.
 		 */
 		add_partial(s, page);
+		goto out_unlock;
+	}
 
-out_unlock:
+	/*
+	 * Slab is completely free
+	 */
+	remove_partial(s, page);
+	check_free_chain(s, page);
 	slab_unlock(page);
-out:
+	discard_slab(s, page);
 	local_irq_restore(flags);
+	return;
+
+
+
+single_object_slab:
+	discard_slab(s, page);
+	return;
+
+#ifdef SLABIFIER_DEBUG
+double_free:
+	printk(KERN_CRIT "slab_free %s: object %p already free.\n",
+		s->sc.name, object);
+	dump_stack();
+	goto out_unlock;
+#endif
+
+bad_slab:
+	printk(KERN_CRIT "slab_free %s size %d: attempt to free object"
+		"(%p) outside of slab.\n", s->sc.name, s->size, object);
+dumpret:
+	dump_stack();
+	printk(KERN_CRIT "***** Trying to continue by not"
+		"freeing object.\n");
+	return;
+
+slab_mismatch:
+	if (unlikely(!page->slab)) {
+		printk(KERN_CRIT
+			"slab_free : no slab(NULL) for object %p.\n",
+			object);
+		goto dumpret;
+	}
+
+	if (s) {
+		printk(KERN_CRIT "slab_free %s: object at %p"
+			" belongs to slab %s\n",
+			s->sc.name, object, page->slab->name);
+		dump_stack();
+	}
+	sc = page->slab;
+	s = (void *) sc;
+	goto redo;
+
 }
 
 /*