Index: linux-2.6.19-mm1/mm/slub.c
===================================================================
--- linux-2.6.19-mm1.orig/mm/slub.c	2006-12-14 11:53:07.184294729 -0800
+++ linux-2.6.19-mm1/mm/slub.c	2006-12-14 15:13:31.685059949 -0800
@@ -236,6 +236,10 @@ static __always_inline void slab_unlock(
  */
 static void __always_inline add_partial(struct kmem_cache *s, struct page *page)
 {
+	if (page->inuse == s->objects) {
+		printk("Slab %s page=%p adding fully used slab\n", s->name, page);
+		dump_stack();
+	}
 	spin_lock(&s->list_lock);
 	s->nr_partial++;
 	list_add_tail(&page->lru, &s->partial);
@@ -411,6 +415,8 @@ void check_free_chain(struct kmem_cache
  */
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
+	printk(KERN_CRIT "discard_slab(%s, %p)\n", s->name, page);
+
 	atomic_long_dec(&s->nr_slabs);
 
 	page->mapping = NULL;
@@ -467,6 +473,7 @@ static struct page *new_slab(struct kmem
  */
 static void __always_inline putback_slab(struct kmem_cache *s, struct page *page)
 {
+	printk(KERN_CRIT "putback_slab(%s,%p) inuse=%d objects=%d\n", s->name, page, page->inuse, s->objects);
 	if (page->inuse) {
 		if (page->inuse < s->objects)
 			add_partial(s, page);
@@ -546,6 +553,8 @@ void check_flush_active(struct work_stru
 {
 	struct active_slab *a = container_of(w, struct active_slab, flush.work);
 
+	printk("check_flush_active: a=%p a->page=%p a->referenced=%d a->flush_active=%d\n",
+		a, a->page, a->referenced, a->flush_active);
 	if (!a->page)
 		return;
 
@@ -590,6 +599,11 @@ static __always_inline void *allocate(st
 	if (unlikely(!a->page))
 		goto new_slab;
 
+	if (node != -1 && page_to_nid(a->page) != node) {
+		deactivate_slab(a);
+		goto new_slab;
+	}
+
 	if (likely(a->nr_free))
 		goto have_object;
 
@@ -598,10 +612,10 @@ static __always_inline void *allocate(st
 	if (a->page->freelist)
 		goto switch_freelist;
 
-	if (node != -1 && page_to_nid(a->page) != node)
-		deactivate_slab(a);
-
 new_slab:
+	printk(KERN_CRIT "allocate new slab(%s,%x,%d)\n",
+		s->name, gfpflags, node);
+
 	a->page = get_partial(s, gfpflags, node);
 	if (unlikely(!a->page)) {
 		struct page *page;
@@ -646,9 +660,9 @@ new_slab:
 	}
 	__SetPageActive(a->page);
-	check_free_chain(s, a->page);
 
 switch_freelist:
+	printk(KERN_CRIT "switch_freelist\n");
 	a->freelist = a->page->freelist;
 	a->page->freelist = NULL;
 	a->nr_free = s->objects - a->page->inuse;
 
@@ -669,7 +683,12 @@ have_object:
 	}
 #endif
 out:
+	check_free_chain(s, a->page);
 	local_irq_restore(flags);
+//	printk(KERN_CRIT "return %p active freelist=%p nr_free=%d page "
+//		"inuse=%d freelist=%p\n", object, a->freelist, a->nr_free,
+//		a->page ? a->page->inuse : -1,
+//		a->page ? a->page->freelist : (void *)-1L);
 	return object;
 }
 
@@ -732,11 +751,20 @@ void kmem_cache_free(struct kmem_cache *
 #ifdef SLAB_DEBUG
 	if (unlikely(s != (void *)page->slab))
 		goto slab_mismatch;
+
 	if (unlikely(!check_valid_pointer(s, page, object, NULL)))
 		goto dumpret;
 #endif
 
 	local_irq_save(flags);
+
+#ifdef SLAB_DEBUG_KFREE
+	slab_lock(page);
+	if (on_freelist(s, page, object))
+		goto double_free;
+	slab_unlock(page);
+#endif
+
 	a = ACTIVE_SLAB(s, smp_processor_id());
 	if (a->page == page) {
 		void **object = x;
@@ -744,19 +772,19 @@ void kmem_cache_free(struct kmem_cache *
 		a->nr_free++;
 		object[s->offset] = a->freelist;
 		a->freelist = object;
-		goto out;
+out:
+		local_irq_restore(flags);
+		return;
 	}
 
-	if (unlikely(PageSlabsingle(page)))
-		goto single_object_slab;
+	if (unlikely(PageSlabsingle(page))) {
+		/* Slab must be empty */
+		discard_slab(s, page);
+		goto out;
+	}
 
 	slab_lock(page);
 
-#ifdef SLAB_DEBUG_KFREE
-	if (on_freelist(s, page, object))
-		goto double_free;
-#endif
-
 	prior = object[page->offset] = page->freelist;
 	page->freelist = object;
 	page->inuse--;
@@ -764,8 +792,7 @@ void kmem_cache_free(struct kmem_cache *
 	if (likely(PageActive(page) || (page->inuse && prior))) {
 out_unlock:
 		slab_unlock(page);
-		local_irq_restore(flags);
-		return;
+		goto out;
 	}
 
 	if (!prior) {
@@ -782,12 +809,8 @@ out_unlock:
 	 */
 	remove_partial(s, page);
 	slab_unlock(page);
-
-single_object_slab:
 	discard_slab(s, page);
-out:
-	local_irq_restore(flags);
-	return;
+	goto out;
 
 #ifdef SLAB_DEBUG_KFREE
 double_free:
@@ -951,6 +974,9 @@ int kmem_cache_open(struct kmem_cache *s
 {
 	int cpu;
 
+	printk("kmem_cache_open(%p, %s, %ld, %ld, %lx, %p, %p)\n",
+		s, name, (long)size, (long)align, flags, ctor, dtor);
+
 	BUG_ON(flags & SLUB_UNIMPLEMENTED);
 	memset(s, 0, sizeof(struct kmem_cache));
 	atomic_long_set(&s->nr_slabs, 0);
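
A note on the SLAB_DEBUG_KFREE change in kmem_cache_free() above: the on_freelist() walk
used to run only on the slow path, after the per-CPU active-slab fast path had already
returned, so a double free of an object belonging to the active slab was pushed onto
a->freelist without ever being checked. The patch hoists the check, under slab_lock(),
in front of the fast path so every free is vetted. The sketch below is a minimal
userspace model of that freelist walk, not the kernel code; struct slab, offset, and
on_freelist here are simplified stand-ins for the slub.c originals, where the link is
stored inside the object itself at index page->offset and the walk must hold the slab lock.

#include <stdio.h>
#include <stdbool.h>

struct slab {
	void *freelist;		/* head of the per-slab free chain */
	unsigned int offset;	/* word index of the link inside each free object */
};

/* Walk the free chain; true means the object was already freed. */
static bool on_freelist(struct slab *slab, void *object)
{
	for (void **p = slab->freelist; p; p = p[slab->offset])
		if (p == object)
			return true;
	return false;
}

int main(void)
{
	void *objects[2][2];			/* two objects, link stored in word 0 */
	struct slab slab = { .freelist = NULL, .offset = 0 };

	/* Free objects[0]: link it into the chain, as kmem_cache_free() does. */
	objects[0][slab.offset] = slab.freelist;
	slab.freelist = objects[0];

	printf("objects[0] double free? %d\n", on_freelist(&slab, objects[0]));	/* 1 */
	printf("objects[1] double free? %d\n", on_freelist(&slab, objects[1]));	/* 0 */
	return 0;
}

The walk is O(objects per slab), which is why it stays behind SLAB_DEBUG_KFREE instead
of being compiled in unconditionally.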