Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-07-02 13:52:27.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-07-02 14:00:28.000000000 -0700
@@ -449,8 +449,8 @@
 	printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
 			s->name, reason, object, page);
-	printk(KERN_ERR "    offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
-		object - addr, page->flags, page->inuse, page->freelist);
+	printk(KERN_ERR "    offset=%tu flags=0x%04lx freelist=0x%p\n",
+		object - addr, page->flags, page->freelist);
 	if (object > addr + 16)
 		print_section("Bytes b4", object - 16, 16);
 	print_section("Object", object, min(s->objsize, 128));
@@ -684,13 +684,6 @@
 			page_count(page));
 		return 0;
 	}
-	if (page->inuse > s->objects) {
-		slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
-			"mapping=0x%p count=%d",
-			s->name, page->inuse, s->objects, page->flags,
-			page->mapping, page_count(page));
-		return 0;
-	}
 	/* Slab_pad_check fixes things up after itself */
 	slab_pad_check(s, page);
 	return 1;
@@ -719,7 +712,6 @@
 			slab_err(s, page, "Freepointer 0x%p corrupt", fp);
 			page->freelist = NULL;
-			page->inuse = s->objects;
 			printk(KERN_ERR "@@@ SLUB %s: Freelist "
 				"cleared. Slab 0x%p\n", s->name, page);
@@ -732,24 +724,16 @@
 		nr++;
 	}
-	if (page->inuse != s->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", s, page, page->inuse,
-			s->objects - nr);
-		page->inuse = s->objects - nr;
-		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
-			"Slab @0x%p\n", s->name, page);
-	}
 	return search == NULL;
 }

 static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
 {
 	if (s->flags & SLAB_TRACE) {
-		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+		printk(KERN_INFO "TRACE %s %s 0x%p fp=0x%p\n",
 			s->name,
 			alloc ? "alloc" : "free",
-			object, page->inuse,
+			object,
 			page->freelist);

 		if (!alloc)
@@ -828,7 +812,6 @@
 	 */
 	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
 						s->name, page);
-	page->inuse = s->objects;
 	page->freelist = NULL;
 	/* Fix up fields that may be corrupted */
 	page->offset = s->offset / sizeof(void *);
@@ -1054,7 +1037,6 @@

 	page->freelist = start;
 	page->lockless_freelist = NULL;
-	page->inuse = 0;
 out:
 	if (flags & __GFP_WAIT)
 		local_irq_disable();
@@ -1138,14 +1120,6 @@
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
 static void add_partial(struct kmem_cache_node *n, struct page *page)
 {
 	spin_lock(&n->list_lock);
@@ -1154,17 +1128,6 @@
 	spin_unlock(&n->list_lock);
 }

-static void remove_partial(struct kmem_cache *s,
-						struct page *page)
-{
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
-	list_del(&page->lru);
-	n->nr_partial--;
-	spin_unlock(&n->list_lock);
-}
-
 /*
  * Lock slab and remove from the partial list.
  *
@@ -1283,31 +1246,12 @@
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

 	ClearSlabFrozen(page);
-	if (page->inuse) {
-
-		if (page->freelist)
-			add_partial(n, page);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
-			add_full(n, page);
-		slab_unlock(page);
-	} else {
-		if (n->nr_partial < MIN_PARTIAL) {
-			/*
-			 * Adding an empty slab to the partial slabs in order
-			 * to avoid page allocator overhead. This slab needs
-			 * to come after the other slabs with objects in
-			 * order to fill them up. That way the size of the
-			 * partial list stays small. kmem_cache_shrink can
-			 * reclaim empty slabs from the partial list.
-			 */
-			add_partial_tail(n, page);
-			slab_unlock(page);
-		} else {
-			slab_unlock(page);
-			discard_slab(s, page);
-		}
-	}
+	if (page->freelist)
+		add_partial(n, page);
+	else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+		add_full(n, page);
+	slab_unlock(page);
 }

 /*
@@ -1354,7 +1298,6 @@
 		/* And put onto the regular freelist */
 		object[page->offset] = page->freelist;
 		page->freelist = object;
-		page->inuse--;
 	}
 	unfreeze_slab(s, page);
 }
@@ -1442,7 +1385,6 @@
 	object = page->freelist;
 	page->lockless_freelist = object[page->offset];
-	page->inuse = s->objects;
 	page->freelist = NULL;
 	slab_unlock(page);
 	local_irq_restore(flags);
@@ -1495,7 +1437,6 @@
 	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;

-	page->inuse++;
 	page->freelist = object[page->offset];
 	slab_unlock(page);
 	local_irq_restore(flags);
@@ -1597,14 +1538,10 @@
 checks_ok:
 	prior = object[page->offset] = page->freelist;
 	page->freelist = object;
-	page->inuse--;

 	if (unlikely(SlabFrozen(page)))
 		goto out_unlock;

-	if (unlikely(!page->inuse))
-		goto slab_empty;
-
 	/*
 	 * Objects left in the slab. If it
 	 * was not on the partial list before
@@ -1618,18 +1555,6 @@
 	local_irq_restore(flags);
 	return;

-slab_empty:
-	if (prior)
-		/*
-		 * Slab still on the partial list.
-		 */
-		remove_partial(s, page);
-
-	slab_unlock(page);
-	discard_slab(s, page);
-	local_irq_restore(flags);
-	return;
-
 debug:
 	if (!free_debug_processing(s, page, x, addr))
 		goto out_unlock;
@@ -1876,7 +1801,6 @@
 	n = page->freelist;
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmalloc_caches, n);
-	page->inuse++;
 	kmalloc_caches->node[node] = n;
 	setup_object_debug(kmalloc_caches, page, n);
 	init_kmem_cache_node(n);
@@ -2130,6 +2054,28 @@
 }
 EXPORT_SYMBOL(kmem_cache_name);

+static inline int slab_count_freelist(void **x, int offset)
+{
+	int count = 0;
+
+	while (x) {
+		count++;
+		x = x[offset];
+	}
+	return count;
+}
+
+int slab_inuse(struct kmem_cache *s, struct page *page)
+{
+	int n = s->objects - slab_count_freelist(page->freelist,
+						page->offset);
+
+	if (page->lockless_freelist > LOCKLESS_OFF)
+		n -= slab_count_freelist(page->lockless_freelist,
+						page->offset);
+	return n;
+}
+
 /*
  * Attempt to free all slabs on a node. Return the number of slabs we
  * were unable to free.
@@ -2137,19 +2083,18 @@
 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
 			struct list_head *list)
 {
-	int slabs_inuse = 0;
 	unsigned long flags;
 	struct page *page, *h;

 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, list, lru)
-		if (!page->inuse) {
+		if (!slab_inuse(s, page)) {
 			list_del(&page->lru);
 			discard_slab(s, page);
-		} else
-			slabs_inuse++;
+			n->nr_partial--;
+		}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return slabs_inuse;
+	return n->nr_partial;
 }

 /*
@@ -2427,7 +2372,8 @@
 	 * list_lock. page->inuse here is the upper limit.
 	 */
 	list_for_each_entry_safe(page, t, &n->partial, lru) {
-		if (!page->inuse && slab_trylock(page)) {
+		int inuse = slab_inuse(s, page);
+		if (!inuse && slab_trylock(page)) {
 			/*
 			 * Must hold slab lock here because slab_free
 			 * may have freed the last object and be
@@ -2440,7 +2386,7 @@
 		} else {
 			if (n->nr_partial > MAX_PARTIAL)
 				list_move(&page->lru,
-					slabs_by_inuse + page->inuse);
+					slabs_by_inuse + inuse);
 		}
 	}

@@ -3144,7 +3090,7 @@
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
-			x += page->inuse;
+			x += slab_inuse(s, page);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 		return x;
 	}
@@ -3184,7 +3130,7 @@
 			int x = 0;

 			if (flags & SO_OBJECTS)
-				x = page->inuse;
+				x = slab_inuse(s, page);
 			else
 				x = 1;
 			total += x;
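
For readers following the change, below is a minimal, self-contained userspace sketch of the counting approach that the new slab_inuse() relies on: instead of maintaining a page->inuse counter on every alloc/free, the number of allocated objects is derived on demand by walking the chain of free objects. The names used here (fake_slab, count_freelist, fake_inuse, NR_OBJECTS, OBJ_WORDS) are invented for illustration and are not part of the patch; the kernel version above additionally subtracts whatever is parked on the per-cpu lockless_freelist.

/*
 * Toy model: a "slab" of NR_OBJECTS objects whose free objects are
 * chained through a pointer stored at word 'offset' inside each object,
 * the same layout trick SLUB uses for its freelists.
 */
#include <stdio.h>

#define NR_OBJECTS	8	/* objects per fake slab */
#define OBJ_WORDS	4	/* object size in pointer-sized words */

struct fake_slab {
	void *freelist;				/* head of the free chain */
	void *objects[NR_OBJECTS][OBJ_WORDS];
};

/* Walk the free chain and count its members. */
static int count_freelist(void **p, int offset)
{
	int count = 0;

	while (p) {
		count++;
		p = p[offset];
	}
	return count;
}

/* Objects in use = capacity minus objects still on the freelist. */
static int fake_inuse(struct fake_slab *slab, int offset)
{
	return NR_OBJECTS - count_freelist(slab->freelist, offset);
}

int main(void)
{
	struct fake_slab slab;
	int i, offset = 0;

	/* Chain every object onto the freelist, as slab construction would. */
	slab.freelist = slab.objects[0];
	for (i = 0; i < NR_OBJECTS - 1; i++)
		slab.objects[i][offset] = slab.objects[i + 1];
	slab.objects[NR_OBJECTS - 1][offset] = NULL;

	printf("inuse after init: %d\n", fake_inuse(&slab, offset));

	/* "Allocate" two objects by popping them off the freelist. */
	for (i = 0; i < 2; i++)
		slab.freelist = ((void **)slab.freelist)[offset];

	printf("inuse after two allocations: %d\n", fake_inuse(&slab, offset));
	return 0;
}

The obvious tradeoff is that the count becomes O(free objects) instead of O(1), which is presumably why the patch only computes it in slow paths: freeing per-node slab lists, kmem_cache_shrink, and the partial-list object counts reported through sysfs.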