---
 mm/slub.c |  102 +++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 61 insertions(+), 41 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-20 10:31:03.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-20 10:36:21.000000000 -0700
@@ -1200,22 +1200,25 @@ static void discard_slab(struct kmem_cac
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void slab_lock(struct page *page)
+static __always_inline unsigned long slab_lock(struct page *page)
 {
-	bit_spin_lock(PG_locked, &page->flags);
+	while (TestSetPageLocked(page))
+		cpu_relax();
+	return page->flags;
 }
 
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void slab_unlock(struct page *page,
+						unsigned long state)
 {
-	bit_spin_unlock(PG_locked, &page->flags);
+	smp_wmb();
+	page->flags = state & ~(1 << PG_locked);
 }
 
-static __always_inline int slab_trylock(struct page *page)
+static __always_inline unsigned long slab_trylock(struct page *page)
 {
-	int rc = 1;
-
-	rc = bit_spin_trylock(PG_locked, &page->flags);
-	return rc;
+	if (TestSetPageLocked(page))
+		return 0;
+	return page->flags & ~(1 << PG_locked);
 }
 
 /*
@@ -1253,13 +1256,15 @@ static void remove_partial(struct kmem_c
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static inline unsigned long lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
-	if (slab_trylock(page)) {
+	unsigned long state;
+
+	state = slab_trylock(page);
+	if (state) {
 		list_del(&page->lru);
 		n->nr_partial--;
-		SetSlabFrozen(page);
-		return 1;
+		return state |= FROZEN;
 	}
 	return 0;
 }
@@ -1270,6 +1275,7 @@ static inline int lock_and_freeze_slab(s
 static struct page *get_partial_node(struct kmem_cache_node *n)
 {
 	struct page *page;
+	unsigned long state;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1281,9 +1287,13 @@ static struct page *get_partial_node(str
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
+	list_for_each_entry(page, &n->partial, lru) {
+		state = lock_and_freeze_slab(n, page);
+		if (state) {
+			page->flags = state;
 			goto out;
+		}
+	}
 	page = NULL;
 out:
 	spin_unlock(&n->list_lock);
@@ -1361,18 +1371,19 @@ static struct page *get_partial(struct k
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page,
+						unsigned long state)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	ClearSlabFrozen(page);
+	state &= ~FROZEN;
 	if (page->inuse) {
 
 		if (!is_end(page->freelist))
 			add_partial(n, page);
-		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+		else if ((state & SLABDEBUG) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
-		slab_unlock(page);
+		slab_unlock(page, state);
 
 	} else {
 		if (n->nr_partial < MIN_PARTIAL) {
@@ -1385,9 +1396,9 @@ static void unfreeze_slab(struct kmem_ca
 			 * reclaim empty slabs from the partial list.
 			 */
 			add_partial_tail(n, page);
-			slab_unlock(page);
+			slab_unlock(page, state);
 		} else {
-			slab_unlock(page);
+			slab_unlock(page, state);
 			discard_slab(s, page);
 		}
 	}
@@ -1396,7 +1407,8 @@ static void unfreeze_slab(struct kmem_ca
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s,
+			struct kmem_cache_cpu *c, unsigned long state)
 {
 	struct page *page = c->page;
 	/*
@@ -1417,13 +1429,15 @@ static void deactivate_slab(struct kmem_
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, state);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	slab_lock(c->page);
-	deactivate_slab(s, c);
+	unsigned long state;
+
+	state = slab_lock(c->page);
+	deactivate_slab(s, c, state);
 }
 
 /*
@@ -1493,6 +1507,7 @@ static void *__slab_alloc(struct kmem_ca
 {
 	void **object;
 	struct page *new;
+	unsigned long state;
 #ifdef FASTPATH_CMPXCHG
 	unsigned long flags;
 
@@ -1502,14 +1517,14 @@ static void *__slab_alloc(struct kmem_ca
 	if (!c->page)
 		goto new_slab;
 
-	slab_lock(c->page);
+	state = slab_lock(c->page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 load_freelist:
 	object = c->page->freelist;
 	if (unlikely(is_end(object)))
 		goto another_slab;
-	if (unlikely(SlabDebug(c->page)))
+	if (unlikely(state & SLABDEBUG))
 		goto debug;
 
 	object = c->page->freelist;
@@ -1518,7 +1533,7 @@ load_freelist:
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 unlock_out:
-	slab_unlock(c->page);
+	slab_unlock(c->page, state);
 out:
 #ifdef FASTPATH_CMPXCHG
 	preempt_disable();
@@ -1527,11 +1542,12 @@ out:
 	return object;
 
 another_slab:
-	deactivate_slab(s, c);
+	deactivate_slab(s, c, state);
 
 new_slab:
 	new = get_partial(s, gfpflags, node);
 	if (new) {
+		state = new->flags;
 		c->page = new;
 		goto load_freelist;
 	}
@@ -1560,14 +1576,13 @@ new_slab:
 			 * want the current one since its cache hot
 			 */
 			discard_slab(s, new);
-			slab_lock(c->page);
+			state = slab_lock(c->page);
 			goto load_freelist;
 		}
 		/* New slab does not fit our expectations */
 		flush_slab(s, c);
 	}
-	slab_lock(new);
-	SetSlabFrozen(new);
+	state = slab_lock(new) | FROZEN;
 	c->page = new;
 	goto load_freelist;
 }
@@ -1667,22 +1682,23 @@ static void __slab_free(struct kmem_cach
 {
 	void *prior;
 	void **object = (void *)x;
+	unsigned long state;
 #ifdef FASTPATH_CMPXCHG
 	unsigned long flags;
 
 	local_irq_save(flags);
 #endif
 
-	slab_lock(page);
+	state = slab_lock(page);
 
-	if (unlikely(SlabDebug(page)))
+	if (unlikely(state & SLABDEBUG))
 		goto debug;
checks_ok:
 	prior = object[offset] = page->freelist;
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(SlabFrozen(page)))
+	if (unlikely(state & FROZEN))
 		goto out_unlock;
 
 	if (unlikely(!page->inuse))
@@ -1697,7 +1713,7 @@ checks_ok:
 		add_partial(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
-	slab_unlock(page);
+	slab_unlock(page, state);
 #ifdef FASTPATH_CMPXCHG
 	local_irq_save(flags);
 #endif
@@ -1710,7 +1726,7 @@ slab_empty:
 	 */
 	remove_partial(s, page);
 
-	slab_unlock(page);
+	slab_unlock(page, state);
 #ifdef FASTPATH_CMPXCHG
 	local_irq_restore(flags);
 #endif
@@ -2735,6 +2751,7 @@ int kmem_cache_shrink(struct kmem_cache
 	struct list_head *slabs_by_inuse =
 		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
 	unsigned long flags;
+	unsigned long state;
 
 	if (!slabs_by_inuse)
 		return -ENOMEM;
@@ -2758,7 +2775,7 @@ int kmem_cache_shrink(struct kmem_cache
 		 * list_lock. page->inuse here is the upper limit.
 		 */
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
-			if (!page->inuse && slab_trylock(page)) {
+			if (!page->inuse && (state = slab_trylock(page))) {
 				/*
 				 * Must hold slab lock here because slab_free
 				 * may have freed the last object and be
@@ -2766,7 +2783,7 @@ int kmem_cache_shrink(struct kmem_cache
 				 */
 				list_del(&page->lru);
 				n->nr_partial--;
-				slab_unlock(page);
+				slab_unlock(page, state);
 				discard_slab(s, page);
 			} else {
 				list_move(&page->lru,
@@ -3099,9 +3116,12 @@ static int validate_slab(struct kmem_cac
 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
						unsigned long *map)
 {
-	if (slab_trylock(page)) {
+	unsigned long state;
+
+	state = slab_trylock(page);
+	if (state) {
 		validate_slab(s, page, map);
-		slab_unlock(page);
+		slab_unlock(page, state);
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
			s->name, page);
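A note for readers following the locking change: slab_lock() now returns a snapshot of page->flags taken while the lock is held, callers carry the FROZEN and SLABDEBUG bits in that cached word instead of touching page->flags with atomic bitops, and slab_unlock() stores the word back with the lock bit cleared. The sketch below is a minimal userspace illustration of that pattern only; struct fake_page, LOCKED_BIT, FROZEN and SLABDEBUG are made-up stand-ins, and C11 atomics take the place of TestSetPageLocked()/cpu_relax()/smp_wmb().

#include <stdatomic.h>
#include <stdio.h>

#define LOCKED_BIT	(1UL << 0)	/* stand-in for PG_locked */
#define FROZEN		(1UL << 1)	/* stand-in for the slab FROZEN state bit */
#define SLABDEBUG	(1UL << 2)	/* stand-in for the slab debug state bit */

struct fake_page {
	_Atomic unsigned long flags;
};

/* Acquire the per-page lock and return a snapshot of the flags word. */
static unsigned long fake_slab_lock(struct fake_page *page)
{
	while (atomic_fetch_or_explicit(&page->flags, LOCKED_BIT,
					memory_order_acquire) & LOCKED_BIT)
		;	/* spin; the kernel would call cpu_relax() here */
	return atomic_load_explicit(&page->flags, memory_order_relaxed);
}

/* Drop the lock by writing back a (possibly modified) flags word. */
static void fake_slab_unlock(struct fake_page *page, unsigned long state)
{
	atomic_store_explicit(&page->flags, state & ~LOCKED_BIT,
			      memory_order_release);
}

int main(void)
{
	struct fake_page page = { .flags = SLABDEBUG };
	unsigned long state;

	state = fake_slab_lock(&page) | FROZEN;	/* freeze while holding the lock */
	if (state & SLABDEBUG)
		puts("debug bit seen in the cached state word");
	fake_slab_unlock(&page, state);		/* writes back FROZEN, clears the lock bit */

	printf("final flags: %#lx\n", (unsigned long)page.flags);
	return 0;
}

One subtlety worth noting when reading the shrink and validate hunks above: slab_trylock() returns page->flags & ~(1 << PG_locked) and callers treat any non-zero value as success, which appears to rely on a slab page always having some other flag bit (such as PG_slab) set.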