---
 mm/slub.c |   63 +++++++++++++++++++++++++++------------------------------------
 1 file changed, 27 insertions(+), 36 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-18 14:07:56.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-18 14:09:34.000000000 -0700
@@ -1314,17 +1314,13 @@ static void add_partial(struct kmem_cach
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static inline void lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	BUG_ON(!SlabPartial(page));
-	if (slab_trylock(page)) {
-		list_del(&page->lru);
-		n->nr_partial--;
-		SetSlabFrozen(page);
-		ClearSlabPartial(page);
-		return 1;
-	}
-	return 0;
+	list_del(&page->lru);
+	n->nr_partial--;
+	SetSlabFrozen(page);
+	ClearSlabPartial(page);
 }
 
 /*
@@ -1344,11 +1340,11 @@ static struct page *get_partial_node(str
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
-			goto out;
-	page = NULL;
-out:
+	if (n->nr_partial) {
+		page = container_of(n->partial.next, struct page, lru);
+		lock_and_freeze_slab(n, page);
+	} else
+		page = NULL;
 	spin_unlock(&n->list_lock);
 	return page;
 }
@@ -1437,7 +1433,6 @@ static void unfreeze_slab(struct kmem_ca
 			add_partial(s, page, tail);
 		else
 			add_full(s, page);
-	slab_unlock(page);
 }
 
 /* Free an object in a slab and return the old freelist pointer */
@@ -1445,9 +1440,11 @@ static inline void **free_object(struct
 {
 	void **prior;
 
+	slab_lock(page);
 	prior = page->freelist;
 	object[offset] = prior;
 	page->freelist = object;
+	slab_unlock(page);
 	return prior;
 }
 
@@ -1458,9 +1455,11 @@ static inline void **alloc_object(struct
 {
 	void **object;
 
+	slab_lock(page);
 	object = page->freelist;
 	if (object)
 		page->freelist = object[offset];
+	slab_unlock(page);
 	return object;
 }
 
@@ -1471,8 +1470,10 @@ static inline void **get_freelist(struct
 {
 	void **list;
 
+	slab_lock(page);
 	list = page->freelist;
 	page->freelist = NULL;
+	slab_unlock(page);
 	return list;
 }
 
@@ -1505,7 +1506,6 @@ static void deactivate_slab(struct kmem_
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	slab_lock(c->page);
 	deactivate_slab(s, c);
 }
 
@@ -1580,7 +1580,6 @@ static void *__slab_alloc(struct kmem_ca
 	if (!c->page)
 		goto new_slab;
 
-	slab_lock(c->page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 load_freelist:
@@ -1593,7 +1592,6 @@ load_freelist:
 
 	c->freelist = object[c->offset];
 	c->node = page_to_nid(c->page);
-	slab_unlock(c->page);
 	return object;
 
 another_slab:
@@ -1630,19 +1628,20 @@ new_slab:
 				 * want the current one since its cache hot
 				 */
 				discard_slab(s, new);
-				slab_lock(c->page);
 				goto load_freelist;
 			}
 			/* New slab does not fit our expectations */
 			flush_slab(s, c);
 		}
-		slab_lock(new);
 		SetSlabFrozen(new);
 		c->page = new;
 		goto load_freelist;
 	}
 	return NULL;
 debug:
+	/* FIXME: This only works on UP. There needs to be some way
+	 * to guarantee slab consistency.
+	 */
 	if (!c->page->freelist)
 		goto another_slab;
 	if (!alloc_debug_processing(s, c->page, c->page->freelist, addr))
@@ -1650,7 +1649,6 @@ debug:
 
 	object = alloc_object(c->page, c->offset);
 	c->node = -1;
-	slab_unlock(c->page);
 	return object;
 }
 
@@ -1717,8 +1715,6 @@ static void __slab_free(struct kmem_cach
 	void *prior;
 	void **object = (void *)x;
 
-	slab_lock(page);
-
 	if (unlikely(SlabDebug(page)))
 		goto debug;
 checks_ok:
@@ -1730,15 +1726,12 @@ checks_ok:
 	 */
 	if (unlikely(!prior))
 		add_partial(s, page, 0);
-
-out_unlock:
-	slab_unlock(page);
 	return;
 
 debug:
-	if (!free_debug_processing(s, page, x, addr))
-		goto out_unlock;
-	goto checks_ok;
+	if (free_debug_processing(s, page, x, addr))
+		goto checks_ok;
+	return;
 }
 
 /*
@@ -2761,7 +2754,7 @@ int kmem_cache_shrink(struct kmem_cache
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
 			int inuse = count_inuse(s, page);
 
-			if (!inuse && slab_trylock(page)) {
+			if (!inuse) {
 				/*
 				 * Must hold slab lock here because slab_free
 				 * may have freed the last object and be
@@ -2770,7 +2763,6 @@ int kmem_cache_shrink(struct kmem_cache
 				ClearSlabPartial(page);
 				list_del(&page->lru);
 				n->nr_partial--;
-				slab_unlock(page);
 				discard_slab(s, page);
 			} else {
 				list_move(&page->lru,
@@ -3103,12 +3095,11 @@ static int validate_slab(struct kmem_cac
 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
-	if (slab_trylock(page)) {
-		validate_slab(s, page, map);
-		slab_unlock(page);
-	} else
-		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
-			s->name, page);
+	/*
+	 * FIXME: Need some way to stop allocs and frees from the slab
+	 * being inspected.
+	 */
+	validate_slab(s, page, map);
 
 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
 		if (!SlabDebug(page))
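
For reference, here is a minimal standalone sketch of the locking model the patch moves to; it is illustration only, not kernel code. A pthread mutex stands in for slab_lock(page), and the hypothetical toy_slab/toy_alloc/toy_free names mirror how alloc_object()/free_object() above now take the slab lock around just the freelist update, instead of __slab_alloc()/__slab_free() holding it across the whole fast path.

/*
 * Standalone illustration only -- not kernel code. A pthread mutex models
 * the slab lock; toy_alloc()/toy_free() mirror alloc_object()/free_object()
 * in the patch: each takes the lock just around the freelist manipulation.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_object {
	struct toy_object *next;
};

struct toy_slab {
	pthread_mutex_t lock;		/* stands in for slab_lock(page) */
	struct toy_object *freelist;
};

/* Mirrors alloc_object(): pop the first free object under the lock */
static struct toy_object *toy_alloc(struct toy_slab *slab)
{
	struct toy_object *object;

	pthread_mutex_lock(&slab->lock);
	object = slab->freelist;
	if (object)
		slab->freelist = object->next;
	pthread_mutex_unlock(&slab->lock);
	return object;
}

/* Mirrors free_object(): push the object and return the old freelist head */
static struct toy_object *toy_free(struct toy_slab *slab,
					struct toy_object *object)
{
	struct toy_object *prior;

	pthread_mutex_lock(&slab->lock);
	prior = slab->freelist;
	object->next = prior;
	slab->freelist = object;
	pthread_mutex_unlock(&slab->lock);
	return prior;
}

int main(void)
{
	struct toy_slab slab = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freelist = NULL,
	};
	struct toy_object objs[2];

	toy_free(&slab, &objs[0]);
	toy_free(&slab, &objs[1]);
	printf("first allocation: %p\n", (void *)toy_alloc(&slab));
	return 0;
}

As in the patch, the lock covers only the freelist manipulation itself; the FIXMEs above mark the places (the debug allocation path and validate_slab_slab()) that still need some other way to hold off concurrent allocs and frees.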