---
 mm/slub.c |  106 +++++++++++++++++++++++++++-----------------------------------
 1 file changed, 47 insertions(+), 59 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-10-28 14:48:03.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-10-28 15:32:21.000000000 -0700
@@ -100,7 +100,6 @@
  * the fast path and disables lockless freelists.
  */
 
-#define FROZEN (1 << PG_active)
 #define LOCKED (1 << PG_locked)
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -849,6 +848,10 @@ static noinline int alloc_debug_processi
 	set_track(s, object, TRACK_ALLOC, addr);
 	trace(s, page, object, 1);
 	init_object(s, object, 1);
+	if (get_freepointer(s, object) == page->end) {
+		remove_partial(s, page);
+		add_full(s, page);
+	}
 	return 1;
 
 bad:
@@ -902,7 +905,7 @@ static noinline int free_debug_processin
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!(state & FROZEN) && page->freelist == page->end)
+	if (page->freelist == page->end)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
@@ -1281,7 +1284,7 @@ static inline unsigned long lock_and_fre
 		list_del(&page->lru);
 		n->nr_partial--;
 		c->page = page;
-		return state | FROZEN;
+		return state;
 	}
 	return 0;
 }
@@ -1384,52 +1387,20 @@ static noinline unsigned long get_partia
 }
 
 /*
- * Move a page back to the lists.
- *
- * Must be called with the slab lock held.
- *
- * On exit the slab lock will have been dropped.
+ * Remove the cpu slab
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page,
-				int tail, unsigned long state)
+static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	state &= ~FROZEN;
-	if (page->inuse) {
+	struct page *page = c->page;
+	int on_partial;
+	unsigned long state;
 
-		if (page->freelist != page->end)
-			add_partial(s, page, tail);
-		else
-			add_full(s, page, state);
-		slab_unlock(page, state);
+	if (is_end(c->freelist) || !c->page)
+		return;
 
-	} else {
-		if (get_node(s, page_to_nid(page))->nr_partial
-							< MIN_PARTIAL) {
-			/*
-			 * Adding an empty slab to the partial slabs in order
-			 * to avoid page allocator overhead. This slab needs
-			 * to come after the other slabs with objects in
-			 * order to fill them up. That way the size of the
-			 * partial list stays small. kmem_cache_shrink can
-			 * reclaim empty slabs from the partial list.
-			 */
-			add_partial(s, page, 1);
-			slab_unlock(page, state);
-		} else {
-			slab_unlock(page, state);
-			discard_slab(s, page);
-		}
-	}
-}
+	state = slab_lock(c->page);
 
-/*
- * Remove the cpu slab
- */
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c,
-			unsigned long state)
-{
-	struct page *page = c->page;
-	int tail = 1;
+	on_partial = c->freelist == page->end;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1442,8 +1413,6 @@ static void deactivate_slab(struct kmem_
 	while (unlikely(!is_end(c->freelist))) {
 		void **object;
 
-		tail = 0;	/* Hot objects. Put the slab first */
-
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
@@ -1454,15 +1423,37 @@ static void deactivate_slab(struct kmem_
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page, tail, state);
+
+	if (page->inuse) {
+		if (!on_partial)
+			/* Page was not on the partial list before but needs to be */
+			add_partial(s, page, 0);
+		slab_unlock(page, state);
+
+	} else {
+		int need_partial;
+
+		need_partial = get_node(s,
+			page_to_nid(page))->nr_partial < MIN_PARTIAL;
+
+		if (!on_partial && need_partial)
+			/* Page was not on the partial list before but needs to be */
+			add_partial(s, page, 0);
+
+		if (on_partial && !need_partial)
+			/* Empty slab that should no longer be on the partial list */
+			remove_partial(s, page);
+
+		slab_unlock(page, state);
+
+		if (!on_partial || !need_partial)
+			discard_slab(s, page);
+	}
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	unsigned long state;
-
-	state = slab_lock(c->page);
-	deactivate_slab(s, c, state);
+	deactivate_slab(s, c);
 }
 
 /*
@@ -1549,7 +1540,7 @@ static noinline unsigned long get_new_sl
 		flush_slab(s, c);
 	}
 	c->page = page;
-	return slab_lock(page) | FROZEN;
+	return slab_lock(page);
 }
 
 /*
@@ -1581,13 +1572,13 @@ static void *__slab_alloc(struct kmem_ca
 	preempt_enable_no_resched();
 #endif
 	if (likely(c->page)) {
-		state = slab_lock(c->page);
-
 		if (unlikely(node_match(c, node) &&
-			c->page->freelist != c->page->end))
+			c->page->freelist != c->page->end)) {
+			state = slab_lock(c->page);
 			goto load_freelist;
+		}
 
-		deactivate_slab(s, c, state);
+		deactivate_slab(s, c);
 	}
 
 another_slab:
@@ -1739,9 +1730,6 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(state & FROZEN))
-		goto out_unlock;
-
 	if (unlikely(!page->inuse))
 		goto slab_empty;