SLUB: Define functions for cpu slab handling instead of using PageActive

Use inline functions to access the per cpu bit. Instead of setting and
clearing PageActive directly, the SlabFrozen()/SetSlabFrozen()/
ClearSlabFrozen() helpers make the intent of each call site explicit.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   52 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 18 deletions(-)

Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-09 17:14:45.000000000 -0700
+++ slub/mm/slub.c	2007-05-09 17:20:19.000000000 -0700
@@ -78,9 +78,14 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive		The slab is used as a cpu cache. Allocations
- *			may be performed from the slab. The slab is not
- *			on any slab list and cannot be moved onto one.
+ * PageActive		The slab is frozen. It is not on
+ *			any of the lists. Only frees may be performed
+ *			on the slab. Frees may not put the slab onto
+ *			any partial list nor may they free the slab.
+ *
+ *			One use of this flag is to mark slabs that are
+ *			used for allocations. The slab will not be on
+ *			any slab list and cannot be moved onto one.
  *			The cpu slab may be equipped with an additional
  *			lockless_freelist that allows lockless access to
  *			free objects in addition to the regular freelist
@@ -91,6 +96,21 @@
  *			the fast path and disables lockless freelists.
  */
 
+static inline int SlabFrozen(struct page *page)
+{
+	return PageActive(page);
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+	SetPageActive(page);
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+	ClearPageActive(page);
+}
+
 static inline int SlabDebug(struct page *page)
 {
 #ifdef CONFIG_SLUB_DEBUG
@@ -1145,11 +1165,12 @@ static void remove_partial(struct kmem_c
  *
  * Must hold list_lock.
  */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
+		SetSlabFrozen(page);
 		return 1;
 	}
 	return 0;
 }
@@ -1173,7 +1194,7 @@ static struct page *get_partial_node(str
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
+		if (lock_and_freeze_slab(n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1252,10 +1273,11 @@ static struct page *get_partial(struct k
  *
  * On exit the slab lock will have been dropped.
  */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
+	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
@@ -1306,9 +1328,7 @@ static void deactivate_slab(struct kmem_
 		page->inuse--;
 	}
 	s->cpu_slab[cpu] = NULL;
-	ClearPageActive(page);
-
-	putback_slab(s, page);
+	unfreeze_slab(s, page);
 }
 
 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1399,9 +1419,7 @@ another_slab:
 
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-have_slab:
 		s->cpu_slab[cpu] = page;
-		SetPageActive(page);
 		goto load_freelist;
 	}
@@ -1431,7 +1449,9 @@ have_slab:
 			flush_slab(s, s->cpu_slab[cpu], cpu);
 		}
 		slab_lock(page);
-		goto have_slab;
+		SetSlabFrozen(page);
+		s->cpu_slab[cpu] = page;
+		goto load_freelist;
 	}
 	return NULL;
 debug:
@@ -1518,11 +1538,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;
 
-	if (unlikely(PageActive(page)))
-		/*
-		 * Cpu slabs are never on partial lists and are
-		 * never freed.
-		 */
+	if (unlikely(SlabFrozen(page)))
 		goto out_unlock;
 
 	if (unlikely(!page->inuse))
@@ -1554,7 +1570,7 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
-	if (!PageActive(page) && !page->freelist)
+	if (!SlabFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);
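
For readers following along outside the kernel tree, here is a minimal
userspace sketch of the accessor idiom the patch introduces: one bit
overloaded on a flags word, hidden behind named inline helpers so that
call sites say freeze/unfreeze instead of poking PG_active directly.
The struct page, flags layout, and bit position below are illustrative
stand-ins rather than the kernel definitions, and the sketch is
single-threaded where the kernel relies on atomic page flag operations.

/*
 * Standalone userspace sketch (not kernel code) of the pattern above.
 * The struct, the flag bit, and the bit position are invented here
 * purely for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PG_active	(1UL << 6)	/* illustrative bit position */

struct page {
	unsigned long flags;
};

/* Named accessors hide which bit encodes the frozen state. */
static inline int SlabFrozen(struct page *page)
{
	return !!(page->flags & PG_active);
}

static inline void SetSlabFrozen(struct page *page)
{
	page->flags |= PG_active;
}

static inline void ClearSlabFrozen(struct page *page)
{
	page->flags &= ~PG_active;
}

int main(void)
{
	struct page page = { .flags = 0 };

	/* lock_and_freeze_slab(): the slab leaves the partial list frozen */
	SetSlabFrozen(&page);
	assert(SlabFrozen(&page));

	/* slab_free() on a frozen slab skips all list handling */
	if (SlabFrozen(&page))
		printf("frozen: free skips partial/full list updates\n");

	/* deactivate_slab() -> unfreeze_slab(): back under list control */
	ClearSlabFrozen(&page);
	assert(!SlabFrozen(&page));
	return 0;
}

In the kernel the three helpers are simply renamed wrappers around the
existing PageActive()/SetPageActive()/ClearPageActive() operations, so
the change costs nothing at runtime; the gain is that call sites such as
lock_and_freeze_slab() and unfreeze_slab() now state what they mean.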