Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-31 17:51:03.826631969 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-31 18:03:53.243914359 -0700
@@ -1,5 +1,6 @@
 /*
- * Generic Slabifier for the allocato abstraction framework.
+ * Generic Slabifier for the allocator abstraction framework.
+ *
  * The allocator synchronizes using slab based locks and only
  * uses a centralized list lock to manage the pool of partial slabs.
  *
@@ -39,7 +40,8 @@ struct slab {
  * 2. slab->list_lock
  *
  * The slabifier assigns one slab for allocation to each processor.
- * Allocationss only occur from these active slabs.
+ * Allocations only occur from these active slabs.
+ *
  * If a cpu slab is active then a workqueue thread checks every 10
  * seconds if the cpu slab is still in use. The cpu slab is pushed back
  * to the list if inactive [only needed for SMP].
@@ -54,9 +56,6 @@ struct slab {
  * fast frees and allocations.
  */
 
-#define lru_to_last_page(_head) (list_entry((_head)->next, struct page, lru))
-#define lru_to_first_page(_head) (list_entry((_head)->next, struct page, lru))
-
 /*
  * Locking for each individual slab using the pagelock
  */
@@ -91,7 +90,7 @@ static void __always_inline remove_parti
 }
 
 /*
- * Get lock page and remove it from the partial list
+ * Lock page and remove it from the partial list
  *
  * Must hold list_lock
  */
@@ -320,7 +319,8 @@ static void __always_inline putback_slab
 	}
 }
 
-static void deactivate_slab(struct slab *s, struct page *page, int cpu)
+static void __always_inline deactivate_slab(struct slab *s,
+	struct page *page, int cpu)
 {
 	s->active[cpu] = NULL;
 	smp_wmb();
@@ -411,6 +411,7 @@ static void drain_all(struct slab *s)
  * The offset is used to relocate the free list link in each object. It is
  * therefore possible to move the free list link behind the object. This
  * is necessary for RCU to work properly and also useful for debugging.
+ *
  * However no freelists are necessary if there is only one element per
  * slab.
  */
@@ -471,6 +472,7 @@ static __always_inline void *__slab_allo
 		goto new_slab;
 
 	slab_lock(page);
+	check_free_chain(s, page);
 	if (unlikely(!page->freelist))
 		goto another_slab;
 
@@ -481,7 +483,6 @@ redo:
 	object = page->freelist;
 	page->freelist = next_object = object[page->offset];
 	__SetPageReferenced(page);
-	check_free_chain(s, page);
 	slab_unlock(page);
 	local_irq_restore(flags);
 	return object;
@@ -503,9 +504,9 @@ new_slab:
 		return NULL;
 	}
 
-	/* Racy check. If we mistakenly see no partial slabs then just
-	 * expand the partial list. If we mistakenly try to get a partial
-	 * slab then get_partials will return NULL.
+	/* Racy check. If we mistakenly see no partial slabs then we
+	 * just allocate an empty slab. If we mistakenly try to get a
+	 * partial slab then get_partials() will return NULL.
 	 */
 	if (s->nr_partial) {
 		page = get_partial(s, node);
@@ -618,7 +619,6 @@ out_unlock:
 	 * Slab is completely free
 	 */
 	remove_partial(s, page);
-	check_free_chain(s, page);
 	slab_unlock(page);
 	discard_slab(s, page);
 	local_irq_restore(flags);
@@ -832,7 +832,7 @@ static int free_list(struct slab *s, str
 	spin_lock_irqsave(&s->list_lock, flags);
 	list_for_each_entry_safe(page, h, list, lru)
 		if (!page->inuse)
-			discard_slab(s, lru_to_last_page(list));
+			discard_slab(s, page);
 		else
 			slabs_inuse++;
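
For readers unfamiliar with the offset-based freelist the patched comment refers to
(the free list link relocated to page->offset words inside each object, which is
what the object[page->offset] dereference in __slab_alloc walks), here is a minimal
userspace sketch of the idea. It is not part of the patch: NR_OBJECTS, OBJECT_SIZE
and the variable names are hypothetical simplifications of the slabifier's
page->freelist / page->offset scheme.

/*
 * Illustrative sketch only: a "slab" carved into equal-sized objects,
 * with the free list link stored at a configurable word offset inside
 * each free object.
 */
#include <stdio.h>

#define NR_OBJECTS	4
#define OBJECT_SIZE	8	/* words per object (hypothetical) */

int main(void)
{
	unsigned long slab[NR_OBJECTS * OBJECT_SIZE];
	unsigned long offset = 0;	/* word index of the link in each object */
	unsigned long *freelist = NULL;
	int i;

	/* Build the free list by threading the link through each free object. */
	for (i = NR_OBJECTS - 1; i >= 0; i--) {
		unsigned long *object = slab + i * OBJECT_SIZE;

		object[offset] = (unsigned long)freelist;
		freelist = object;
	}

	/* Allocation fast path: pop the head, follow the link at 'offset'. */
	while (freelist) {
		unsigned long *object = freelist;

		freelist = (unsigned long *)object[offset];
		printf("allocated object at %p\n", (void *)object);
	}
	return 0;
}

With a non-zero offset the link no longer overlaps the start of the object, which
is why the comment notes the link can be moved behind the object for RCU and for
debugging.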