Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-31 16:18:36.643140616 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-31 16:42:38.518873865 -0700
@@ -12,14 +12,6 @@
 #include
 #include
 
-#define SLABIFIER_DEBUG
-
-#ifdef SLABIFIER_DEBUG
-#define DBUG_ON(_x) BUG_ON(_x)
-#else
-#define DBUG_ON(_x)
-#endif
-
 struct slab {
 	struct slab_cache sc;
 #ifdef CONFIG_SMP
@@ -47,15 +39,15 @@ struct slab {
  * 2. slab->list_lock
  *
  * The slabifier assigns one slab for allocation to each processor.
- * Allocators only occur from these active slabs.
- * If a cpu slab is active thena workqueue thread checks every 10
+ * Allocations only occur from these active slabs.
+ * If a cpu slab is active then a workqueue thread checks every 10
  * seconds if the cpu slab is still in use. The cpu slab is pushed back
- * to the list if inactive.
+ * to the list if inactive [only needed for SMP].
  *
- * Leftover slabs with free elements are kept on the partial list.
+ * Leftover slabs with free elements are kept on a partial list.
  * There is no list for full slabs. If an object in a full slab is
  * freed then the slab will show up again on the partial lists.
- * Otherwise we have no way of tracking used slabs.
+ * Otherwise there is no need to track filled up slabs.
  *
  * Slabs are freed when they become empty. Teardown and setup is
  * minimal so we rely on the page allocators per cpu caches for
@@ -99,7 +91,8 @@ static void __always_inline remove_parti
 }
 
 /*
- * Get a page and remove it from the partial list
+ * Lock a page and remove it from the partial list
+ *
  * Must hold list_lock
  */
 static __always_inline int lock_and_del_slab(struct slab *s,
@@ -251,7 +244,7 @@ static int on_freelist(struct slab *s, s
 			s->objects - nr);
 try_recover:
 	printk(KERN_CRIT "****** Trying to continue by marking "
-		"all objects used (memory leak!)\n");
+		"all objects in the slab used (memory leak!)\n");
 	page->inuse = s->objects;
 	page->freelist = NULL;
 }
@@ -270,11 +263,8 @@ void check_free_chain(struct slab *s, st
  */
 static void discard_slab(struct slab *s, struct page *page)
 {
-	DBUG_ON(PageActive(page));
-	DBUG_ON(PageLocked(page));
 	atomic_long_dec(&s->nr_slabs);
 
-	/* Restore page state */
 	page->mapping = NULL;
 	reset_page_mapcount(page);
 	__ClearPageSlab(page);
@@ -327,6 +317,7 @@ static struct page *new_slab(struct slab
  * Move a page back to the lists.
  *
  * Must be called with the slab lock held.
+ *
  * On exit the slab lock will have been dropped.
  */
 static void __always_inline putback_slab(struct slab *s, struct page *page)
@@ -573,42 +564,49 @@ static __always_inline void *__slab_allo
 	unsigned long flags;
 	int cpu;
 
-	if (unlikely(s->objects == 1)) {
-		struct page *page = new_slab(s, gfpflags, node);
-
-		if (page)
-			return page_address(page);
-		else
-			return NULL;
-	}
-
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 	page = get_and_lock_active(s, cpu);
 
 	if (unlikely(!page))
-		goto load;
+		goto new_slab;
 
-	while (unlikely(!page->freelist ||
-		(node != -1 && page_to_nid(page) != node))) {
-
-		deactivate_slab(s, page, cpu);
-load:
-		page = reload(s, cpu, gfpflags, node);
-		if (unlikely(!page)) {
-			local_irq_restore(flags);
-			return NULL;
-		}
-	}
+	if (unlikely(!page->freelist))
+		goto full_slab;
+	if (unlikely(node != -1 && page_to_nid(page) != node))
+		goto full_slab;
+redo:
 	page->inuse++;
 	object = page->freelist;
-	next_object = object[page->offset];
-	page->freelist = next_object;
-	check_free_chain(s, page);
+	page->freelist = next_object = object[page->offset];
 	SetPageReferenced(page);
+	check_free_chain(s, page);
 	slab_unlock(page);
 	local_irq_restore(flags);
 	return object;
+
+full_slab:
+	deactivate_slab(s, page, cpu);
+
+new_slab:
+	/*
+	 * This was moved out of line since it dereferences s and thus
+	 * touches an extra cacheline
+	 */
+	if (unlikely(s->objects == 1)) {
+		local_irq_restore(flags);
+		page = new_slab(s, gfpflags, node);
+		if (page)
+			return page_address(page);
+		else
+			return NULL;
+	}
+
+	page = reload(s, cpu, gfpflags, node);
+	if (unlikely(page))
+		goto redo;
+	local_irq_restore(flags);
+	return NULL;
 }
 
 static void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
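
For readers following the control-flow change in __slab_alloc above, here is a
minimal userspace sketch of the same fast-path/slow-path split. It is an
illustration only, not the kernel code: the toy_cache/toy_slab/toy_alloc names
and the refill logic are invented, and locking, per-cpu handling and the NUMA
check are left out. What it demonstrates is the point made by the comment added
in the hunk: the hot path touches only per-slab state and retries through a
redo label, while everything that needs to dereference the cache structure (and
so touch an extra cacheline) is kept out of line behind the full_slab/new_slab
labels.

/* toy_slub.c - standalone illustration only, not kernel code */
#include <stdio.h>
#include <stdlib.h>

struct toy_slab {			/* stands in for struct page */
	void **freelist;		/* first free object, NULL when exhausted */
	int inuse;
	int offset;			/* word offset of the free pointer */
};

struct toy_cache {			/* stands in for struct slab */
	struct toy_slab *active;	/* the "cpu slab" */
	int size;			/* object size in void * words */
	int objects;			/* objects per slab */
};

static struct toy_slab *toy_new_slab(struct toy_cache *s)
{
	struct toy_slab *slab = malloc(sizeof(*slab));
	void **mem = calloc((size_t)s->objects * s->size, sizeof(void *));
	int i;

	if (!slab || !mem) {
		free(slab);
		free(mem);
		return NULL;
	}
	/* Chain the objects into a freelist through their first word. */
	for (i = 0; i < s->objects - 1; i++)
		mem[i * s->size] = &mem[(i + 1) * s->size];
	mem[(s->objects - 1) * s->size] = NULL;
	slab->freelist = mem;
	slab->inuse = 0;
	slab->offset = 0;
	return slab;
}

static void *toy_alloc(struct toy_cache *s)
{
	struct toy_slab *page = s->active;
	void **object;

	if (!page)
		goto new_slab;
	if (!page->freelist)
		goto full_slab;
redo:
	/* Fast path: only per-slab state is touched here. */
	page->inuse++;
	object = page->freelist;
	page->freelist = object[page->offset];
	return object;

full_slab:
	s->active = NULL;	/* "deactivate" the exhausted slab (simply leaked here) */
new_slab:
	/* Slow path, kept out of line: this is where the cache is dereferenced. */
	page = toy_new_slab(s);
	if (page) {
		s->active = page;
		goto redo;
	}
	return NULL;
}

int main(void)
{
	struct toy_cache cache = { .active = NULL, .size = 4, .objects = 3 };
	int i;

	for (i = 0; i < 5; i++)
		printf("object %d at %p\n", i, toy_alloc(&cache));
	return 0;
}

Compiling and running the sketch prints five object addresses: the first three
come from the initial toy slab, the last two from the replacement slab set up
on the slow path, mirroring how the patched fast path falls through to
full_slab/new_slab only when the active slab cannot satisfy the allocation.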