Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-31 16:42:38.518873865 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-31 17:12:40.421159561 -0700
@@ -174,18 +174,6 @@ static void check_slab(struct page *page
 #endif
 }
 
-static void check_active_slab(struct page *page)
-{
-#ifdef SLABIFIER_DEBUG
-	if (!PageActive(page)) {
-		printk(KERN_CRIT "Not an active slab page @%p flags=%lx"
-			" mapping=%p count=%d \n",
-			page, page->flags, page->mapping, page_count(page));
-		BUG();
-	}
-#endif
-}
-
 static int check_valid_pointer(struct slab *s, struct page *page,
 					void *object, void *origin)
 {
@@ -343,29 +331,6 @@ static void deactivate_slab(struct slab
 }
 
 /*
- * Acquire the slab lock from the active array. If there is no active
- * slab for this processor then return NULL;
- */
-static __always_inline struct page *get_and_lock_active(struct slab *s,
-							int cpu)
-{
-	struct page *page;
-
-redo:
-	page = s->active[cpu];
-	if (unlikely(!page))
-		return NULL;
-	slab_lock(page);
-	if (unlikely(s->active[cpu] != page)) {
-		slab_unlock(page);
-		goto redo;
-	}
-	check_active_slab(page);
-	check_free_chain(s, page);
-	return page;
-}
-
-/*
  * Flush an active slab back to the lists.
  */
 static void flush_active(struct slab *s, int cpu)
@@ -374,9 +339,11 @@ static void flush_active(struct slab *s,
 	unsigned long flags;
 
 	local_irq_save(flags);
-	page = get_and_lock_active(s, cpu);
-	if (likely(page))
+	page = s->active[cpu];
+	if (likely(page)) {
+		slab_lock(page);
 		deactivate_slab(s, page, cpu);
+	}
 	local_irq_restore(flags);
 }
 
@@ -487,73 +454,6 @@ static struct slab_cache *slab_create(st
 	return &s->sc;
 }
 
-/*
- * Reload the per cpu slab
- *
- * If we have reloaded successfully then we exit with holding the slab lock
- * and return the pointer to the new page.
- *
- * Return NULL if we cannot reload.
- */
-static struct page *reload(struct slab *s, unsigned long cpu, gfp_t flags,
-							int node)
-{
-	struct page *page;
-
-redo:
-	/* Racy check. If we mistakenly see no partial slabs then we just
-	 * expand the partial list. If we mistakenly try to get a partial
-	 * slab then get_partials will return NULL.
-	 */
-	if (s->nr_partial) {
-		page = get_partial(s, node);
-		if (page)
-			goto gotpage;
-	}
-
-	if ((flags & __GFP_WAIT)) {
-		local_irq_enable();
-		page = new_slab(s, flags, node);
-		local_irq_disable();
-	} else
-		page = new_slab(s, flags, node);
-
-	if (!page)
-		return NULL;
-
-	slab_lock(page);
-
-gotpage:
-	/*
-	 * Now we have a page that is isolated from the lists and locked,
-	 */
-	SetPageActive(page);
-	ClearPageReferenced(page);
-
-	if (cmpxchg(&s->active[cpu], NULL, page) != NULL) {
-
-		ClearPageActive(page);
-		add_partial(s, page);
-		slab_unlock(page);
-
-		page = get_and_lock_active(s, cpu);
-		if (page)
-			return page;
-		goto redo;
-	}
-
-	check_free_chain(s, page);
-
-#ifdef CONFIG_SMP
-	if (keventd_up() && !s->flusher_active) {
-		s->flusher_active = 1;
-		schedule_delayed_work(&s->flush, 10 * HZ);
-	}
-#endif
-
-	return page;
-}
-
 static __always_inline void *__slab_alloc(struct slab_cache *sc,
 				gfp_t gfpflags, int node)
 {
@@ -566,10 +466,11 @@ static __always_inline void *__slab_allo
 
 	local_irq_save(flags);
 	cpu = smp_processor_id();
-	page = get_and_lock_active(s, cpu);
-	if (unlikely(!page))
+	page = s->active[cpu];
+	if (!page)
 		goto new_slab;
 
+	slab_lock(page);
 	if (unlikely(!page->freelist))
 		goto full_slab;
 
@@ -602,11 +503,44 @@ new_slab:
 		return NULL;
 	}
 
-	page = reload(s, cpu, gfpflags, node);
-	if (unlikely(page))
-		goto redo;
+	/* Racy check. If we mistakenly see no partial slabs then just
+	 * expand the partial list. If we mistakenly try to get a partial
+	 * slab then get_partials will return NULL.
+	 */
+	if (s->nr_partial) {
+		page = get_partial(s, node);
+		if (page)
+			goto gotpage;
+	}
+	local_irq_restore(flags);
 
-	return NULL;
+	page = new_slab(s, gfpflags, node);
+	if (!page)
+		return NULL;
+
+	local_irq_save(flags);
+	slab_lock(page);
+
+gotpage:
+	SetPageActive(page);
+	if (s->active[cpu]) {
+		/* We dropped the lock and someone else got a slab */
+		ClearPageActive(page);
+		slab_unlock(page);
+		discard_slab(s, page);
+		page = s->active[cpu];
+	} else
+		s->active[cpu] = page;
+
+	check_free_chain(s, page);
+
+#ifdef CONFIG_SMP
+	if (keventd_up() && !s->flusher_active) {
+		s->flusher_active = 1;
+		schedule_delayed_work(&s->flush, 10 * HZ);
+	}
+#endif
+	goto redo;
 }
 
 static void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
@@ -649,9 +583,12 @@ static void slab_free(struct slab_cache
 	if (!object)
 		return;
 
-	page = get_object_page(object);
+	page = virt_to_page(x);
+
+	if (unlikely(PageCompound(page)))
+		page = page->first_page;
 
-	if (unlikely(!page))
+	if (unlikely(!PageSlab(page)))
 		goto bad_slab;
 
 	if (unlikely(sc != page->slab))
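
For reference, the open-coded pointer-to-page lookup that replaces
get_object_page() in slab_free() above is equivalent to the helper
sketched below. This is only an illustrative sketch, assuming the
compound-page layout used in this tree (tail pages reach their head
page via page->first_page); the name object_to_slab_page is made up
here and is not part of the patch.

	/* Sketch: map an object pointer back to its slab's head page,
	 * mirroring the open-coded sequence in slab_free() above.
	 */
	static inline struct page *object_to_slab_page(const void *x)
	{
		struct page *page = virt_to_page(x);

		/* Objects in higher-order slabs live on compound tail
		 * pages; step back to the head page.
		 */
		if (unlikely(PageCompound(page)))
			page = page->first_page;

		/* Anything not marked PageSlab was never handed out
		 * by this allocator.
		 */
		if (unlikely(!PageSlab(page)))
			return NULL;

		return page;
	}

Open-coding this in slab_free() trades a function call for a few
inline checks in the hot free path; a NULL result here corresponds to
the bad_slab path taken in the patch.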