From: Peter Zijlstra

lru_cache_add_tail() uses the inactive per-cpu pagevec.  This causes normal
inactive and inactive tail inserts to end up on the wrong end of the list.
When the pagevec is completed by lru_cache_add_tail() but still contains
normal inactive pages, all pages will be added to the inactive tail, and
vice versa.  Also, *add_drain*() will always complete to the inactive head.

Add a third per-cpu pagevec to alleviate this problem.

Signed-off-by: Peter Zijlstra
Acked-by: Con Kolivas
Signed-off-by: Andrew Morton
---

 mm/swap.c |    8 +++++++-
 1 files changed, 7 insertions(+), 1 deletion(-)

diff -puN mm/swap.c~swap-prefetch-fix-lru_cache_add_tail mm/swap.c
--- devel/mm/swap.c~swap-prefetch-fix-lru_cache_add_tail	2006-05-18 00:51:57.000000000 -0700
+++ devel-akpm/mm/swap.c	2006-05-18 00:57:03.000000000 -0700
@@ -138,6 +138,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  */
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
 static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
+static DEFINE_PER_CPU(struct pagevec, lru_add_tail_pvecs) = { 0, };
 
 void fastcall lru_cache_add(struct page *page)
 {
@@ -159,6 +160,8 @@ void fastcall lru_cache_add_active(struc
 	put_cpu_var(lru_add_active_pvecs);
 }
 
+static inline void __pagevec_lru_add_tail(struct pagevec *pvec);
+
 static void __lru_add_drain(int cpu)
 {
 	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
@@ -169,6 +172,9 @@ static void __lru_add_drain(int cpu)
 	pvec = &per_cpu(lru_add_active_pvecs, cpu);
 	if (pagevec_count(pvec))
 		__pagevec_lru_add_active(pvec);
+	pvec = &per_cpu(lru_add_tail_pvecs, cpu);
+	if (pagevec_count(pvec))
+		__pagevec_lru_add_tail(pvec);
 }
 
 void lru_add_drain(void)
@@ -416,7 +422,7 @@ static inline void __pagevec_lru_add_tai
  */
 void fastcall lru_cache_add_tail(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+	struct pagevec *pvec = &get_cpu_var(lru_add_tail_pvecs);
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
_
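
For readers unfamiliar with pagevec batching, the sketch below is a minimal
user-space analogue of the bug and the fix; struct batch, batch_add() and
batch_flush() are illustrative stand-ins, not kernel API.  A pagevec buffers
pages per CPU and only moves them onto the LRU list once the buffer fills or
is drained, so when head and tail inserts share one buffer, whichever insert
completes the buffer decides where every buffered page lands:

/*
 * Hypothetical user-space model of per-cpu pagevec batching.
 * struct batch stands in for struct pagevec; the "head"/"tail"
 * strings stand in for the two ends of the inactive LRU list.
 */
#include <stdio.h>

#define BATCH_SIZE 4

struct batch {
	int nr;
	int pages[BATCH_SIZE];
};

/* Move every buffered page to one end of an imaginary LRU list. */
static void batch_flush(struct batch *b, const char *end)
{
	for (int i = 0; i < b->nr; i++)
		printf("page %d -> inactive %s\n", b->pages[i], end);
	b->nr = 0;
}

/* Buffer a page; flush the whole batch to 'end' once it fills up. */
static void batch_add(struct batch *b, int page, const char *end)
{
	b->pages[b->nr++] = page;
	if (b->nr == BATCH_SIZE)
		batch_flush(b, end);
}

int main(void)
{
	/*
	 * Buggy: head and tail inserts share one batch, so the insert
	 * that completes the batch decides where *all* pages go --
	 * here pages 1 and 2 wrongly end up on the tail.
	 */
	struct batch shared = { 0 };
	batch_add(&shared, 1, "head");
	batch_add(&shared, 2, "head");
	batch_add(&shared, 3, "tail");
	batch_add(&shared, 4, "tail");	/* fills the batch, flushes to tail */

	/*
	 * Fixed: a separate batch per destination, mirroring the new
	 * lru_add_tail_pvecs; the explicit flushes correspond to
	 * __lru_add_drain() draining each pvec to its own end.
	 */
	struct batch head = { 0 }, tail = { 0 };
	batch_add(&head, 1, "head");
	batch_add(&head, 2, "head");
	batch_add(&tail, 3, "tail");
	batch_add(&tail, 4, "tail");
	batch_flush(&head, "head");
	batch_flush(&tail, "tail");
	return 0;
}

Giving each destination its own per-cpu buffer, as the patch does with
lru_add_tail_pvecs and its drain in __lru_add_drain(), removes the ambiguity
at the cost of one extra pagevec per CPU.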