From: Hugh Dickins

Nick's mm-clarify-__add_to_swap_cache-locking.patch is fine for mainline,
but soon generates a "kernel BUG at mm/swap_state.c:78!" when it meets
mm-implement-swap-prefetching.patch in 2.6.23-rc2-mm1.  We could add a fix
to the latter, but I think it's better to adjust Nick's, so that it's right
for whichever tree it's in: move the responsibility to SetPageLocked from
read_swap_cache_async to add_to_swap_cache.

Signed-off-by: Hugh Dickins
Cc: Nick Piggin
Signed-off-by: Andrew Morton
---

 mm/swap_state.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff -puN mm/swap_state.c~mm-clarify-__add_to_swap_cache-locking-fix mm/swap_state.c
--- a/mm/swap_state.c~mm-clarify-__add_to_swap_cache-locking-fix
+++ a/mm/swap_state.c
@@ -99,15 +99,18 @@ static int add_to_swap_cache(struct page
 {
 	int error;
 
+	BUG_ON(PageLocked(page));
 	if (!swap_duplicate(entry)) {
 		INC_CACHE_INFO(noent_race);
 		return -ENOENT;
 	}
+	SetPageLocked(page);
 	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
 	/*
 	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
 	 */
 	if (error) {
+		ClearPageLocked(page);
 		swap_free(entry);
 		if (error == -EEXIST)
 			INC_CACHE_INFO(exist_race);
@@ -338,7 +341,6 @@ struct page *read_swap_cache_async(swp_e
 						vma, addr);
 		if (!new_page)
 			break;		/* Out of memory */
-		SetPageLocked(new_page);	/* could be non-atomic op */
 	}
 
 	/*
@@ -362,9 +364,7 @@ struct page *read_swap_cache_async(swp_e
 		}
 	} while (err != -ENOENT && err != -ENOMEM);
 
-	if (new_page) {
-		ClearPageLocked(new_page);
+	if (new_page)
 		page_cache_release(new_page);
-	}
 	return found_page;
 }
_