Add gfp_mask to add_to_swap

The migration code calls the function with GFP_KERNEL while the swap code
calls it with GFP_ATOMIC: the migration code is allowed to sleep, so in a
low memory situation it can ask the swap code to free some pages, whereas
the swap code is itself the reclaim path and must not block there.

Signed-off-by: Hirokazu Takahashi
Signed-off-by: Dave Hansen
Signed-off-by: Christoph Lameter

Index: linux-2.6.14-rc5-mm1/include/linux/swap.h
===================================================================
--- linux-2.6.14-rc5-mm1.orig/include/linux/swap.h	2005-11-04 10:27:52.000000000 -0800
+++ linux-2.6.14-rc5-mm1/include/linux/swap.h	2005-11-04 10:34:36.000000000 -0800
@@ -237,7 +237,7 @@ extern int rw_swap_page_sync(int, swp_en
 extern struct address_space swapper_space;
 #define total_swapcache_pages swapper_space.nrpages
 extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, gfp_t);
 extern void __delete_from_swap_cache(struct page *);
 extern void delete_from_swap_cache(struct page *);
 extern int move_to_swap_cache(struct page *, swp_entry_t);
Index: linux-2.6.14-rc5-mm1/mm/swap_state.c
===================================================================
--- linux-2.6.14-rc5-mm1.orig/mm/swap_state.c	2005-11-04 10:27:52.000000000 -0800
+++ linux-2.6.14-rc5-mm1/mm/swap_state.c	2005-11-04 10:35:10.000000000 -0800
@@ -143,7 +143,7 @@ void __delete_from_swap_cache(struct pag
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock.
  */
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
 {
 	swp_entry_t entry;
 	int err;
@@ -171,7 +171,7 @@ int add_to_swap(struct page * page)
 	 * Add it to the swap cache and mark it dirty
 	 */
 	err = __add_to_swap_cache(page, entry,
-			GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+			gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

 	switch (err) {
 	case 0:				/* Success */
Index: linux-2.6.14-rc5-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.14-rc5-mm1.orig/mm/vmscan.c	2005-11-04 10:27:52.000000000 -0800
+++ linux-2.6.14-rc5-mm1/mm/vmscan.c	2005-11-04 10:33:38.000000000 -0800
@@ -457,7 +457,7 @@ static int shrink_list(struct list_head
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!sc->may_swap)
 				goto keep_locked;
-			if (!add_to_swap(page))
+			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
 		}
 #endif /* CONFIG_SWAP */
@@ -871,7 +871,7 @@ redo:
 		 * preserved.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!add_to_swap(page)) {
+			if (!add_to_swap(page, GFP_KERNEL)) {
 				unlock_page(page);
 				list_move(&page->lru, &failed);
 				nr_failed++;
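
For readers skimming the diff, a condensed sketch of the two vmscan.c call
sites after the patch is applied (illustration only, not part of the patch);
the comments restate the changelog's reasoning:

	/* Reclaim context (shrink_list): we are already inside the page
	 * reclaim path, so the swap cache allocation must not sleep or
	 * recurse into reclaim -- pass GFP_ATOMIC. */
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;
	}

	/* Migration context: this path may sleep, so a GFP_KERNEL
	 * allocation is allowed to trigger reclaim and free pages when
	 * memory is low; on failure the page is parked on the failed
	 * list rather than reclaimed. */
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page, GFP_KERNEL)) {
			unlock_page(page);
			list_move(&page->lru, &failed);
			nr_failed++;
		}
	}

In both cases add_to_swap() still ORs in __GFP_NOMEMALLOC|__GFP_NOWARN
before handing the mask to __add_to_swap_cache(), as the swap_state.c hunk
shows.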