This patch moves active pages that are under capture from the
zone->active_list to the zone->inactive_list, and then prevents any
page that is under capture from being put back on the active_list.
It also initiates swapping of pages when pages marked for removal
are still in use. In limited testing, this patch is almost always
able to swap out all pages in the remove area of highmem on a 2GB
i386 machine.

Signed-off-by: Bradley Christiansen
---
Signed-off-by: Dave Hansen
---

 memhotplug-dave/mm/page_alloc.c |   20 ++++++++++++++++++++
 memhotplug-dave/mm/swap.c       |    3 ++-
 memhotplug-dave/mm/vmscan.c     |   13 ++++++++-----
 3 files changed, 30 insertions(+), 6 deletions(-)

diff -puN mm/page_alloc.c~K2-swap_changes_mem_remove mm/page_alloc.c
--- memhotplug/mm/page_alloc.c~K2-swap_changes_mem_remove	2005-07-28 13:51:11.000000000 -0700
+++ memhotplug-dave/mm/page_alloc.c	2005-07-28 13:51:11.000000000 -0700
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -753,6 +754,24 @@ rmb_and_drain_cpu_pages(void * __unused)
 	drain_local_pages();
 }
+/*
+ * This function forces pages that are under capture off the zone->active_list
+ * and onto the zone->inactive_list
+ */
+static inline void force_captured_to_inactive_list(struct page *base)
+{
+	struct zone *zone = page_zone(base);
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &zone->active_list) {
+		struct page *page = list_entry(p, struct page, lru);
+		if (page_under_capture(page)) {
+			ClearPageActive(page);
+			del_page_from_active_list(zone, page);
+			add_page_to_inactive_list(zone, page);
+		}
+	}
+}
 
 /*
  * Flags a given order of pages to be removed from memory, then removes any
@@ -789,6 +808,7 @@ int capture_page_range(unsigned long sta
 
 	page = pfn_to_page(start_pfn);
 	remove_page_freearea(page, order);
+	force_captured_to_inactive_list(page);
 
 	nr_pages = 1<<order;
[...]

diff -puN mm/swap.c~K2-swap_changes_mem_remove mm/swap.c
[...]

diff -puN mm/vmscan.c~K2-swap_changes_mem_remove mm/vmscan.c
[...]
 	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
 }
 
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) || defined(CONFIG_MEMORY_HOTPLUG)
 /*
  * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
  * pages.
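
For readers without the rest of the memhotplug tree handy:
force_captured_to_inactive_list() walks the active list with
list_for_each_safe() precisely because it unlinks entries mid-walk, and
the (garbled) mm/swap.c hunk is, per the changelog, what keeps captured
pages from being promoted back to the active list. The standalone
userspace sketch below illustrates both ideas under those assumptions;
the list primitives are simplified rewrites of the <linux/list.h>
idiom, and names such as under_capture and activate_page_demo are
invented for the example, not kernel interfaces.

/* Standalone sketch: safe list traversal while migrating entries, plus
 * a guard that refuses to re-activate a page under capture.
 * Build with: cc -o lru_demo lru_demo.c */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page { int under_capture; struct list_head lru; };

static struct list_head active = LIST_HEAD_INIT(active);
static struct list_head inactive = LIST_HEAD_INIT(inactive);

/* Analogue of force_captured_to_inactive_list(): the saved successor
 * pointer 'n' is what makes it safe to unlink 'p' during the walk. */
static void force_captured_to_inactive(void)
{
	struct list_head *p, *n;

	for (p = active.next, n = p->next; p != &active; p = n, n = p->next) {
		struct page *page = list_entry(p, struct page, lru);
		if (page->under_capture) {
			list_del(&page->lru);
			list_add(&page->lru, &inactive);
		}
	}
}

/* Analogue of the swap.c change: never promote a captured page. */
static void activate_page_demo(struct page *page)
{
	if (page->under_capture)
		return;		/* stays on the inactive list */
	list_del(&page->lru);
	list_add(&page->lru, &active);
}

int main(void)
{
	struct page pages[4] = { {0}, {1}, {0}, {1} };
	struct list_head *p;
	int i;

	for (i = 0; i < 4; i++)
		list_add(&pages[i].lru, &active);

	force_captured_to_inactive();
	activate_page_demo(&pages[1]);	/* refused: still under capture */

	for (p = inactive.next; p != &inactive; p = p->next)
		printf("page %ld is inactive\n",
		       (long)(list_entry(p, struct page, lru) - pages));
	return 0;
}

The saved successor pointer is the whole point of the _safe variant: a
plain list_for_each() would dereference p->next after p had already
been spliced onto the inactive list.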
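The changelog's "initiates swapping" relies on the usual
mark-work-then-wake-the-daemon pattern; the surviving vmscan.c context
shows kswapd's wait queue (kswapd_wait). As a rough userspace analogue,
here is the same pattern with POSIX threads, where a condition variable
stands in for the wait queue. The names reclaimer and pending_pages are
invented for this sketch, and the real patch presumably reaches kswapd
through its existing wakeup path rather than anything shown here.

/* Sketch of the wake-the-reclaimer pattern: a condition variable plays
 * the role of kswapd_wait. Build with: cc -pthread -o wake_demo wake_demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kswapd_wait = PTHREAD_COND_INITIALIZER;
static int pending_pages;	/* pages "marked for removal but in use" */
static int done;

/* Stand-in for kswapd: sleeps until woken, then reclaims. */
static void *reclaimer(void *unused)
{
	pthread_mutex_lock(&lock);
	while (!done || pending_pages) {
		while (pending_pages == 0 && !done)
			pthread_cond_wait(&kswapd_wait, &lock);
		if (pending_pages) {
			printf("swapping out %d captured pages\n",
			       pending_pages);
			pending_pages = 0;
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaimer, NULL);

	/* Analogue of capture_page_range(): mark the range, then wake
	 * the reclaim thread, cf. wake_up_interruptible(). */
	pthread_mutex_lock(&lock);
	pending_pages = 16;
	pthread_cond_signal(&kswapd_wait);
	pthread_mutex_unlock(&lock);

	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&kswapd_wait);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}

As in the kernel, the wakeup is only a hint: the sleeper re-checks its
predicate under the lock, so a spurious or early wakeup does no harm.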