This patch moves some LRU list handling code out of the swap-code.  These
can be used by memory-migration code.

Signed-off-by: Hirokazu Takahashi
Signed-off-by: Dave Hansen
---

 memhotplug-dave/include/linux/mm_inline.h |   68 ++++++++++++++++++++++++++++++
 memhotplug-dave/mm/vmscan.c               |   25 +----------
 2 files changed, 72 insertions(+), 21 deletions(-)

diff -puN include/linux/mm_inline.h~AA-PM-01-steal_page_from_lru include/linux/mm_inline.h
--- memhotplug/include/linux/mm_inline.h~AA-PM-01-steal_page_from_lru	2005-07-28 13:50:31.000000000 -0700
+++ memhotplug-dave/include/linux/mm_inline.h	2005-07-28 13:50:31.000000000 -0700
@@ -38,3 +38,71 @@ del_page_from_lru(struct zone *zone, str
 		zone->nr_inactive--;
 	}
 }
+
+static inline int
+isolate_lru_onepage(struct page *page, struct list_head *src,
+		struct list_head *dst)
+{
+	if (!TestClearPageLRU(page))
+		BUG();
+	list_del(&page->lru);
+	if (get_page_testone(page)) {
+		/*
+		 * It is being freed elsewhere
+		 */
+		__put_page(page);
+		SetPageLRU(page);
+		list_add(&page->lru, src);
+		return 0;
+	}
+	list_add(&page->lru, dst);
+	return 1;
+}
+
+
+static inline int
+__steal_page_from_lru(struct zone *zone, struct page *page,
+		struct list_head *dst)
+{
+	if (PageActive(page)) {
+		if (!isolate_lru_onepage(page, &zone->active_list, dst))
+			return 0;
+		zone->nr_active--;
+	} else {
+		if (!isolate_lru_onepage(page, &zone->inactive_list, dst))
+			return 0;
+		zone->nr_inactive--;
+	}
+	return 1;
+}
+
+static inline int
+steal_page_from_lru(struct zone *zone, struct page *page,
+		struct list_head *dst)
+{
+	int ret;
+	spin_lock_irq(&zone->lru_lock);
+	ret = __steal_page_from_lru(zone, page, dst);
+	spin_unlock_irq(&zone->lru_lock);
+	return ret;
+}
+
+static inline void
+__putback_page_to_lru(struct zone *zone, struct page *page)
+{
+	if (TestSetPageLRU(page))
+		BUG();
+	if (PageActive(page))
+		add_page_to_active_list(zone, page);
+	else
+		add_page_to_inactive_list(zone, page);
+}
+
+static inline void
+putback_page_to_lru(struct zone *zone, struct page *page)
+{
+	spin_lock_irq(&zone->lru_lock);
+	__putback_page_to_lru(zone, page);
+	spin_unlock_irq(&zone->lru_lock);
+}
+
diff -puN mm/vmscan.c~AA-PM-01-steal_page_from_lru mm/vmscan.c
--- memhotplug/mm/vmscan.c~AA-PM-01-steal_page_from_lru	2005-07-28 13:50:31.000000000 -0700
+++ memhotplug-dave/mm/vmscan.c	2005-07-28 13:50:31.000000000 -0700
@@ -582,22 +582,8 @@ static int isolate_lru_pages(int nr_to_s
 	while (scan++ < nr_to_scan && !list_empty(src)) {
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
-
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		if (isolate_lru_onepage(page, src, dst))
 			nr_taken++;
-		}
 	}
 
 	*scanned = scan;
@@ -650,13 +636,10 @@ static void shrink_cache(struct zone *zo
 	 */
 	while (!list_empty(&page_list)) {
 		page = lru_to_page(&page_list);
-		if (TestSetPageLRU(page))
-			BUG();
 		list_del(&page->lru);
-		if (PageActive(page))
-			add_page_to_active_list(zone, page);
-		else
-			add_page_to_inactive_list(zone, page);
+		if (PageActive(page) && page_under_capture(page))
+			ClearPageActive(page);
+		__putback_page_to_lru(zone, page);
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
_