Index: linux-2.6/include/linux/page-flags.h
===================================================================
--- linux-2.6.orig/include/linux/page-flags.h	2007-07-31 18:43:17.000000000 -0700
+++ linux-2.6/include/linux/page-flags.h	2007-07-31 18:52:41.000000000 -0700
@@ -83,6 +83,7 @@
 #define PG_private		11	/* If pagecache, has fs-private data */
 #define PG_writeback		12	/* Page is under writeback */
+#define PG_unreclaimable	13	/* Page is not reclaimable */
 #define PG_compound		14	/* Part of a compound page */
 #define PG_swapcache		15	/* Swap page: swp_entry_t in private */
@@ -93,6 +94,7 @@
 /* PG_readahead is only used for file reads; PG_reclaim is only for writes */
 #define PG_readahead		PG_reclaim	/* Reminder to do async read-ahead */
+
 /* PG_owner_priv_1 users should have descriptive aliases */
 #define PG_checked		PG_owner_priv_1	/* Used by some filesystems */
 #define PG_pinned		PG_owner_priv_1	/* Xen pinned pagetable */
@@ -260,6 +262,19 @@ static inline void __ClearPageTail(struc
 #define SetPageUncached(page)	set_bit(PG_uncached, &(page)->flags)
 #define ClearPageUncached(page)	clear_bit(PG_uncached, &(page)->flags)
 
+/*
+ * PageUnreclaimable means that the page was taken off the LRU because
+ * something prohibits the reclaim of the page. PageUnreclaimable must
+ * be cleared before a page is put back onto the LRU. PageUnreclaimable
+ * is only modified under zone->lru_lock like PageLRU.
+ */
+#define PageUnreclaimable(page)	test_bit(PG_unreclaimable, &(page)->flags)
+#define SetPageUnreclaimable(page)	set_bit(PG_unreclaimable, &(page)->flags)
+#define ClearPageUnreclaimable(page) \
+		clear_bit(PG_unreclaimable, &(page)->flags)
+#define __ClearPageUnreclaimable(page) \
+		__clear_bit(PG_unreclaimable, &(page)->flags)
+
 struct page;	/* forward declaration */
 
 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c	2007-07-31 18:49:29.000000000 -0700
+++ linux-2.6/mm/page_alloc.c	2007-07-31 18:57:41.000000000 -0700
@@ -215,6 +215,7 @@ static void bad_page(struct page *page)
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
+			1 << PG_unreclaimable |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
@@ -451,6 +452,7 @@ static inline int free_pages_check(struc
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
+			1 << PG_unreclaimable |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
@@ -600,6 +602,7 @@ static int prep_new_page(struct page *pa
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
+			1 << PG_unreclaimable |
			1 << PG_buddy ))))
		bad_page(page);
@@ -783,6 +786,13 @@ static void fastcall free_hot_cold_page(
	struct per_cpu_pages *pcp;
	unsigned long flags;
 
+	if (PageUnreclaimable(page)) {
+		__ClearPageUnreclaimable(page);
+		if (PageAnon(page))
+			dec_zone_page_state(page, NR_ANON_UNRECLAIMABLE);
+		else
+			dec_zone_page_state(page, NR_FILE_UNRECLAIMABLE);
+	}
	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
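
For readers of the series, here is a minimal caller-side sketch of how the new macros are meant to be used, following the rule stated in the page-flags.h comment: the flag is only changed under zone->lru_lock, and the page is taken off the LRU before it is marked. The helper name mark_page_unreclaimable() and the exact LRU bookkeeping below are illustrative assumptions, not part of this patch; the NR_ANON_UNRECLAIMABLE/NR_FILE_UNRECLAIMABLE counters are the ones decremented above in free_hot_cold_page() and are assumed to be introduced elsewhere in the series.

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/page-flags.h>
#include <linux/vmstat.h>

/*
 * Hypothetical example, not part of this patch: take a page off the
 * LRU and mark it unreclaimable, with the matching per-zone counter
 * update.  The flag is only touched under zone->lru_lock, as required
 * by the comment in page-flags.h.
 */
static void mark_page_unreclaimable(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		/* Remove from the LRU before setting the flag */
		ClearPageLRU(page);
		del_page_from_lru(zone, page);
	}
	SetPageUnreclaimable(page);
	if (PageAnon(page))
		inc_zone_page_state(page, NR_ANON_UNRECLAIMABLE);
	else
		inc_zone_page_state(page, NR_FILE_UNRECLAIMABLE);
	spin_unlock_irq(&zone->lru_lock);
}

The inverse operation would clear the flag and decrement the counter under the same lock before putting the page back on the LRU, which is why free_hot_cold_page() above only needs the non-atomic __ClearPageUnreclaimable() on the final free path.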