Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -285,35 +285,37 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */
 
 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(page_count(p) == 0);		\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
+}
 
 /*
  * Grab a ref, return true if the page previously had a logical refcount of
  * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_inc_not_zero(&page->_count);
+}
 
-#define set_page_count(p,v)	atomic_set(&(p)->_count, v - 1)
-#define __put_page(p)		atomic_dec(&(p)->_count)
+static inline void set_page_count(struct page *page, int v)
+{
+	atomic_set(&page->_count, v);
+}
+
+static inline void __put_page(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	atomic_dec(&page->_count);
+}
 
 static inline void set_page_refs(struct page *page, int order)
 {
@@ -338,15 +340,16 @@ extern void FASTCALL(__page_cache_releas
 
 static inline int page_count(struct page *page)
 {
-	if (PageCompound(page))
+	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
@@ -354,15 +357,20 @@ void put_page(struct page *page);
 
 #else		/* CONFIG_HUGETLB_PAGE */
 
-#define page_count(p)		(atomic_read(&(p)->_count) + 1)
+static inline int page_count(struct page *page)
+{
+	return atomic_read(&page->_count);
+}
 
 static inline void get_page(struct page *page)
 {
+	BUG_ON(atomic_read(&page->_count) == 0);
	atomic_inc(&page->_count);
 }
 
 static inline void put_page(struct page *page)
 {
+	BUG_ON(atomic_read(&page->_count) == 0);
 	if (put_page_testzero(page))
 		__page_cache_release(page);
 }
Index: linux-2.6/mm/vmscan.c
===================================================================
--- linux-2.6.orig/mm/vmscan.c
+++ linux-2.6/mm/vmscan.c
@@ -585,24 +585,20 @@ static int isolate_lru_pages(int nr_to_s
 	int scan = 0;
 
 	while (scan++ < nr_to_scan && !list_empty(src)) {
+		struct list_head *target;
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
+		BUG_ON(!PageLRU(page));
 		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		target = src;
+		if (get_page_unless_zero(page)) {
+			ClearPageLRU(page);
+			target = dst;
 			nr_taken++;
-		}
+		} /* else it is being freed elsewhere */
+
+		list_add(&page->lru, target);
 	}
 
 	*scanned = scan;
Index: linux-2.6/mm/swap.c
===================================================================
--- linux-2.6.orig/mm/swap.c
+++ linux-2.6/mm/swap.c
@@ -40,6 +40,7 @@ void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
 		page = (struct page *)page_private(page);
+		BUG_ON(page_count(page) == 0);
 		if (put_page_testzero(page)) {
 			void (*dtor)(struct page *page);
 
@@ -48,6 +49,7 @@ void put_page(struct page *page)
 		}
 		return;
 	}
+	BUG_ON(page_count(page) == 0);
 	if (put_page_testzero(page))
 		__page_cache_release(page);
 }
@@ -177,17 +179,19 @@ void lru_add_drain(void)
  */
 void fastcall __page_cache_release(struct page *page)
 {
-	unsigned long flags;
-	struct zone *zone = page_zone(page);
+	if (unlikely(PageLRU(page))) {
+		unsigned long flags;
 
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (TestClearPageLRU(page))
+		struct zone *zone = page_zone(page);
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		if (!TestClearPageLRU(page))
+			BUG();
 		del_page_from_lru(zone, page);
-	if (page_count(page) != 0)
-		page = NULL;
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	if (page)
-		free_hot_page(page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+
+	BUG_ON(page_count(page) != 0);
+	free_hot_page(page);
 }
 EXPORT_SYMBOL(__page_cache_release);
 
@@ -213,29 +217,32 @@ void release_pages(struct page **pages, 
 	pagevec_init(&pages_to_free, cold);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
-		struct zone *pagezone;
 
 		if (!put_page_testzero(page))
 			continue;
 
-		pagezone = page_zone(page);
-		if (pagezone != zone) {
-			spin_lock_prefetch(&pagezone->lru_lock);
-			if (!zone)
-				local_irq_disable();
-			else
-				spin_unlock(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock(&zone->lru_lock);
-		}
-		if (TestClearPageLRU(page))
+		if (PageLRU(page)) {
+			struct zone *pagezone = page_zone(page);
+			if (pagezone != zone) {
+				spin_lock_prefetch(&pagezone->lru_lock);
+				if (!zone)
+					local_irq_disable();
+				else
+					spin_unlock(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock(&zone->lru_lock);
+			}
+			BUG_ON(!PageLRU(page));
+			ClearPageLRU(page);
 			del_page_from_lru(zone, page);
+		}
 		if (page_count(page) == 0) {
 			if (!pagevec_add(&pages_to_free, page)) {
-				spin_unlock_irq(&zone->lru_lock);
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = NULL;	/* No lock is held */
 				__pagevec_free(&pages_to_free);
 				pagevec_reinit(&pages_to_free);
-				zone = NULL;	/* No lock is held */
 			}
 		}
 	}
Index: linux-2.6/include/linux/page-flags.h
===================================================================
--- linux-2.6.orig/include/linux/page-flags.h
+++ linux-2.6/include/linux/page-flags.h
@@ -210,8 +210,9 @@ extern unsigned long *__page_state(unsig
 #define __ClearPageDirty(page)	__clear_bit(PG_dirty, &(page)->flags)
 #define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
 
-#define SetPageLRU(page)	set_bit(PG_lru, &(page)->flags)
 #define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
+#define SetPageLRU(page)	set_bit(PG_lru, &(page)->flags)
+#define ClearPageLRU(page)	clear_bit(PG_lru, &(page)->flags)
 #define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
 #define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)
 
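
---

For reference, the behavioural core of the patch is that a free page now has _count == 0 (instead of the old -1 bias), and the speculative grab in the LRU isolation path becomes "increment the count unless it is already zero" (atomic_inc_not_zero(), wrapped as get_page_unless_zero()). Below is a minimal, userspace-only sketch of that convention using C11 atomics rather than the kernel's atomic_t; the fake_* names and the compare-exchange loop are illustrative assumptions, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	atomic_int _count;		/* 0 means "free", > 0 means referenced */
};

/* Take a reference only if the count is still non-zero. */
static int fake_get_page_unless_zero(struct fake_page *page)
{
	int old = atomic_load(&page->_count);

	while (old != 0) {
		/* On failure, 'old' is refreshed with the current value. */
		if (atomic_compare_exchange_weak(&page->_count, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* too late: the page already hit zero */
}

/* Drop a reference; return 1 if this was the last one. */
static int fake_put_page_testzero(struct fake_page *page)
{
	return atomic_fetch_sub(&page->_count, 1) == 1;
}

int main(void)
{
	struct fake_page page = { ._count = 1 };

	printf("speculative get: %d\n", fake_get_page_unless_zero(&page));	/* 1 */
	printf("last put:        %d\n", fake_put_page_testzero(&page));	/* 0 */
	printf("last put:        %d\n", fake_put_page_testzero(&page));	/* 1, now free */
	printf("speculative get: %d\n", fake_get_page_unless_zero(&page));	/* 0 */
	return 0;
}

In the patch itself this same pattern is what lets isolate_lru_pages() and release_pages() simply skip pages whose count has already reached zero ("it is being freed elsewhere") instead of undoing a blind increment with __put_page().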