Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -293,7 +293,6 @@ struct page {
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -313,7 +312,6 @@ static inline void set_page_count(struct
 
 static inline void __put_page(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_dec(&page->_count);
 }
 
@@ -349,7 +347,6 @@ static inline void get_page(struct page
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
-	BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
@@ -364,13 +361,11 @@ static inline int page_count(struct page
 
 static inline void get_page(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
 static inline void put_page(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
 	if (put_page_testzero(page))
 		__page_cache_release(page);
 }
Index: linux-2.6/mm/swap.c
===================================================================
--- linux-2.6.orig/mm/swap.c
+++ linux-2.6/mm/swap.c
@@ -40,7 +40,6 @@ void put_page(struct page *page)
 {
 	if (unlikely(PageCompound(page))) {
 		page = (struct page *)page_private(page);
-		BUG_ON(page_count(page) == 0);
 		if (put_page_testzero(page)) {
 			void (*dtor)(struct page *page);
 
@@ -49,7 +48,6 @@ void put_page(struct page *page)
 		}
 		return;
 	}
-	BUG_ON(page_count(page) == 0);
 	if (put_page_testzero(page))
 		__page_cache_release(page);
 }
Index: linux-2.6/include/linux/pagemap.h
===================================================================
--- linux-2.6.orig/include/linux/pagemap.h
+++ linux-2.6/include/linux/pagemap.h
@@ -60,10 +60,16 @@ static inline struct page *page_cache_ge
 		return NULL;
 
 #ifndef CONFIG_SMP
-	BUG_ON(page_count(page) == 0);
+	/*
+	 * Preempt must be disabled here - we rely on rcu_read_lock doing
+	 * this for us.
+	 *
+	 * Pagecache won't be truncated from interrupt context, so if we have
+	 * found a page in the radix tree here, we have pinned its refcount by
+	 * disabling preempt, and hence no need for the "speculative get" that
+	 * SMP requires.
+	 */
 	atomic_inc(&page->_count);
-	BUG_ON(page != *pagep);
-	BUG_ON(PageCompound(page) && (struct page *)page_private(page) != page);
 
 	return page;
 #else
@@ -105,8 +111,6 @@ static inline struct page *page_cache_ge
 	if (unlikely(page != *pagep))	/* page no longer at *pagep? */
 		goto bad_page;
 
-	BUG_ON(PageCompound(page) && (struct page *)page_private(page) != page);
-
 	return page;
 
 bad_page:
@@ -114,7 +118,6 @@ bad_page:
 	put_page(page);
 	return NULL;
 #endif
-
 }
 
 static inline struct page *page_cache_alloc(struct address_space *x)