In order to allow for interruptible and asynchronous versions of lock_page in conjunction with the wait_on_bit changes, we need to define low-level lock page routines which take an additional argument, i.e. a wait queue entry and may return non-zero status, e.g. -EINTR, -EIOCBRETRY, -EWOULDBLOCK etc. This patch renames __lock_page to lock_page_slow, so that __lock_page and __lock_page_slow can denote the versions which take a wait queue parameter. Signed-off-by: Suparna Bhattacharya diff -puN include/linux/pagemap.h~lock_page_slow include/linux/pagemap.h --- linux-2.6.20-rc1-root/include/linux/pagemap.h | 4 ++-- linux-2.6.20-rc1-root/mm/filemap.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff -puN include/linux/pagemap.h~lock_page_slow include/linux/pagemap.h --- linux-2.6.20-rc1/include/linux/pagemap.h~lock_page_slow 2006-12-19 15:50:03.000000000 +0530 +++ linux-2.6.20-rc1-root/include/linux/pagemap.h 2006-12-20 13:45:20.000000000 +0530 @@ -133,7 +133,7 @@ static inline pgoff_t linear_page_index( return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); } -extern void FASTCALL(__lock_page(struct page *page)); +extern void FASTCALL(lock_page_slow(struct page *page)); extern void FASTCALL(__lock_page_nosync(struct page *page)); extern void FASTCALL(unlock_page(struct page *page)); @@ -144,7 +144,7 @@ static inline void lock_page(struct page { might_sleep(); if (TestSetPageLocked(page)) - __lock_page(page); + lock_page_slow(page); } /* diff -puN mm/filemap.c~lock_page_slow mm/filemap.c --- linux-2.6.20-rc1/mm/filemap.c~lock_page_slow 2006-12-19 15:50:03.000000000 +0530 +++ linux-2.6.20-rc1-root/mm/filemap.c 2006-12-20 13:45:20.000000000 +0530 @@ -556,7 +556,7 @@ void end_page_writeback(struct page *pag EXPORT_SYMBOL(end_page_writeback); /** - * __lock_page - get a lock on the page, assuming we need to sleep to get it + * lock_page_slow - get a lock on the page, assuming we need to sleep to get it * @page: the page to lock * * Ugly. 
Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some @@ -564,14 +564,14 @@ EXPORT_SYMBOL(end_page_writeback); * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. */ -void fastcall __lock_page(struct page *page) +void fastcall lock_page_slow(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, TASK_UNINTERRUPTIBLE); } -EXPORT_SYMBOL(__lock_page); +EXPORT_SYMBOL(lock_page_slow); /* * Variant of lock_page that does not require the caller to hold a reference @@ -647,7 +647,7 @@ repeat: page_cache_get(page); if (TestSetPageLocked(page)) { read_unlock_irq(&mapping->tree_lock); - __lock_page(page); + lock_page_slow(page); read_lock_irq(&mapping->tree_lock); /* Has the page been truncated while we slept? */ _