This patch adds migrate_page_buffer() to migrate pages with buffers.
+EXPORT_SYMBOL(generic_move_buffer); EXPORT_SYMBOL(init_buffer); EXPORT_SYMBOL(invalidate_bdev); EXPORT_SYMBOL(ll_rw_block); diff -puN include/linux/buffer_head.h~AA-PM-20.0-nowriteback include/linux/buffer_head.h --- memhotplug/include/linux/buffer_head.h~AA-PM-20.0-nowriteback 2005-07-28 13:50:52.000000000 -0700 +++ memhotplug-dave/include/linux/buffer_head.h 2005-07-28 13:50:52.000000000 -0700 @@ -208,7 +208,8 @@ int nobh_commit_write(struct file *, str int nobh_truncate_page(struct address_space *, loff_t); int nobh_writepage(struct page *page, get_block_t *get_block, struct writeback_control *wbc); - +void generic_move_buffer(struct page *, struct page *); +void unlock_page_buffer(struct page *); /* * inline definitions diff -puN include/linux/mmigrate.h~AA-PM-20.0-nowriteback include/linux/mmigrate.h --- memhotplug/include/linux/mmigrate.h~AA-PM-20.0-nowriteback 2005-07-28 13:50:52.000000000 -0700 +++ memhotplug-dave/include/linux/mmigrate.h 2005-07-28 13:50:52.000000000 -0700 @@ -10,6 +10,8 @@ extern int generic_migrate_page(struct p int (*)(struct page *, struct page *, struct list_head *)); extern int migrate_page_common(struct page *, struct page *, struct list_head *); +extern int migrate_page_buffer(struct page *, struct page *, + struct list_head *); extern int page_migratable(struct page *, struct page *, int, struct list_head *); extern struct page * migrate_onepage(struct page *); diff -puN mm/mmigrate.c~AA-PM-20.0-nowriteback mm/mmigrate.c --- memhotplug/mm/mmigrate.c~AA-PM-20.0-nowriteback 2005-07-28 13:50:52.000000000 -0700 +++ memhotplug-dave/mm/mmigrate.c 2005-07-28 13:50:52.000000000 -0700 @@ -161,6 +161,44 @@ migrate_page_common(struct page *page, s } /* + * Wait for the completion of all operations, which are going on + * against the page. After that, move the buffers the page owns + * to the newpage and copy the page. 
+ */ +int +migrate_page_buffer(struct page *page, struct page *newpage, + struct list_head *vlist) +{ + long timeout = 5000; /* XXXX */ + int ret; + + while (timeout > 0) { + BUG_ON(page_count(page) == 0); + ret = page_migratable(page, newpage, + 2 + !!PagePrivate(page), vlist); + switch (ret) { + case 0: + if (PagePrivate(page)) + generic_move_buffer(page, newpage); + /* fall thru */ + case -ENOENT: /* truncated */ + copy_highpage(newpage, page); + return ret; + case -EBUSY: + return ret; + case -EAGAIN: + wait_on_page_writeback(page); + unlock_page(page); + msleep(10); + timeout -= 10; + lock_page(page); + continue; + } + } + return -EBUSY; +} + +/* * In some cases, a page migration needs to be rolled back. */ static int @@ -314,6 +352,10 @@ generic_migrate_page(struct page *page, clear_page_dirty_for_io(page); set_page_dirty(newpage); } + if (PagePrivate(newpage)) { + BUG_ON(newpage->mapping == NULL); + unlock_page_buffer(newpage); + } /* * Finally, the newpage has become ready! Wake up all waiters, * which have been waiting for the completion of the migration. @@ -349,6 +391,8 @@ out_busy: return ret; out_removing: + if (PagePrivate(newpage)) + BUG(); unlock_page(page); unlock_page(newpage); return ret; _