From: Badari Pulavarty Instead of passing validity of block mapping, do_mpage_readpage() can figure it out using buffer_mapped(). This will reduce one unneeded argument passing. Signed-off-by: Badari Pulavarty Cc: Mingming Cao Signed-off-by: Andrew Morton --- fs/mpage.c | 33 +++++++++++++++++++-------------- 1 files changed, 19 insertions(+), 14 deletions(-) diff -puN fs/mpage.c~map-multiple-blocks-for-mpage_readpages-use-buffer_mapped fs/mpage.c --- 25/fs/mpage.c~map-multiple-blocks-for-mpage_readpages-use-buffer_mapped Tue Mar 7 14:45:20 2006 +++ 25-akpm/fs/mpage.c Tue Mar 7 14:46:40 2006 @@ -163,11 +163,19 @@ map_buffer_to_page(struct page *page, st } while (page_bh != head); } +/* + * This is the worker routine which does all the work of mapping the disk + * blocks and constructs largest possible bios, submits them for IO if the + * blocks are not contiguous on the disk. + * + * We pass a buffer_head back and forth and use its buffer_mapped() flag to + * represent the validity of its disk mapping and to decide when to do the next + * get_block() call. + */ static struct bio * do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, - sector_t *last_block_in_bio, struct buffer_head *map_bh, - unsigned long *first_logical_block, int *map_valid, - get_block_t get_block) + sector_t *last_block_in_bio, struct buffer_head *map_bh, + unsigned long *first_logical_block, get_block_t get_block) { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -199,14 +207,14 @@ do_mpage_readpage(struct bio *bio, struc * Map blocks using the result from the previous get_blocks call first. 
*/ nblocks = map_bh->b_size >> blkbits; - if (*map_valid && block_in_file > *first_logical_block && + if (buffer_mapped(map_bh) && block_in_file > *first_logical_block && block_in_file < (*first_logical_block + nblocks)) { unsigned map_offset = block_in_file - *first_logical_block; unsigned last = nblocks - map_offset; for (relative_block = 0; ; relative_block++) { if (relative_block == last) { - *map_valid = 0; + clear_buffer_mapped(map_bh); break; } if (page_block == blocks_per_page) @@ -232,7 +240,6 @@ do_mpage_readpage(struct bio *bio, struc if (get_block(inode, block_in_file, map_bh, 0)) goto confused; *first_logical_block = block_in_file; - *map_valid = 1; } if (!buffer_mapped(map_bh)) { @@ -241,7 +248,7 @@ do_mpage_readpage(struct bio *bio, struc first_hole = page_block; page_block++; block_in_file++; - *map_valid = 0; + clear_buffer_mapped(map_bh); continue; } @@ -265,7 +272,7 @@ do_mpage_readpage(struct bio *bio, struc nblocks = map_bh->b_size >> blkbits; for (relative_block = 0; ; relative_block++) { if (relative_block == nblocks) { - *map_valid = 0; + clear_buffer_mapped(map_bh); break; } else if (page_block == blocks_per_page) break; @@ -385,8 +392,8 @@ mpage_readpages(struct address_space *ma struct pagevec lru_pvec; struct buffer_head map_bh; unsigned long first_logical_block = 0; - int map_valid = 0; + clear_buffer_mapped(&map_bh); pagevec_init(&lru_pvec, 0); for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_entry(pages->prev, struct page, lru); @@ -399,7 +406,7 @@ mpage_readpages(struct address_space *ma nr_pages - page_idx, &last_block_in_bio, &map_bh, &first_logical_block, - &map_valid, get_block); + get_block); if (!pagevec_add(&lru_pvec, page)) __pagevec_lru_add(&lru_pvec); } else { @@ -423,12 +430,10 @@ int mpage_readpage(struct page *page, ge sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; - int map_valid = 0; - + clear_buffer_mapped(&map_bh); bio = 
do_mpage_readpage(bio, page, 1, &last_block_in_bio, - &map_bh, &first_logical_block, &map_valid, - get_block); + &map_bh, &first_logical_block, get_block); if (bio) mpage_bio_submit(READ, bio); return 0; _