From: Wu Fengguang

- The backward prefetching method fails near start of file.  Fix it.
- Make it scale up more quickly by adding ra_min to ra_size.
- Do not discount readahead_hit_rate, that's not a documented behavior.

Signed-off-by: Wu Fengguang
Signed-off-by: Andrew Morton
---

 mm/readahead.c |   15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff -puN mm/readahead.c~readahead-backward-prefetching-method-fix mm/readahead.c
--- a/mm/readahead.c~readahead-backward-prefetching-method-fix
+++ a/mm/readahead.c
@@ -1650,8 +1650,9 @@ initial_readahead(struct address_space *
  * Important for certain scientific arenas(i.e. structural analysis).
  */
 static int
-try_read_backward(struct file_ra_state *ra, pgoff_t begin_index,
-			unsigned long ra_size, unsigned long ra_max)
+try_read_backward(struct file_ra_state *ra,
+			pgoff_t begin_index, unsigned long ra_size,
+			unsigned long ra_min, unsigned long ra_max)
 {
 	pgoff_t end_index;

@@ -1660,11 +1661,11 @@ try_read_backward(struct file_ra_state *
 		return 0;

 	if ((ra->flags & RA_CLASS_MASK) == RA_CLASS_BACKWARD &&
-			ra_has_index(ra, ra->prev_page)) {
-		ra_size += 2 * ra->hit0;
+			ra_has_index(ra, ra->prev_page) && ra_cache_hit_ok(ra)) {
+		ra_size += ra_min + 2 * ra_readahead_size(ra);
 		end_index = ra->la_index;
 	} else {
-		ra_size += ra_size + ra_size * (readahead_hit_rate - 1) / 2;
+		ra_size += ra_size * readahead_hit_rate;
 		end_index = ra->prev_page;
 	}

@@ -1675,7 +1676,7 @@ try_read_backward(struct file_ra_state *
 	if (end_index > begin_index + ra_size)
 		return 0;

-	begin_index = end_index - ra_size;
+	begin_index = end_index > ra_size ? end_index - ra_size : 0;

 	ra_set_class(ra, RA_CLASS_BACKWARD);
 	ra_set_index(ra, begin_index, begin_index);
@@ -1882,7 +1883,7 @@ page_cache_readahead_adaptive(struct add
 	 * Backward read-ahead.
 	 */
 	if (!page && begin_index == index &&
-			try_read_backward(ra, index, size, ra_max))
+			try_read_backward(ra, index, size, ra_min, ra_max))
 		return ra_dispatch(ra, mapping, filp);

 	/*
_
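
[Editorial note] A minimal userspace sketch of the start-of-file fix above, not
part of the patch and not kernel code: pgoff_t is unsigned, so the old
"end_index - ra_size" wraps around when the backward window would extend past
page 0, while the new expression clamps it at 0.  All names below are local to
this illustration.

	#include <stdio.h>

	typedef unsigned long pgoff_t;

	/* Old behaviour: underflows when ra_size > end_index. */
	static pgoff_t begin_old(pgoff_t end_index, unsigned long ra_size)
	{
		return end_index - ra_size;
	}

	/* Fixed behaviour: clamp the backward window at page 0. */
	static pgoff_t begin_new(pgoff_t end_index, unsigned long ra_size)
	{
		return end_index > ra_size ? end_index - ra_size : 0;
	}

	int main(void)
	{
		pgoff_t end_index = 4;		/* reading near the start of the file */
		unsigned long ra_size = 16;	/* requested backward window, in pages */

		printf("old begin_index: %lu\n", begin_old(end_index, ra_size));
		printf("new begin_index: %lu\n", begin_new(end_index, ra_size));
		return 0;
	}

With these inputs the old computation prints a huge wrapped-around page index,
whereas the fixed computation prints 0, i.e. the backward readahead window is
truncated at the beginning of the file instead of being dispatched at a bogus
offset.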