From: Wu Fengguang

Readahead policy after thrashing.

It tries to recover gracefully from thrashing.

Signed-off-by: Wu Fengguang
Signed-off-by: Andrew Morton
---

 mm/readahead.c |   42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff -puN mm/readahead.c~readahead-thrashing-recovery-method mm/readahead.c
--- a/mm/readahead.c~readahead-thrashing-recovery-method
+++ a/mm/readahead.c
@@ -1632,6 +1632,48 @@ try_readahead_on_seek(struct file_ra_sta
 }
 
 /*
+ * Readahead thrashing recovery.
+ */
+static unsigned long
+thrashing_recovery_readahead(struct address_space *mapping,
+                struct file *filp, struct file_ra_state *ra,
+                pgoff_t index, unsigned long ra_max)
+{
+        unsigned long ra_size;
+
+        if (probe_page(mapping, index - 1))
+                ra_account(ra, RA_EVENT_READAHEAD_MUTILATE,
+                                ra->readahead_index - index);
+        ra_account(ra, RA_EVENT_READAHEAD_THRASHING,
+                        ra->readahead_index - index);
+
+        /*
+         * Some thrashing occurs in (ra_index, la_index], in which case the
+         * old read-ahead chunk is lost soon after the new one is allocated.
+         * Ensure that we recover all needed pages in the old chunk.
+         */
+        if (index < ra->ra_index)
+                ra_size = ra->ra_index - index;
+        else {
+                /* After thrashing, we know the exact thrashing-threshold. */
+                ra_size = ra->hit0;
+                update_ra_thrash_bytes(mapping->backing_dev_info, ra_size);
+
+                /* And we'd better be a bit conservative. */
+                ra_size = ra_size * 3 / 4;
+        }
+
+        if (ra_size > ra_max)
+                ra_size = ra_max;
+
+        ra_set_class(ra, RA_CLASS_THRASHING);
+        ra_set_index(ra, index, index);
+        ra_set_size(ra, ra_size, ra_size / LOOKAHEAD_RATIO);
+
+        return ra_dispatch(ra, mapping, filp);
+}
+
+/*
  * ra_min is mainly determined by the size of cache memory. Reasonable?
  *
  * Table of concrete numbers for 4KB page size:
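For readers following along, the sizing logic above can be condensed into a
minimal standalone C sketch. This is not part of the patch and not kernel
API: toy_ra_state, toy_recovery_size and the LOOKAHEAD_RATIO value are
illustrative stand-ins for the kernel's file_ra_state and the real tunables,
shown only to demonstrate how the recovery size is chosen and clamped.

/*
 * Minimal userspace sketch of the recovery sizing above; NOT kernel code.
 * All names and the LOOKAHEAD_RATIO value are assumptions for illustration.
 */
#include <stdio.h>

#define LOOKAHEAD_RATIO 8               /* assumed ratio, for the demo only */

struct toy_ra_state {
        unsigned long ra_index;         /* first page of the current chunk */
        unsigned long hit0;             /* pages consumed before thrashing */
};

/* Compute how many pages to re-read after a thrashing event. */
static unsigned long toy_recovery_size(const struct toy_ra_state *ra,
                                       unsigned long index,
                                       unsigned long ra_max)
{
        unsigned long ra_size;

        if (index < ra->ra_index)
                /* Old chunk already lost: recover up to the new chunk. */
                ra_size = ra->ra_index - index;
        else
                /* hit0 approximates the thrashing threshold; back off 25%. */
                ra_size = ra->hit0 * 3 / 4;

        return ra_size > ra_max ? ra_max : ra_size;
}

int main(void)
{
        struct toy_ra_state ra = { .ra_index = 100, .hit0 = 32 };

        /* Faulting page 90 lies before the chunk start: re-read 10 pages. */
        printf("before chunk: %lu pages\n", toy_recovery_size(&ra, 90, 256));
        /* Faulting page 110 lies inside: 3/4 of hit0 = 24 pages. */
        printf("inside chunk: %lu pages\n", toy_recovery_size(&ra, 110, 256));
        /* The lookahead mark would be set at ra_size / LOOKAHEAD_RATIO. */
        printf("lookahead:    %lu pages\n",
               toy_recovery_size(&ra, 110, 256) / LOOKAHEAD_RATIO);
        return 0;
}

The 3/4 back-off mirrors the patch's "a bit conservative" comment: hit0
records how many pages were consumed before the thrashing hit, so it
approximates the current thrashing threshold, and requesting slightly less
than that makes an immediate repeat of the thrashing less likely.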