From: Wu Fengguang

Readahead policy after thrashing. It tries to recover gracefully from the
thrashing.

Signed-off-by: Wu Fengguang
Signed-off-by: Andrew Morton
---

 mm/readahead.c |   42 ++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 42 insertions(+)

diff -puN mm/readahead.c~readahead-thrashing-recovery-method mm/readahead.c
--- a/mm/readahead.c~readahead-thrashing-recovery-method
+++ a/mm/readahead.c
@@ -1513,6 +1513,48 @@ try_backward_prefetching(struct file_ra_
 	return 1;
 }
 
+/*
+ * Readahead thrashing recovery.
+ */
+static unsigned long
+thrashing_recovery_readahead(struct address_space *mapping,
+		struct file *filp, struct file_ra_state *ra,
+		pgoff_t offset, unsigned long ra_max)
+{
+	unsigned long ra_size;
+
+#ifdef CONFIG_DEBUG_READAHEAD
+	if (probe_page(mapping, offset - 1))
+		ra_account(ra, RA_EVENT_READAHEAD_MUTILATE,
+				ra->readahead_index - offset);
+	ra_account(ra, RA_EVENT_READAHEAD_THRASHING,
+			ra->readahead_index - offset);
+#endif
+
+	if (offset < ra->ra_index) {
+		/*
+		 * Thrashed when we are in [la_index, ra_index), i.e.
+		 * the old chunk is lost soon after the new one is allocated.
+		 * Ensure that we recover all needed pages in the old chunk.
+		 * And further keep the lookahead_index untouched.
+		 */
+		ra_size = ra->lookahead_index - offset;
+	} else {
+		/* After thrashing, we know the exact thrashing-threshold. */
+		ra_size = offset - ra->la_index;
+		update_ra_thrash_bytes(mapping->backing_dev_info, ra_size);
+
+		/* And be cooperative: the system may be hunting for memory. */
+		ra_size = MIN_RA_PAGES + ra_size / 2;
+	}
+
+	ra_set_class(ra, RA_CLASS_THRASHING);
+	ra_set_index(ra, offset, offset);
+	ra_set_size(ra, ra_size, 0);
+
+	return ra_submit(ra, mapping, filp);
+}
+
 #endif /* CONFIG_ADAPTIVE_READAHEAD */
 
 /*
_