From: Wu Fengguang

Introduce three guiding sizes for the initial readahead method.

 - ra_pages0:       recommended readahead on start-of-file
 - ra_expect_bytes: expected read size on start-of-file
 - ra_thrash_bytes: estimated thrashing threshold

Signed-off-by: Wu Fengguang
DESC
readahead: aggressive initial sizes
EDESC
From: Wu Fengguang

Set ra_expect_bytes and ra_thrash_bytes of default_backing_dev_info to large
numbers.

Large initial values are better, because
- most systems don't have the danger of thrashing
- most small files are read in whole
- they both increase slowly and drop rapidly

Signed-off-by: Wu Fengguang
Signed-off-by: Andrew Morton
---

 block/ll_rw_blk.c           |    4 +---
 include/linux/backing-dev.h |    3 +++
 mm/readahead.c              |    6 ++++++
 3 files changed, 10 insertions(+), 3 deletions(-)

diff -puN block/ll_rw_blk.c~readahead-initial-method-guiding-sizes block/ll_rw_blk.c
--- a/block/ll_rw_blk.c~readahead-initial-method-guiding-sizes
+++ a/block/ll_rw_blk.c
@@ -213,9 +213,6 @@ void blk_queue_make_request(request_queu
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	q->backing_dev_info.state = 0;
-	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
@@ -1847,6 +1844,7 @@ request_queue_t *blk_alloc_queue_node(gf
 	q->kobj.ktype = &queue_ktype;
 	kobject_init(&q->kobj);

+	q->backing_dev_info = default_backing_dev_info;
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;

diff -puN include/linux/backing-dev.h~readahead-initial-method-guiding-sizes include/linux/backing-dev.h
--- a/include/linux/backing-dev.h~readahead-initial-method-guiding-sizes
+++ a/include/linux/backing-dev.h
@@ -26,6 +26,9 @@ typedef int (congested_fn)(void *, int);

 struct backing_dev_info {
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages0;	/* recommended readahead on start of file */
+	unsigned long ra_expect_bytes;	/* expected read size on start of file */
+	unsigned long ra_thrash_bytes;	/* thrashing threshold */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
diff -puN mm/readahead.c~readahead-initial-method-guiding-sizes mm/readahead.c
--- a/mm/readahead.c~readahead-initial-method-guiding-sizes
+++ a/mm/readahead.c
@@ -32,6 +32,9 @@
  * Adaptive read-ahead parameters.
  */

+/* Default max read-ahead size for the initial method. */
+#define INITIAL_RA_PAGES	DIV_ROUND_UP(128*1024, PAGE_CACHE_SIZE)
+
 /* In laptop mode, poll delayed look-ahead on every ## pages read. */
 #define LAPTOP_POLL_INTERVAL 16

@@ -125,6 +128,9 @@ EXPORT_SYMBOL(default_unplug_io_fn);

 struct backing_dev_info default_backing_dev_info = {
 	.ra_pages	= MAX_RA_PAGES,
+	.ra_pages0	= INITIAL_RA_PAGES,
+	.ra_expect_bytes	= INITIAL_RA_PAGES * PAGE_CACHE_SIZE,
+	.ra_thrash_bytes	= MAX_RA_PAGES * PAGE_CACHE_SIZE,
 	.state		= 0,
 	.capabilities	= BDI_CAP_MAP_COPY,
 	.unplug_io_fn	= default_unplug_io_fn,
_
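
For illustration only, not part of the patch above: a minimal userspace C sketch of
how an initial-readahead decision might combine the three guiding sizes -- round a
small first read up toward ra_expect_bytes (since most small files are read in
whole), then cap the result at ra_pages0 and at the thrashing estimate.  The struct
name, the helper initial_readahead_pages() and the exact clamping policy are
assumptions made for this sketch; the real policy is implemented by the adaptive
readahead code in mm/readahead.c.

/*
 * Userspace model of an initial readahead decision bounded by the
 * three guiding sizes.  Helper name and policy are illustrative only.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL

struct bdi_hint {
	unsigned long ra_pages0;	/* recommended readahead on start of file */
	unsigned long ra_expect_bytes;	/* expected read size on start of file */
	unsigned long ra_thrash_bytes;	/* estimated thrashing threshold */
};

/* Hypothetical helper: pick an initial readahead size, in pages. */
static unsigned long initial_readahead_pages(const struct bdi_hint *bdi,
					     unsigned long req_pages)
{
	unsigned long expect = bdi->ra_expect_bytes / PAGE_CACHE_SIZE;
	unsigned long thrash = bdi->ra_thrash_bytes / PAGE_CACHE_SIZE;
	unsigned long ra = req_pages;

	if (ra < expect)		/* small read: expect the whole file to be read */
		ra = expect;
	if (ra > bdi->ra_pages0)	/* cap at the recommended start-of-file size */
		ra = bdi->ra_pages0;
	if (ra > thrash)		/* never exceed the thrashing estimate */
		ra = thrash;
	return ra;
}

int main(void)
{
	struct bdi_hint bdi = {
		.ra_pages0       = 128 * 1024 / PAGE_CACHE_SIZE,
		.ra_expect_bytes = 128 * 1024,
		.ra_thrash_bytes = 512 * 1024,
	};

	printf("4-page read  -> %lu pages\n", initial_readahead_pages(&bdi, 4));
	printf("64-page read -> %lu pages\n", initial_readahead_pages(&bdi, 64));
	return 0;
}

With the default-like numbers above, a 4-page first read is rounded up to the
expected 32 pages, and a 64-page request is clamped back down to the 32-page
ra_pages0 recommendation; ra_thrash_bytes only bites when the other two sizes are
tuned higher than the thrashing estimate.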