From: Andrew Morton

It is a bug to nest read_lock_irq() inside read_lock_irq().

Cc: Wu Fengguang
Signed-off-by: Andrew Morton
---

 mm/readahead.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff -puN mm/readahead.c~readahead-context-based-method-locking-fix mm/readahead.c
--- a/mm/readahead.c~readahead-context-based-method-locking-fix
+++ a/mm/readahead.c
@@ -1170,10 +1170,10 @@ static inline unsigned long inactive_pag
 
 /*
  * Count/estimate cache hits in range [begin, end).
- * The estimation is simple and optimistic.
+ * The estimation is simple and optimistic. The caller must hold tree_lock.
  */
 #define CACHE_HIT_HASH_KEY	29	/* some prime number */
-static int count_cache_hit(struct address_space *mapping,
+static int __count_cache_hit(struct address_space *mapping,
 			pgoff_t begin, pgoff_t end)
 {
 	int size = end - begin;
@@ -1186,14 +1186,12 @@ static int count_cache_hit(struct addres
 	 * behavior guarantees a readahead when (size < ra_max) and
 	 * (readahead_hit_rate >= 8).
 	 */
-	read_lock_irq(&mapping->tree_lock);
 	for (i = 0; i < 8;) {
 		struct page *page = radix_tree_lookup(&mapping->page_tree,
 			begin + size * ((i++ * CACHE_HIT_HASH_KEY) & 7) / 8);
 
 		if (inactive_page_refcnt(page) >= PAGE_REFCNT_1 && ++count >= 2)
 			break;
 	}
-	read_unlock_irq(&mapping->tree_lock);
 	return size * count / i;
 }
_
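
For readers unfamiliar with the _irq lock variants, here is a minimal
illustrative sketch of why the nesting is a bug.  The outer()/inner()
functions and the DEFINE_RWLOCK stand-in are hypothetical, not code from
this patch: read_lock_irq() disables local interrupts and
read_unlock_irq() unconditionally re-enables them, so the inner unlock
turns interrupts back on while the outer read lock is still held.
Since tree_lock is also taken from interrupt context, that window is a
deadlock hazard.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(lock);		/* stand-in for mapping->tree_lock */

static void inner(void)
{
	read_lock_irq(&lock);		/* nested: IRQs are already disabled */
	/* lookup work */
	read_unlock_irq(&lock);		/* BUG: unconditionally re-enables IRQs */
}

static void outer(void)
{
	read_lock_irq(&lock);		/* IRQs off, read lock held */
	inner();
	/*
	 * IRQs are enabled again here although the outer read lock is
	 * still held.  An interrupt on this CPU that takes the lock
	 * for writing will spin forever: the writer waits for the
	 * reader, and this reader cannot run until the interrupt
	 * returns.
	 */
	read_unlock_irq(&lock);
}

The patch avoids this by dropping the inner locking from
count_cache_hit() (renamed __count_cache_hit()) and requiring the
caller to hold tree_lock instead.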