From: Con Kolivas

A failed radix_tree_insert() wasn't being handled, leaving a stale kmem
cache entry.

The list should be iterated over in reverse order when prefetching.

Make the yield within kprefetchd stronger through the use of cond_resched().

Check that the pos entry hasn't been removed while the lock was dropped.

Signed-off-by: Con Kolivas
Signed-off-by: Andrew Morton
---

 mm/swap_prefetch.c |   19 ++++++++++---------
 1 files changed, 10 insertions(+), 9 deletions(-)

diff -puN mm/swap_prefetch.c~mm-swap-prefetch-more-improvements mm/swap_prefetch.c
--- a/mm/swap_prefetch.c~mm-swap-prefetch-more-improvements
+++ a/mm/swap_prefetch.c
@@ -117,7 +117,8 @@ void add_to_swapped_list(struct page *pa
 	if (likely(!radix_tree_insert(&swapped.swap_tree, index, entry))) {
 		list_add(&entry->swapped_list, &swapped.list);
 		swapped.count++;
-	}
+	} else
+		kmem_cache_free(swapped.cache, entry);
 
 out_locked:
 	spin_unlock_irqrestore(&swapped.lock, flags);
@@ -431,7 +432,7 @@ out:
 static enum trickle_return trickle_swap(void)
 {
 	enum trickle_return ret = TRICKLE_DELAY;
-	struct list_head *p, *next;
+	struct swapped_entry *pos, *n;
 	unsigned long flags;
 
 	if (!prefetch_enabled())
@@ -444,19 +445,19 @@ static enum trickle_return trickle_swap(
 		return TRICKLE_FAILED;
 
 	spin_lock_irqsave(&swapped.lock, flags);
-	list_for_each_safe(p, next, &swapped.list) {
-		struct swapped_entry *entry;
+	list_for_each_entry_safe_reverse(pos, n, &swapped.list, swapped_list) {
 		swp_entry_t swp_entry;
 		int node;
 
 		spin_unlock_irqrestore(&swapped.lock, flags);
-		might_sleep();
-		if (!prefetch_suitable())
+		/* Yield to anything else running */
+		if (cond_resched() || !prefetch_suitable())
 			goto out_unlocked;
 
 		spin_lock_irqsave(&swapped.lock, flags);
-		entry = list_entry(p, struct swapped_entry, swapped_list);
-		node = get_swap_entry_node(entry);
+		if (unlikely(!pos))
+			continue;
+		node = get_swap_entry_node(pos);
 		if (!node_isset(node, sp_stat.prefetch_nodes)) {
 			/*
 			 * We found an entry that belongs to a node that is
@@ -464,7 +465,7 @@
 			 */
 			continue;
 		}
-		swp_entry = entry->swp_entry;
+		swp_entry = pos->swp_entry;
 		spin_unlock_irqrestore(&swapped.lock, flags);
 
 		if (trickle_swap_cache_async(swp_entry, node) == TRICKLE_DELAY)
_
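
As an aside for readers outside the kernel tree: the first hunk applies the
usual discipline that a preallocated object must be freed by the caller when
an index insert fails, since nothing else owns it yet. Below is a minimal,
self-contained userspace sketch of that pattern; tree_insert() and the
fixed-size slot array are hypothetical stand-ins for radix_tree_insert() and
the radix tree, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-in for radix_tree_insert(): returns 0 on success,
 * -1 when the index is out of range or the slot is already occupied.
 */
static int tree_insert(void **slots, unsigned long nslots,
		       unsigned long index, void *item)
{
	if (index >= nslots || slots[index])
		return -1;
	slots[index] = item;
	return 0;
}

int main(void)
{
	void *slots[8] = { NULL };
	const unsigned long index = 3;
	int i;

	for (i = 0; i < 2; i++) {
		int *entry = malloc(sizeof(*entry));

		if (!entry)
			return 1;
		*entry = i;

		if (tree_insert(slots, 8, index, entry) == 0) {
			printf("inserted entry %d at index %lu\n", i, index);
		} else {
			/*
			 * The insert failed, so the tree never took
			 * ownership; free the allocation rather than leak
			 * it, as the patch does with kmem_cache_free().
			 */
			printf("index %lu taken, freeing entry %d\n",
			       index, i);
			free(entry);
		}
	}

	free(slots[index]);
	return 0;
}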