From: David Rientjes

Introduces a new zone flag interface for testing and setting flags:

	int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)

Instead of setting and clearing ZONE_RECLAIM_LOCKED each time shrink_zone()
is called, this flag is tested and set before starting zone reclaim.  Zone
reclaim starts in __alloc_pages() when a zone's watermark fails and the
system is in zone_reclaim_mode.  If it's already in reclaim, there's no need
to start again so the zone is simply considered full for that allocation
attempt.

There is a change of behavior with regard to concurrent zone shrinking.  It
is now possible for try_to_free_pages() or kswapd to already be shrinking a
particular zone when __alloc_pages() starts zone reclaim.  In this case, it
is possible for two concurrent threads to invoke shrink_zone() for a single
zone.  This change still forbids a zone from being in zone reclaim twice,
which was always the behavior, but now allows for concurrent
try_to_free_pages() or kswapd shrinking while starting zone reclaim.

Cc: Andrea Arcangeli
Cc: Christoph Lameter
Signed-off-by: David Rientjes
Signed-off-by: Andrew Morton
---

diff -puN include/linux/mmzone.h~mm-test-and-set-zone-reclaim-lock-before-starting include/linux/mmzone.h
--- a/include/linux/mmzone.h~mm-test-and-set-zone-reclaim-lock-before-starting
+++ a/include/linux/mmzone.h
@@ -353,6 +353,10 @@ static inline void zone_set_flag(struct
 {
 	set_bit(flag, &zone->flags);
 }
+static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
+{
+	return test_and_set_bit(flag, &zone->flags);
+}
 static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 {
 	clear_bit(flag, &zone->flags);
diff -puN mm/vmscan.c~mm-test-and-set-zone-reclaim-lock-before-starting mm/vmscan.c
--- a/mm/vmscan.c~mm-test-and-set-zone-reclaim-lock-before-starting
+++ a/mm/vmscan.c
@@ -1112,8 +1112,6 @@ static unsigned long shrink_zone(int pri
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
 
-	zone_set_flag(zone, ZONE_RECLAIM_LOCKED);
-
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -1152,8 +1150,6 @@ static unsigned long shrink_zone(int pri
 	}
 
 	throttle_vm_writeout(sc->gfp_mask);
-
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 	return nr_reclaimed;
 }
 
@@ -1932,6 +1928,7 @@ static int __zone_reclaim(struct zone *z
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
 	int node_id;
+	int ret;
 
 	/*
 	 * Zone reclaim reclaims unmapped file backed pages and
@@ -1949,13 +1946,13 @@ int zone_reclaim(struct zone *zone, gfp_
 			<= zone->min_slab_pages)
 		return 0;
 
+	if (zone_is_all_unreclaimable(zone))
+		return 0;
+
 	/*
-	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-	 * not have reclaimable pages and if we should not delay the allocation
-	 * then do not scan.
+	 * Do not scan if the allocation should not be delayed.
 	 */
-	if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) ||
-	    zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC))
+	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
 		return 0;
 
 	/*
@@ -1967,6 +1964,12 @@ int zone_reclaim(struct zone *zone, gfp_
 	node_id = zone_to_nid(zone);
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return 0;
-	return __zone_reclaim(zone, gfp_mask, order);
+
+	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+		return 0;
+	ret = __zone_reclaim(zone, gfp_mask, order);
+	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+	return ret;
 }
 #endif
_
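
For readers unfamiliar with the guard pattern the patch uses, here is a
minimal userspace sketch (not part of the patch) of the same idea, using
C11 atomic_flag as a stand-in for the kernel's test_and_set_bit() and
clear_bit().  All names in it (fake_zone, try_zone_reclaim, do_reclaim)
are illustrative, not kernel symbols.

	/* Build with: cc -pthread sketch.c */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <pthread.h>

	struct fake_zone {
		atomic_flag reclaim_locked;	/* stands in for ZONE_RECLAIM_LOCKED */
	};

	static struct fake_zone zone = { .reclaim_locked = ATOMIC_FLAG_INIT };

	static void do_reclaim(void)
	{
		/* placeholder for the __zone_reclaim() work */
	}

	/*
	 * Mirrors the patched zone_reclaim(): if another thread already
	 * holds the flag, give up immediately (the zone is considered
	 * full for this attempt) rather than reclaim the zone twice.
	 */
	static int try_zone_reclaim(void)
	{
		if (atomic_flag_test_and_set(&zone.reclaim_locked))
			return 0;	/* someone else is already reclaiming */
		do_reclaim();
		atomic_flag_clear(&zone.reclaim_locked);
		return 1;
	}

	static void *worker(void *arg)
	{
		printf("thread %ld: %s\n", (long)arg,
		       try_zone_reclaim() ? "reclaimed" : "locked, skipped");
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];

		for (long i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, (void *)i);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

The point of test-and-set over separate set/clear calls is that checking
and claiming the flag happen as one atomic step, so two threads racing
into zone reclaim cannot both observe the flag clear and proceed; the
loser returns immediately instead of blocking, which is exactly the
"considered full for that allocation attempt" behavior described above.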