From 5925dca29af36045cd5c4fc9fac81f2f0b6e438a Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 14 Apr 2008 19:15:44 +0300
Subject: [PATCH] SLUB: Trigger defragmentation from memory reclaim

This patch triggers slab defragmentation from memory reclaim. The logical
point for this is after slab shrinking has been performed in vmscan.c. At
that point the fragmentation of the slab caches has increased because
objects were freed via the LRUs. So we call kmem_cache_defrag() from there.

shrink_slab() in vmscan.c is called in some contexts to do global shrinking
of slabs and in others to do shrinking for a particular zone. Pass the zone
to shrink_slab() so that it can call kmem_cache_defrag() and restrict the
defragmentation to the node that is under memory pressure.

Reviewed-by: Rik van Riel
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 fs/drop_caches.c       |    2 -
 include/linux/mm.h     |    3 --
 include/linux/mmzone.h |    1 
 mm/vmscan.c            |   51 ++++++++++++++++++++++++++++++++++++++++++-------
 4 files changed, 47 insertions(+), 10 deletions(-)

Index: linux-2.6/fs/drop_caches.c
===================================================================
--- linux-2.6.orig/fs/drop_caches.c	2008-04-29 11:52:09.663717892 -0700
+++ linux-2.6/fs/drop_caches.c	2008-04-29 14:10:22.604960416 -0700
@@ -58,7 +58,7 @@ static void drop_slab(void)
 	int nr_objects;
 
 	do {
-		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
+		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000, NULL);
 	} while (nr_objects > 10);
 }
 
Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h	2008-04-29 11:52:10.313718046 -0700
+++ linux-2.6/include/linux/mm.h	2008-04-29 14:11:06.772011872 -0700
@@ -1242,8 +1242,7 @@ int in_gate_area_no_task(unsigned long a
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages);
-
+			unsigned long lru_pages, struct zone *z);
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
 #else
Index: linux-2.6/mm/vmscan.c
===================================================================
--- linux-2.6.orig/mm/vmscan.c	2008-04-29 11:52:10.803718746 -0700
+++ linux-2.6/mm/vmscan.c	2008-04-29 14:10:22.634967390 -0700
@@ -166,10 +166,18 @@ EXPORT_SYMBOL(unregister_shrinker);
 * are eligible for the caller's allocation attempt. It is used for balancing
 * slab reclaim versus page reclaim.
 *
+ * zone is the zone for which we are shrinking the slabs. If the intent
+ * is to do a global shrink then zone may be NULL. Specification of a
+ * zone is currently only used to limit slab defragmentation to a NUMA node.
+ * The performance of shrink_slab() would be better (in particular under
+ * NUMA) if it could be targeted as a whole to the zone that is under
+ * memory pressure, but the VFS infrastructure does not allow that at the
+ * present time.
+ *
 * Returns the number of slab objects which we shrunk.
 */
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages)
+			unsigned long lru_pages, struct zone *zone)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
@@ -226,6 +234,33 @@ unsigned long shrink_slab(unsigned long
 		shrinker->nr += total_scan;
 	}
 	up_read(&shrinker_rwsem);
+	/*
+	 * "ret" doesn't really contain the freed object count; the shrinkers
+	 * fake it, but we have to go with what we get.
+	 *
+	 * Handling of the freed object counter is also racy. If we get the
+	 * wrong counts then we may unnecessarily do a defrag pass or defer
+	 * one. Since "ret" is already approximate, this just adds to the
+	 * existing fuzziness used to decide when to initiate slab defrag,
+	 * which should be okay.
+	 */
+	if (zone) {
+		/* balance_pgdat() is running on a zone, so only scan one node */
+		zone->slab_objects_freed += ret;
+		if (zone->slab_objects_freed > 1000 && (gfp_mask & __GFP_FS)) {
+			zone->slab_objects_freed = 0;
+			kmem_cache_defrag(zone_to_nid(zone));
+		}
+	} else {
+		static unsigned long global_objects_freed;
+
+		/* Direct (and thus global) reclaim. Scan all nodes */
+		global_objects_freed += ret;
+		if (global_objects_freed > 1000 && (gfp_mask & __GFP_FS)) {
+			global_objects_freed = 0;
+			kmem_cache_defrag(-1);
+		}
+	}
 	return ret;
 }
 
@@ -1342,7 +1377,7 @@ static unsigned long do_try_to_free_page
 		 * over limit cgroups
 		 */
 		if (scan_global_lru(sc)) {
-			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages, NULL);
 			if (reclaim_state) {
 				nr_reclaimed += reclaim_state->reclaimed_slab;
 				reclaim_state->reclaimed_slab = 0;
@@ -1567,7 +1602,7 @@ loop_again:
 			nr_reclaimed += shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-						lru_pages);
+						lru_pages, zone);
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 			if (zone_is_all_unreclaimable(zone))
@@ -1806,7 +1841,7 @@ unsigned long shrink_all_memory(unsigned
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
-		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+		shrink_slab(nr_pages, sc.gfp_mask, lru_pages, NULL);
 		if (!reclaim_state.reclaimed_slab)
 			break;
 
@@ -1844,7 +1879,7 @@ unsigned long shrink_all_memory(unsigned
 
 			reclaim_state.reclaimed_slab = 0;
 			shrink_slab(sc.nr_scanned, sc.gfp_mask,
-					count_lru_pages());
+					count_lru_pages(), NULL);
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1861,7 +1896,8 @@ unsigned long shrink_all_memory(unsigned
 	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask,
+					count_lru_pages(), NULL);
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
 	}
@@ -2023,7 +2059,8 @@ static int __zone_reclaim(struct zone *z
 		 * Note that shrink_slab will free memory on all zones and may
 		 * take a long time.
 		 */
-		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+		while (shrink_slab(sc.nr_scanned, gfp_mask, order,
+				zone) &&
 			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
 				slab_reclaimable - nr_pages)
 			;
Index: linux-2.6/include/linux/mmzone.h
===================================================================
--- linux-2.6.orig/include/linux/mmzone.h	2008-04-29 08:51:01.223708192 -0700
+++ linux-2.6/include/linux/mmzone.h	2008-04-29 14:10:22.654967222 -0700
@@ -256,6 +256,7 @@ struct zone {
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
+	unsigned long		slab_objects_freed; /* since last defrag */
 	unsigned long		flags;		   /* zone flags, see below */
 
 	/* Zone statistics */
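
To show the trigger heuristic in isolation, here is a minimal user-space
sketch of the counter logic the patch adds to shrink_slab(). The names
fake_zone, fake_defrag(), account_freed() and DEFRAG_TRIGGER_OBJECTS are
hypothetical stand-ins, not kernel interfaces; only the accumulate /
threshold / reset flow mirrors the patch.

/*
 * Stand-alone sketch (not kernel code): accumulate the approximate count
 * of freed slab objects and, once it crosses a threshold, reset it and
 * trigger a defragmentation pass -- per node when a zone is given,
 * globally otherwise.
 */
#include <stdio.h>

#define DEFRAG_TRIGGER_OBJECTS 1000	/* same threshold the patch uses */

struct fake_zone {
	int node;				/* NUMA node the zone belongs to */
	unsigned long slab_objects_freed;	/* counter added to struct zone */
};

/* Stand-in for kmem_cache_defrag(); -1 means "all nodes". */
static void fake_defrag(int node)
{
	if (node < 0)
		printf("defrag pass over all nodes\n");
	else
		printf("defrag pass restricted to node %d\n", node);
}

/* Mirrors the logic appended to shrink_slab() by the patch. */
static void account_freed(struct fake_zone *zone, unsigned long freed,
			  int fs_allowed)
{
	static unsigned long global_objects_freed;

	if (zone) {
		/* zone-targeted reclaim: only consider that zone's node */
		zone->slab_objects_freed += freed;
		if (zone->slab_objects_freed > DEFRAG_TRIGGER_OBJECTS &&
		    fs_allowed) {
			zone->slab_objects_freed = 0;
			fake_defrag(zone->node);
		}
	} else {
		/* global reclaim: one shared counter, scan all nodes */
		global_objects_freed += freed;
		if (global_objects_freed > DEFRAG_TRIGGER_OBJECTS &&
		    fs_allowed) {
			global_objects_freed = 0;
			fake_defrag(-1);
		}
	}
}

int main(void)
{
	struct fake_zone z = { .node = 1, .slab_objects_freed = 0 };

	/* Three reclaim rounds against one zone; the third crosses 1000. */
	account_freed(&z, 400, 1);
	account_freed(&z, 400, 1);
	account_freed(&z, 400, 1);

	/* Global reclaim with __GFP_FS clear: never triggers a defrag. */
	account_freed(NULL, 5000, 0);
	return 0;
}

The per-zone counter keeps zone-targeted reclaim (balance_pgdat) from
triggering defragmentation on nodes that are not under pressure, while
direct reclaim falls back to a single shared counter and a scan of all
nodes.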