[PATCH v5] mm, vmscan: retry kswapd's priority loop with cache_trim_mode off on failure

From: Byungchul Park
Date: Sun Mar 03 2024 - 21:24:40 EST


With cache_trim_mode on, reclaim logic doesn't bother reclaiming anon
pages. However, the mode should be used more carefully, because it
prevents anon pages from being reclaimed even when there are a huge
number of cold anon pages that should be reclaimed. Even worse, that
can make kswapd_failures reach MAX_RECLAIM_RETRIES, stopping kswapd
from functioning until direct reclaim eventually succeeds and resumes
kswapd.

So kswapd needs to retry its scan priority loop with cache_trim_mode
off when reclaim with the mode on has failed, as sketched below.
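
With the patch applied, the flow in balance_pgdat() becomes roughly
the following; this is a simplified sketch that only spells out the
retry logic, not compilable as-is:

	sc.may_cache_trim_mode = 1;
restart:
	sc.priority = DEF_PRIORITY;
	do {
		/* ... reclaim one round; cache_trim_mode can only be
		 * turned on while sc.may_cache_trim_mode is set ... */
		sc.priority--;
	} while (sc.priority >= 1);

	if (!sc.nr_reclaimed) {
		if (sc.may_cache_trim_mode) {
			/* retry the whole priority loop with the mode off */
			sc.may_cache_trim_mode = 0;
			goto restart;
		}
		/* the mode was not the culprit; count a real failure */
		pgdat->kswapd_failures++;
	}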

The problematic behavior can be reproduced with the following setup:

CONFIG_NUMA_BALANCING enabled
sysctl_numa_balancing_mode set to NUMA_BALANCING_MEMORY_TIERING
numa node0 (8GB local memory, 16 CPUs)
numa node1 (8GB slow tier memory, no CPUs)

Sequence:

1) echo 3 > /proc/sys/vm/drop_caches
2) To emulate a system whose local DRAM is full of cold anon memory,
run the following dummy program and never touch the mapped region
(a complete standalone version is sketched after this sequence):

mmap(NULL, 8UL * 1024 * 1024 * 1024, PROT_READ | PROT_WRITE,
     MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);

3) Run any memory-intensive workload, e.g. XSBench.
4) Check whether numa balancing is working, i.e. promotion/demotion
are happening.
5) Repeat 1) ~ 4) until numa balancing stops.
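
For reference, a complete standalone version of the dummy program in
step 2) could look like the following; this is just a reproducer
sketch, not part of the patch:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* 8GB of anon memory, matching node0's local memory;
	 * populated up front and then left untouched (cold) */
	void *p = mmap(NULL, 8UL * 1024 * 1024 * 1024,
		       PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	pause();	/* keep the mapping alive; never touch the region */
	return 0;
}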

With this, you can see that promotion/demotion stop working, because
kswapd no longer runs once ->kswapd_failures reaches
MAX_RECLAIM_RETRIES.
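
Whether promotion/demotion are still happening (step 4) can be checked
by sampling the relevant counters, e.g.:

	grep -e pgpromote -e pgdemote /proc/vmstat

(the same counters shown in the table below).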

Interesting vmstat deltas before and after the change:

+-----------------------+----------+----------+
| interesting vmstat    |   before |    after |
+-----------------------+----------+----------+
| nr_inactive_anon      |   321935 |  1646193 |
| nr_active_anon        |  1780700 |   456388 |
| nr_inactive_file      |    30425 |    27836 |
| nr_active_file        |    14961 |     1217 |
| pgpromote_success     |      356 |  1310120 |
| pgpromote_candidate   | 21953245 |  1736872 |
| pgactivate            |  1844523 |  3292443 |
| pgdeactivate          |    50634 |  1526701 |
| pgfault               | 31100294 |  6715375 |
| pgdemote_kswapd       |    30856 |  1954199 |
| pgscan_kswapd         |  1861981 |  7100099 |
| pgscan_anon           |  1822930 |  7061135 |
| pgscan_file           |    39051 |    38964 |
| pgsteal_anon          |      386 |  1925214 |
| pgsteal_file          |    30470 |    28985 |
| pageoutrun            |       30 |      500 |
| numa_hint_faults      | 27418279 |  3090773 |
| numa_pages_migrated   |      356 |  1310120 |
+-----------------------+----------+----------+

Signed-off-by: Byungchul Park <byungchul@xxxxxx>
---
mm/vmscan.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bba207f41b14..77948b0f8b5b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -108,6 +108,9 @@ struct scan_control {
/* Can folios be swapped as part of reclaim? */
unsigned int may_swap:1;

+ /* Can cache_trim_mode be turned on as part of reclaim? */
+ unsigned int may_cache_trim_mode:1;
+
/* Proactive reclaim invoked by userspace through memory.reclaim */
unsigned int proactive:1;

@@ -1500,6 +1503,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
+ .may_cache_trim_mode = 1,
};
struct reclaim_stat stat;
unsigned int nr_reclaimed;
@@ -2094,6 +2098,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
+ .may_cache_trim_mode = 1,
.no_demotion = 1,
};

@@ -2268,7 +2273,8 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
* anonymous pages.
*/
file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) &&
+ sc->may_cache_trim_mode)
sc->cache_trim_mode = 1;
else
sc->cache_trim_mode = 0;
@@ -5435,6 +5441,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
.may_writepage = true,
.may_unmap = true,
.may_swap = true,
+ .may_cache_trim_mode = true,
.reclaim_idx = MAX_NR_ZONES - 1,
.gfp_mask = GFP_KERNEL,
};
@@ -6394,6 +6401,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
+ .may_cache_trim_mode = 1,
};

/*
@@ -6439,6 +6447,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
+ .may_cache_trim_mode = 1,
};

WARN_ON_ONCE(!current->reclaim_state);
@@ -6482,6 +6491,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
+ .may_cache_trim_mode = 1,
.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
};
/*
@@ -6744,6 +6754,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
.gfp_mask = GFP_KERNEL,
.order = order,
.may_unmap = 1,
+ .may_cache_trim_mode = 1,
};

set_task_reclaim_state(current, &sc.reclaim_state);
@@ -6898,8 +6909,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
sc.priority--;
} while (sc.priority >= 1);

- if (!sc.nr_reclaimed)
+ if (!sc.nr_reclaimed) {
+ if (sc.may_cache_trim_mode) {
+ sc.may_cache_trim_mode = 0;
+ goto restart;
+ }
+
pgdat->kswapd_failures++;
+ }

out:
clear_reclaim_active(pgdat, highest_zoneidx);
@@ -7202,6 +7219,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
+ .may_cache_trim_mode = 1,
.hibernation_mode = 1,
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
@@ -7360,6 +7378,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
.may_swap = 1,
+ .may_cache_trim_mode = 1,
.reclaim_idx = gfp_zone(gfp_mask),
};
unsigned long pflags;
--
2.17.1