[PATCH v7 08/12] mm: multigenerational LRU: optimize multiple memcgs

From: Yu Zhao
Date: Tue Feb 08 2022 - 03:20:16 EST


When multiple memcgs are available, the overall performance under global
memory pressure can be improved by making better reclaim choices based on
generations and tiers. This patch adds a rudimentary optimization that
selects memcgs which can drop single-use unmapped clean pages first,
thereby reducing the chance of entering the aging path or swapping, both
of which can be costly. The goal is to improve the overall performance
with mixed types of workloads, e.g., a heavy anon workload in one memcg
and a heavy buffered I/O workload in another.

Though this optimization can be applied to both kswapd and direct
reclaim, it's only added to kswapd to keep the patchset manageable.
Later improvements will cover the direct reclaim path.
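
For readers skimming the diff below, here is a condensed sketch of how the
new scan_control bits let kswapd's aging path and the eviction path
coordinate. It is an illustration distilled from the diff, not the literal
kernel code:

    /*
     * Aging side (lru_gen_age_node(), kswapd only): optimistically skip
     * aging and swapping unless the eviction path cleared the
     * corresponding bits on the previous pass.
     */
    if (!sc->memcgs_need_aging) {
            sc->memcgs_need_aging = true;
            sc->memcgs_avoid_swapping = !sc->memcgs_need_swapping;
            sc->memcgs_need_swapping = true;
            return;
    }

    /*
     * Eviction side (lru_gen_shrink_lruvec()): stop evicting from a memcg
     * once it starts swapping, and clear memcgs_need_swapping only if
     * file pages alone made enough progress (at least MIN_LRU_BATCH).
     */
    if (sc->memcgs_avoid_swapping && swappiness < 200 && swapped)
            break;

    if (scanned >= nr_to_scan) {
            if (!swapped && sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH)
                    sc->memcgs_need_swapping = false;
            break;
    }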

Server benchmark results:
  Mixed workloads:
    fio (buffered I/O): -[28, 30]%
                IOPS         BW
      patch1-7: 3117k        11.9GiB/s
      patch1-8: 2217k        8661MiB/s

    memcached (anon): +[247, 251]%
                Ops/sec      KB/sec
      patch1-7: 563772.35    21900.01
      patch1-8: 1968343.76   76461.24

  Mixed workloads:
    fio (buffered I/O): -[4, 6]%
                IOPS         BW
      5.17-rc2: 2338k        9133MiB/s
      patch1-8: 2217k        8661MiB/s

    memcached (anon): +[524, 530]%
                Ops/sec      KB/sec
      5.17-rc2: 313821.65    12190.55
      patch1-8: 1968343.76   76461.24

Configurations:
(changes since patch 5)

cat combined.sh
modprobe brd rd_nr=2 rd_size=56623104

swapoff -a
mkswap /dev/ram0
swapon /dev/ram0

mkfs.ext4 /dev/ram1
mount -t ext4 /dev/ram1 /mnt

memtier_benchmark -S /var/run/memcached/memcached.sock \
  -P memcache_binary -n allkeys --key-minimum=1 \
  --key-maximum=50000000 --key-pattern=P:P -c 1 -t 36 \
  --ratio 1:0 --pipeline 8 -d 2000

fio -name=mglru --numjobs=36 --directory=/mnt --size=1408m \
  --buffered=1 --ioengine=io_uring --iodepth=128 \
  --iodepth_batch_submit=32 --iodepth_batch_complete=32 \
  --rw=randread --random_distribution=random --norandommap \
  --time_based --ramp_time=10m --runtime=90m --group_reporting &
pid=$!

sleep 200

memtier_benchmark -S /var/run/memcached/memcached.sock \
  -P memcache_binary -n allkeys --key-minimum=1 \
  --key-maximum=50000000 --key-pattern=R:R -c 1 -t 36 \
  --ratio 0:1 --pipeline 8 --randomize --distinct-client-seed

kill -INT $pid
wait

Client benchmark results:
no change (CONFIG_MEMCG=n)

Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
Acked-by: Brian Geffon <bgeffon@xxxxxxxxxx>
Acked-by: Jan Alexander Steffens (heftig) <heftig@xxxxxxxxxxxxx>
Acked-by: Oleksandr Natalenko <oleksandr@xxxxxxxxxxxxxx>
Acked-by: Steven Barrett <steven@xxxxxxxxxxxx>
Acked-by: Suleiman Souhlal <suleiman@xxxxxxxxxx>
Tested-by: Daniel Byrne <djbyrne@xxxxxxx>
Tested-by: Donald Carr <d@xxxxxxxxxxxxxxx>
Tested-by: Holger Hoffstätte <holger@xxxxxxxxxxxxxxxxxxxxxx>
Tested-by: Konstantin Kharlamov <Hi-Angel@xxxxxxxxx>
Tested-by: Shuang Zhai <szhai2@xxxxxxxxxxxxxxxx>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
---
mm/vmscan.c | 45 +++++++++++++++++++++++++++++++++++++++++----
1 file changed, 41 insertions(+), 4 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5ab6cd332fcc..fc09b6c10624 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -127,6 +127,13 @@ struct scan_control {
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;

+#ifdef CONFIG_LRU_GEN
+ /* help make better choices when multiple memcgs are available */
+ unsigned int memcgs_need_aging:1;
+ unsigned int memcgs_need_swapping:1;
+ unsigned int memcgs_avoid_swapping:1;
+#endif
+
/* Allocation order */
s8 order;

@@ -4343,6 +4350,22 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)

VM_BUG_ON(!current_is_kswapd());

+ /*
+ * To reduce the chance of going into the aging path or swapping, which
+ * can be costly, optimistically skip them unless their corresponding
+ * flags were cleared in the eviction path. This improves the overall
+ * performance when multiple memcgs are available.
+ */
+ if (!sc->memcgs_need_aging) {
+ sc->memcgs_need_aging = true;
+ sc->memcgs_avoid_swapping = !sc->memcgs_need_swapping;
+ sc->memcgs_need_swapping = true;
+ return;
+ }
+
+ sc->memcgs_need_swapping = true;
+ sc->memcgs_avoid_swapping = true;
+
current->reclaim_state->mm_walk = &pgdat->mm_walk;

memcg = mem_cgroup_iter(NULL, NULL, NULL);
@@ -4745,7 +4768,8 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw
return scanned;
}

-static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
+static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
+ bool *swapped)
{
int type;
int scanned;
@@ -4810,6 +4834,9 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap

sc->nr_reclaimed += reclaimed;

+ if (!type && swapped)
+ *swapped = true;
+
return scanned;
}

@@ -4838,8 +4865,10 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
if (!nr_to_scan)
return 0;

- if (!need_aging)
+ if (!need_aging) {
+ sc->memcgs_need_aging = false;
return nr_to_scan;
+ }

/* leave the work to lru_gen_age_node() */
if (current_is_kswapd())
@@ -4861,6 +4890,8 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
{
struct blk_plug plug;
long scanned = 0;
+ bool swapped = false;
+ unsigned long reclaimed = sc->nr_reclaimed;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);

@@ -4887,13 +4918,19 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
if (!nr_to_scan)
break;

- delta = evict_folios(lruvec, sc, swappiness);
+ delta = evict_folios(lruvec, sc, swappiness, &swapped);
if (!delta)
break;

+ if (sc->memcgs_avoid_swapping && swappiness < 200 && swapped)
+ break;
+
scanned += delta;
- if (scanned >= nr_to_scan)
+ if (scanned >= nr_to_scan) {
+ if (!swapped && sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH)
+ sc->memcgs_need_swapping = false;
break;
+ }

cond_resched();
}
--
2.35.0.263.gb82422642f-goog