[PATCH mm-unstable v1 3/5] mm/mglru: improve reset_mm_stats()

From: Kinsey Ho
Date: Wed Feb 14 2024 - 01:06:48 EST


struct lruvec* is already a field of struct lru_gen_mm_walk.
Remove the struct lruvec* parameter from functions that already have
access to struct lru_gen_mm_walk*, and derive it from walk->lruvec.

Also, the mm stats are only accumulated during a page table walk, so
there is nothing to reset when !should_walk_mmu(). Remove the
unnecessary call to reset_mm_stats() in iterate_mm_list_nowalk().

Signed-off-by: Kinsey Ho <kinseyho@xxxxxxxxxx>
---
mm/vmscan.c | 42 ++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)
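
Note: every conversion below has the same shape; as a minimal sketch
(illustrative only, using reset_batch_size() as the example):

	/* before: callers pass a lruvec that the walk already carries */
	static void reset_batch_size(struct lruvec *lruvec,
				     struct lru_gen_mm_walk *walk);

	/* after: derive the lruvec from the walk itself */
	static void reset_batch_size(struct lru_gen_mm_walk *walk)
	{
		struct lruvec *lruvec = walk->lruvec;
		/* ... */
	}

The one exception is evict_folios(), which sets walk->lruvec before
the call, as the last hunk shows.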

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87de39e7410b..8c77c332a67a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2897,38 +2897,37 @@ static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)

 #endif
 
-static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
+static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
 {
 	int i;
 	int hist;
+	struct lruvec *lruvec = walk->lruvec;
 	struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
 
 	lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
 
-	if (walk) {
-		hist = lru_hist_from_seq(walk->max_seq);
+	hist = lru_hist_from_seq(walk->max_seq);
 
-		for (i = 0; i < NR_MM_STATS; i++) {
-			WRITE_ONCE(mm_state->stats[hist][i],
-				   mm_state->stats[hist][i] + walk->mm_stats[i]);
-			walk->mm_stats[i] = 0;
-		}
+	for (i = 0; i < NR_MM_STATS; i++) {
+		WRITE_ONCE(mm_state->stats[hist][i],
+			   mm_state->stats[hist][i] + walk->mm_stats[i]);
+		walk->mm_stats[i] = 0;
 	}
 
 	if (NR_HIST_GENS > 1 && last) {
-		hist = lru_hist_from_seq(mm_state->seq + 1);
+		hist = lru_hist_from_seq(walk->max_seq + 1);
 
 		for (i = 0; i < NR_MM_STATS; i++)
 			WRITE_ONCE(mm_state->stats[hist][i], 0);
 	}
 }
 
-static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
-			    struct mm_struct **iter)
+static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
 {
 	bool first = false;
 	bool last = false;
 	struct mm_struct *mm = NULL;
+	struct lruvec *lruvec = walk->lruvec;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
 	struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
@@ -2972,7 +2971,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
 	} while (!(mm = get_next_mm(walk)));
 done:
 	if (*iter || last)
-		reset_mm_stats(lruvec, walk, last);
+		reset_mm_stats(walk, last);
 
 	spin_unlock(&mm_list->lock);

@@ -3002,7 +3001,6 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
 		mm_state->head = NULL;
 		mm_state->tail = NULL;
 		WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
-		reset_mm_stats(lruvec, NULL, true);
 		success = true;
 	}

@@ -3177,9 +3175,10 @@ static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
 	walk->nr_pages[new_gen][type][zone] += delta;
 }
 
-static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
+static void reset_batch_size(struct lru_gen_mm_walk *walk)
 {
 	int gen, type, zone;
+	struct lruvec *lruvec = walk->lruvec;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 
 	walk->batched = 0;
@@ -3609,7 +3608,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
 	return -EAGAIN;
 }
 
-static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
+static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
 {
 	static const struct mm_walk_ops mm_walk_ops = {
 		.test_walk = should_skip_vma,
@@ -3618,6 +3617,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
 	};
 
 	int err;
+	struct lruvec *lruvec = walk->lruvec;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 	walk->next_addr = FIRST_USER_ADDRESS;
@@ -3646,7 +3646,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_

 	if (walk->batched) {
 		spin_lock_irq(&lruvec->lru_lock);
-		reset_batch_size(lruvec, walk);
+		reset_batch_size(walk);
 		spin_unlock_irq(&lruvec->lru_lock);
 	}

@@ -3874,9 +3874,9 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	walk->force_scan = force_scan;
 
 	do {
-		success = iterate_mm_list(lruvec, walk, &mm);
+		success = iterate_mm_list(walk, &mm);
 		if (mm)
-			walk_mm(lruvec, mm, walk);
+			walk_mm(mm, walk);
 	} while (mm);
 done:
 	if (success) {
@@ -4576,8 +4576,10 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	move_folios_to_lru(lruvec, &list);
 
 	walk = current->reclaim_state->mm_walk;
-	if (walk && walk->batched)
-		reset_batch_size(lruvec, walk);
+	if (walk && walk->batched) {
+		walk->lruvec = lruvec;
+		reset_batch_size(walk);
+	}
 
 	item = PGSTEAL_KSWAPD + reclaimer_offset();
 	if (!cgroup_reclaim(sc))
--
2.43.0.687.g38aa6559b0-goog