[PATCH v5 3/6] zswap: make shrinking memcg-aware (fix)

From: Nhat Pham
Date: Mon Nov 06 2023 - 19:32:07 EST


Rename get_mem_cgroup_from_entry() to mem_cgroup_from_entry(), since the
helper only looks up the memcg under RCU and does not take a reference on
it, and drop the dedicated per-pool next_shrink_lock in favor of the
existing zswap_pools_lock for serializing updates to pool->next_shrink.

Suggested-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Signed-off-by: Nhat Pham <nphamcs@xxxxxxxxx>
---
mm/zswap.c | 22 +++++++++-------------
1 file changed, 9 insertions(+), 13 deletions(-)
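
Not part of the patch itself: below is a minimal userspace sketch of the
locking pattern this fix settles on, in case it helps review. A single
shared lock (standing in for zswap_pools_lock) protects the per-pool
round-robin cursor (standing in for pool->next_shrink); the worker pins
its pick before dropping the lock, as shrink_worker() does with
css_get(), and the offline path steps the cursor past a dying entry
under the same lock, as zswap_memcg_offline_cleanup() does. All
identifiers in the sketch (fake_memcg, pick_next, offline_memcg,
NR_MEMCGS) are made up for illustration and do not exist in mm/zswap.c.

/*
 * Userspace analogy only -- not kernel code, and not part of this patch.
 * pools_lock stands in for zswap_pools_lock, next_shrink for
 * pool->next_shrink, and the refcount bump for css_get()/mem_cgroup_put().
 */
#include <pthread.h>
#include <stdio.h>

#define NR_MEMCGS 4

struct fake_memcg {
	int id;
	int refcount;			/* stands in for the css refcount */
};

static struct fake_memcg memcgs[NR_MEMCGS] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
};

static pthread_mutex_t pools_lock = PTHREAD_MUTEX_INITIALIZER;
static int next_shrink = -1;		/* round-robin cursor */

/* Like shrink_worker(): advance the cursor and pin the pick under the lock. */
static struct fake_memcg *pick_next(void)
{
	struct fake_memcg *memcg;

	pthread_mutex_lock(&pools_lock);
	next_shrink = (next_shrink + 1) % NR_MEMCGS;
	memcg = &memcgs[next_shrink];
	memcg->refcount++;		/* ~ css_get() before unlocking */
	pthread_mutex_unlock(&pools_lock);
	return memcg;
}

/* Like zswap_memcg_offline_cleanup(): step the cursor past a dying entry. */
static void offline_memcg(int id)
{
	pthread_mutex_lock(&pools_lock);
	if (next_shrink == id)
		next_shrink = (next_shrink + 1) % NR_MEMCGS;
	pthread_mutex_unlock(&pools_lock);
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		struct fake_memcg *memcg = pick_next();

		printf("shrinking memcg %d\n", memcg->id);
		memcg->refcount--;	/* ~ mem_cgroup_put() after reclaim */
		if (memcg->id == 1)
			offline_memcg(1);	/* pretend memcg 1 goes offline */
	}
	return 0;
}

Because pick_next() and offline_memcg() serialize on the same lock, the
worker either sees the old cursor and pins it before the offline path can
replace it, or it sees the already-advanced cursor -- the property
shrink_worker() relies on when it does css_get() under zswap_pools_lock.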

diff --git a/mm/zswap.c b/mm/zswap.c
index 2654b0d214cc..f1998dbd3519 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -174,7 +174,6 @@ struct zswap_pool {
 	struct hlist_node node;
 	char tfm_name[CRYPTO_MAX_ALG_NAME];
 	struct list_lru list_lru;
-	spinlock_t next_shrink_lock;
 	struct mem_cgroup *next_shrink;
 };
 
@@ -292,7 +291,7 @@ static void zswap_update_total_size(void)
 }
 
 /* should be called under RCU */
-static inline struct mem_cgroup *get_mem_cgroup_from_entry(struct zswap_entry *entry)
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
 {
 	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
 }
@@ -309,11 +308,9 @@ void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 	/* lock out zswap pools list modification */
 	spin_lock(&zswap_pools_lock);
 	list_for_each_entry(pool, &zswap_pools, list) {
-		spin_lock(&pool->next_shrink_lock);
 		if (pool->next_shrink == memcg)
 			pool->next_shrink =
 				mem_cgroup_iter(NULL, pool->next_shrink, NULL, true);
-		spin_unlock(&pool->next_shrink_lock);
 	}
 	spin_unlock(&zswap_pools_lock);
 }
@@ -360,7 +357,7 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 	 * Similar reasoning holds for list_lru_del() and list_lru_putback().
 	 */
 	rcu_read_lock();
-	memcg = get_mem_cgroup_from_entry(entry);
+	memcg = mem_cgroup_from_entry(entry);
 	/* will always succeed */
 	list_lru_add(list_lru, &entry->lru, nid, memcg);
 	rcu_read_unlock();
@@ -372,7 +369,7 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
 	struct mem_cgroup *memcg;
 
 	rcu_read_lock();
-	memcg = get_mem_cgroup_from_entry(entry);
+	memcg = mem_cgroup_from_entry(entry);
 	/* will always succeed */
 	list_lru_del(list_lru, &entry->lru, nid, memcg);
 	rcu_read_unlock();
@@ -386,7 +383,7 @@ static void zswap_lru_putback(struct list_lru *list_lru,
 	struct mem_cgroup *memcg;
 
 	rcu_read_lock();
-	memcg = get_mem_cgroup_from_entry(entry);
+	memcg = mem_cgroup_from_entry(entry);
 	spin_lock(lock);
 	/* we cannot use list_lru_add here, because it increments node's lru count */
 	list_lru_putback(list_lru, &entry->lru, nid, memcg);
@@ -806,13 +803,13 @@ static void shrink_worker(struct work_struct *w)

 	/* global reclaim will select cgroup in a round-robin fashion. */
 	do {
-		spin_lock(&pool->next_shrink_lock);
+		spin_lock(&zswap_pools_lock);
 		memcg = pool->next_shrink =
 			mem_cgroup_iter(NULL, pool->next_shrink, NULL, true);
 
 		/* full round trip */
 		if (!memcg) {
-			spin_unlock(&pool->next_shrink_lock);
+			spin_unlock(&zswap_pools_lock);
 			if (++failures == MAX_RECLAIM_RETRIES)
 				break;
@@ -824,7 +821,7 @@ static void shrink_worker(struct work_struct *w)
 		 * original reference is dropped by the zswap offlining callback.
 		 */
 		css_get(&memcg->css);
-		spin_unlock(&pool->next_shrink_lock);
+		spin_unlock(&zswap_pools_lock);
 
 		ret = shrink_memcg(memcg);
 		mem_cgroup_put(memcg);
@@ -898,7 +895,6 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	INIT_WORK(&pool->shrink_work, shrink_worker);
 
 	zswap_pool_debug("created", pool);
-	spin_lock_init(&pool->next_shrink_lock);
 
 	return pool;
 
@@ -963,10 +959,10 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
 	free_percpu(pool->acomp_ctx);
 	list_lru_destroy(&pool->list_lru);
 
-	spin_lock(&pool->next_shrink_lock);
+	spin_lock(&zswap_pools_lock);
 	mem_cgroup_put(pool->next_shrink);
 	pool->next_shrink = NULL;
-	spin_unlock(&pool->next_shrink_lock);
+	spin_unlock(&zswap_pools_lock);
 
 	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
 		zpool_destroy_pool(pool->zpools[i]);
--
2.34.1