[v2 PATCH 2/9] mm: memcontrol: use shrinker_rwsem to protect shrinker_maps allocation

From: Yang Shi
Date: Mon Dec 14 2020 - 17:39:06 EST


Since memcg_shrinker_map_size can only be changed while holding shrinker_rwsem
exclusively, the read side can be protected by holding a read lock, so the
dedicated mutex is superfluous. This should not exacerbate contention on
shrinker_rwsem since only one read-side critical section is added.

Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
---
mm/internal.h | 1 +
mm/memcontrol.c | 17 +++++++----------
mm/vmscan.c | 2 +-
3 files changed, 9 insertions(+), 11 deletions(-)
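
Note (illustrative only, not part of the patch): below is a minimal sketch of
the locking rule the commit message relies on, written against the kernel
rwsem API; the two helper names are hypothetical stand-ins, only
shrinker_rwsem and memcg_shrinker_map_size come from the real code. In the
patch itself memcg_expand_shrinker_maps() takes no lock at all because its
caller, prealloc_memcg_shrinker(), already holds shrinker_rwsem for write,
which is why the mutex and the lockdep assertion can simply be dropped.

#include <linux/rwsem.h>

/* Stand-ins for the real definitions in mm/vmscan.c and mm/memcontrol.c. */
DECLARE_RWSEM(shrinker_rwsem);
static int memcg_shrinker_map_size;

/*
 * Writer side (hypothetical helper): the size may only change with
 * shrinker_rwsem held exclusively.
 */
static void example_set_map_size(int new_size)
{
	down_write(&shrinker_rwsem);
	memcg_shrinker_map_size = new_size;
	up_write(&shrinker_rwsem);
}

/*
 * Reader side (hypothetical helper): a read lock is enough for a stable
 * snapshot, as memcg_alloc_shrinker_maps() now does.
 */
static int example_get_map_size(void)
{
	int size;

	down_read(&shrinker_rwsem);
	size = memcg_shrinker_map_size;
	up_read(&shrinker_rwsem);

	return size;
}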

diff --git a/mm/internal.h b/mm/internal.h
index c43ccdddb0f6..10c79d199aaa 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -108,6 +108,7 @@ extern unsigned long highest_memmap_pfn;
/*
* in mm/vmscan.c:
*/
+extern struct rw_semaphore shrinker_rwsem;
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29459a6ce1c7..ed942734235f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -394,8 +394,8 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

+/* It can only be changed while holding shrinker_rwsem exclusively */
static int memcg_shrinker_map_size;
-static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
@@ -408,8 +408,6 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
struct memcg_shrinker_map *new, *old;
int nid;

- lockdep_assert_held(&memcg_shrinker_map_mutex);
-
for_each_node(nid) {
old = rcu_dereference_protected(
mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
@@ -458,7 +456,7 @@ static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
if (mem_cgroup_is_root(memcg))
return 0;

- mutex_lock(&memcg_shrinker_map_mutex);
+ down_read(&shrinker_rwsem);
size = memcg_shrinker_map_size;
for_each_node(nid) {
map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
@@ -469,7 +467,7 @@ static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
}
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
}
- mutex_unlock(&memcg_shrinker_map_mutex);
+ up_read(&shrinker_rwsem);

return ret;
}
@@ -484,9 +482,8 @@ int memcg_expand_shrinker_maps(int new_id)
if (size <= old_size)
return 0;

- mutex_lock(&memcg_shrinker_map_mutex);
if (!root_mem_cgroup)
- goto unlock;
+ goto out;

for_each_mem_cgroup(memcg) {
if (mem_cgroup_is_root(memcg))
@@ -494,13 +491,13 @@ int memcg_expand_shrinker_maps(int new_id)
ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
if (ret) {
mem_cgroup_iter_break(NULL, memcg);
- goto unlock;
+ goto out;
}
}
-unlock:
+out:
if (!ret)
memcg_shrinker_map_size = size;
- mutex_unlock(&memcg_shrinker_map_mutex);
+
return ret;
}

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 48c06c48b97e..912c044301dd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -184,7 +184,7 @@ static void set_task_reclaim_state(struct task_struct *task,
}

static LIST_HEAD(shrinker_list);
-static DECLARE_RWSEM(shrinker_rwsem);
+DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
/*
--
2.26.2