Re: [v3 PATCH 04/11] mm: vmscan: remove memcg_shrinker_map_size

From: Kirill Tkhai
Date: Wed Jan 06 2021 - 05:16:52 EST


On 06.01.2021 01:58, Yang Shi wrote:
> Both memcg_shrinker_map_size and shrinker_nr_max are maintained, but actually the
> map size can be calculated via shrinker_nr_max, so it seems unnecessary to keep both.
> Remove memcg_shrinker_map_size since shrinker_nr_max is also used for iterating the
> bit map.
>
> Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
> ---
> mm/vmscan.c | 12 ++++--------
> 1 file changed, 4 insertions(+), 8 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index ddb9f972f856..8da765a85569 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -185,8 +185,7 @@ static LIST_HEAD(shrinker_list);
> static DECLARE_RWSEM(shrinker_rwsem);
>
> #ifdef CONFIG_MEMCG
> -
> -static int memcg_shrinker_map_size;
> +static int shrinker_nr_max;
>
> static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
> {
> @@ -248,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
> 		return 0;
>
> 	down_read(&shrinker_rwsem);
> -	size = memcg_shrinker_map_size;
> +	size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long);
> 	for_each_node(nid) {
> 		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
> 		if (!map) {
> @@ -269,7 +268,7 @@ static int memcg_expand_shrinker_maps(int new_id)
> 	struct mem_cgroup *memcg;
>
> 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
> -	old_size = memcg_shrinker_map_size;
> +	old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long);
> 	if (size <= old_size)
> 		return 0;

This bunch of DIV_ROUND_UP() calls looks too complex. Since all the shrinker map
allocation logic now lives in a single file, can't we simplify this to read better?
I mean something like the diff below, to be merged into your patch:

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b951c289ef3a..27b6371a1656 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -247,7 +247,7 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
		return 0;

	down_read(&shrinker_rwsem);
-	size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long);
+	size = shrinker_nr_max / BITS_PER_BYTE;
	for_each_node(nid) {
		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
		if (!map) {
@@ -264,13 +264,11 @@ int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)

static int memcg_expand_shrinker_maps(int new_id)
{
-	int size, old_size, ret = 0;
+	int size, old_size, new_nr_max, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
-	old_size = DIV_ROUND_UP(shrinker_nr_max, BITS_PER_LONG) * sizeof(unsigned long);
-	if (size <= old_size)
-		return 0;
+	new_nr_max = size * BITS_PER_BYTE;

	if (!root_mem_cgroup)
		goto out;
@@ -287,6 +285,9 @@ static int memcg_expand_shrinker_maps(int new_id)
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

out:
+	if (ret == 0)
+		shrinker_nr_max = new_nr_max;
+
	return ret;
}

@@ -334,8 +335,6 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}
-
-		shrinker_nr_max = id + 1;
	}
	shrinker->id = id;
	ret = 0;
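
For illustration only (not part of the proposed patch): the simpler division works
because shrinker_nr_max would only ever be assigned values of the form
size * BITS_PER_BYTE, so it stays a multiple of BITS_PER_LONG, and
shrinker_nr_max / BITS_PER_BYTE yields exactly the same byte count as the
DIV_ROUND_UP() expression it replaces. A minimal userspace sketch of that
arithmetic, assuming a 64-bit build with sizeof(unsigned long) == 8 and the
kernel macros redefined locally:

/*
 * Illustrative userspace sketch only -- not kernel code and not part of
 * the patch.  Assumes a 64-bit build where sizeof(unsigned long) == 8,
 * so BITS_PER_LONG is 64 and BITS_PER_BYTE is 8, as in the kernel.
 */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_PER_LONG		(BITS_PER_BYTE * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long nr_max;	/* stands in for shrinker_nr_max */
	int id;

	for (id = 0; id < 1024; id++) {
		/* bytes needed to hold id + 1 bits, rounded to whole longs */
		unsigned long size = DIV_ROUND_UP(id + 1, BITS_PER_LONG) *
				     sizeof(unsigned long);

		/* the suggestion stores the rounded-up bit count instead */
		nr_max = size * BITS_PER_BYTE;

		/* plain division recovers exactly the same byte size ... */
		assert(nr_max / BITS_PER_BYTE == size);
		/* ... and matches the old DIV_ROUND_UP() expression */
		assert(nr_max / BITS_PER_BYTE ==
		       DIV_ROUND_UP(nr_max, BITS_PER_LONG) * sizeof(unsigned long));
	}

	printf("shrinker_nr_max stays a multiple of BITS_PER_LONG\n");
	return 0;
}

The asserts hold for every id, which is what would make the plain division in
memcg_alloc_shrinker_maps() equivalent to the DIV_ROUND_UP() form it replaces.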