Re: [PATCH 2/3] mm: rmap: remove lock_page_memcg()

From: Hugh Dickins
Date: Tue Dec 06 2022 - 20:54:26 EST


On Tue, 6 Dec 2022, Johannes Weiner wrote:

> The previous patch made sure charge moving only touches pages for
> which page_mapped() is stable. lock_page_memcg() is no longer needed.
>
> Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>

Acked-by: Hugh Dickins <hughd@xxxxxxxxxx>

> ---
> mm/rmap.c | 26 ++++++++------------------
> 1 file changed, 8 insertions(+), 18 deletions(-)
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index b616870a09be..32e48b1c5847 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1222,9 +1222,6 @@ void page_add_anon_rmap(struct page *page,
>  	bool compound = flags & RMAP_COMPOUND;
>  	bool first = true;
>
> -	if (unlikely(PageKsm(page)))
> -		lock_page_memcg(page);
> -
>  	/* Is page being mapped by PTE? Is this its first map to be added? */
>  	if (likely(!compound)) {
>  		first = atomic_inc_and_test(&page->_mapcount);
> @@ -1262,15 +1259,14 @@ void page_add_anon_rmap(struct page *page,
>  	if (nr)
>  		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
>
> -	if (unlikely(PageKsm(page)))
> -		unlock_page_memcg(page);
> -
> -	/* address might be in next vma when migration races vma_adjust */
> -	else if (first)
> -		__page_set_anon_rmap(page, vma, address,
> -				     !!(flags & RMAP_EXCLUSIVE));
> -	else
> -		__page_check_anon_rmap(page, vma, address);
> +	if (likely(!PageKsm(page))) {
> +		/* address might be in next vma when migration races vma_adjust */
> +		if (first)
> +			__page_set_anon_rmap(page, vma, address,
> +					     !!(flags & RMAP_EXCLUSIVE));
> +		else
> +			__page_check_anon_rmap(page, vma, address);
> +	}
>
>  	mlock_vma_page(page, vma, compound);
>  }
> @@ -1329,7 +1325,6 @@ void page_add_file_rmap(struct page *page,
>  	bool first;
>
>  	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
> -	lock_page_memcg(page);
>
>  	/* Is page being mapped by PTE? Is this its first map to be added? */
>  	if (likely(!compound)) {
> @@ -1365,7 +1360,6 @@ void page_add_file_rmap(struct page *page,
>  			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
>  	if (nr)
>  		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
> -	unlock_page_memcg(page);
>
>  	mlock_vma_page(page, vma, compound);
>  }
> @@ -1394,8 +1388,6 @@ void page_remove_rmap(struct page *page,
>  		return;
>  	}
>
> -	lock_page_memcg(page);
> -
>  	/* Is page being unmapped by PTE? Is this its last map to be removed? */
>  	if (likely(!compound)) {
>  		last = atomic_add_negative(-1, &page->_mapcount);
> @@ -1451,8 +1443,6 @@ void page_remove_rmap(struct page *page,
>  	 * and remember that it's only reliable while mapped.
>  	 */
>
> -	unlock_page_memcg(page);
> -
>  	munlock_vma_page(page, vma, compound);
>  }
>
> --
> 2.38.1
>
>
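
For anyone trying to follow the changelog's locking argument rather than the
diff itself: below is a rough userspace toy model of it. It is not kernel
code and all names are made up (struct group, rmap_thread, mover_thread).
It only sketches the shape of the argument: once the mover takes the same
per-pte lock as the rmap side and only acts while the mapcount is stable,
the rmap side can update the owning group's counters without any separate
memcg lock.

/*
 * Toy userspace model of the locking argument -- not kernel code, all
 * names are made up.  The "mover" may only switch the page to another
 * group under the same per-pte lock that the "rmap" side holds while it
 * adjusts the mapcount and the per-group mapped counter, and only while
 * the mapcount is stable.  So the rmap side never races with a group
 * change and needs no separate memcg lock for its stat updates.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct group { long nr_mapped; };

struct page {
	pthread_mutex_t ptl;	/* stands in for the page table lock */
	int mapcount;		/* stands in for page->_mapcount + 1 */
	struct group *memcg;	/* group currently owning the page */
};

static struct group a, b;
static struct page pg = { PTHREAD_MUTEX_INITIALIZER, 0, &a };

/* rmap side: map/unmap the page and update the owning group's counter */
static void *rmap_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&pg.ptl);
		pg.mapcount++;
		pg.memcg->nr_mapped++;	/* no lock_page_memcg() equivalent */
		pg.mapcount--;
		pg.memcg->nr_mapped--;
		pthread_mutex_unlock(&pg.ptl);
	}
	return NULL;
}

/* mover side: may only switch groups while the map state is stable */
static void *mover_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&pg.ptl);
		if (pg.mapcount == 0)	/* map state cannot change here */
			pg.memcg = (pg.memcg == &a) ? &b : &a;
		pthread_mutex_unlock(&pg.ptl);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, rmap_thread, NULL);
	pthread_create(&t2, NULL, mover_thread, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* every counter update hit the then-owning group under the lock */
	printf("a.nr_mapped=%ld b.nr_mapped=%ld\n", a.nr_mapped, b.nr_mapped);
	assert(a.nr_mapped == 0 && b.nr_mapped == 0);
	return 0;
}

Build with something like "cc -pthread toy.c". The final assertion holds
because every counter update is applied to whichever group owns the page at
that moment, and the owner can only change under the same lock while the
map state is stable, which mirrors the stability argument in the changelog
above.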