Re: [PATCH v6 08/18] arm64/mm: Convert set_pte_at() to set_ptes(..., 1)

From: Mark Rutland
Date: Thu Feb 15 2024 - 06:20:28 EST


On Thu, Feb 15, 2024 at 10:31:55AM +0000, Ryan Roberts wrote:
> Since set_ptes() was introduced, set_pte_at() has been implemented as a
> generic macro around set_ptes(..., 1), so this change should continue to
> generate the same code. However, it prepares us for transparent contpte
> support: it means we can reroute set_ptes() to __set_ptes(). Since
> set_pte_at() is a generic macro, there will be no equivalent
> __set_pte_at() to reroute to.
>
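For context (not something this patch touches): the generic wrapper lives in
include/linux/pgtable.h and is, roughly, the one-liner below, which is why
the arm64 conversion is expected to be a like-for-like replacement:

	#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
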
> Note that a couple of calls to set_pte_at() remain in the arch code.
> This is intentional, since those call sites are acting on behalf of
> core-mm and should continue to call into the public set_ptes() rather
> than the arch-private __set_ptes().
>
> Tested-by: John Hubbard <jhubbard@xxxxxxxxxx>
> Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>

Acked-by: Mark Rutland <mark.rutland@xxxxxxx>

Mark.

> ---
> arch/arm64/include/asm/pgtable.h | 2 +-
> arch/arm64/kernel/mte.c | 2 +-
> arch/arm64/kvm/guest.c | 2 +-
> arch/arm64/mm/fault.c | 2 +-
> arch/arm64/mm/hugetlbpage.c | 10 +++++-----
> 5 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index de034ca40bad..9a2df85eb493 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1084,7 +1084,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
> #endif /* CONFIG_ARM64_MTE */
>
> /*
> - * On AArch64, the cache coherency is handled via the set_pte_at() function.
> + * On AArch64, the cache coherency is handled via the set_ptes() function.
> */
> static inline void update_mmu_cache_range(struct vm_fault *vmf,
> struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index a41ef3213e1e..59bfe2e96f8f 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
> /*
> * If the page content is identical but at least one of the pages is
> * tagged, return non-zero to avoid KSM merging. If only one of the
> - * pages is tagged, set_pte_at() may zero or change the tags of the
> + * pages is tagged, set_ptes() may zero or change the tags of the
> * other page via mte_sync_tags().
> */
> if (page_mte_tagged(page1) || page_mte_tagged(page2))
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index aaf1d4939739..6e0df623c8e9 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -1072,7 +1072,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
> } else {
> /*
> * Only locking to serialise with a concurrent
> - * set_pte_at() in the VMM but still overriding the
> + * set_ptes() in the VMM but still overriding the
> * tags, hence ignoring the return value.
> */
> try_page_mte_tagging(page);
> diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
> index a254761fa1bd..3235e23309ec 100644
> --- a/arch/arm64/mm/fault.c
> +++ b/arch/arm64/mm/fault.c
> @@ -205,7 +205,7 @@ static void show_pte(unsigned long addr)
> *
> * It needs to cope with hardware update of the accessed/dirty state by other
> * agents in the system and can safely skip the __sync_icache_dcache() call as,
> - * like set_pte_at(), the PTE is never changed from no-exec to exec here.
> + * like set_ptes(), the PTE is never changed from no-exec to exec here.
> *
> * Returns whether or not the PTE actually changed.
> */
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index 2892f925ed66..27f6160890d1 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -247,12 +247,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
>
> if (!pte_present(pte)) {
> for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
> - set_pte_at(mm, addr, ptep, pte);
> + set_ptes(mm, addr, ptep, pte, 1);
> return;
> }
>
> if (!pte_cont(pte)) {
> - set_pte_at(mm, addr, ptep, pte);
> + set_ptes(mm, addr, ptep, pte, 1);
> return;
> }
>
> @@ -263,7 +263,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
> clear_flush(mm, addr, ptep, pgsize, ncontig);
>
> for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
> - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
> + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
> }
>
> pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
> @@ -471,7 +471,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
>
> hugeprot = pte_pgprot(pte);
> for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
> - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
> + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
>
> return 1;
> }
> @@ -500,7 +500,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
> pfn = pte_pfn(pte);
>
> for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
> - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
> + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
> }
>
> pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
> --
> 2.25.1
>