Re: [PATCH v5 22/38] riscv: Implement the new page table range API

From: Palmer Dabbelt
Date: Wed Jul 12 2023 - 09:59:06 EST


On Mon, 10 Jul 2023 13:43:23 PDT (-0700), willy@xxxxxxxxxxxxx wrote:
Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
Change the PG_dcache_clean flag from being per-page to per-folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Alexandre Ghiti <alexghiti@xxxxxxxxxxxx>
Acked-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx>
Cc: Albert Ou <aou@xxxxxxxxxxxxxxxxx>
Cc: linux-riscv@xxxxxxxxxxxxxxxxxxx
---
arch/riscv/include/asm/cacheflush.h | 19 +++++++--------
arch/riscv/include/asm/pgtable.h | 38 +++++++++++++++++++----------
arch/riscv/mm/cacheflush.c | 13 +++-------
3 files changed, 37 insertions(+), 33 deletions(-)
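
For anyone joining the series at this patch: the caller-visible contract of set_ptes() is that a single call installs nr consecutive PTEs for nr consecutive page frames under the same protections, replacing a loop of per-page set_pte_at() calls. Below is a minimal user-space model of that equivalence, not kernel code; the pte_t layout and the *_model() helpers are simplified stand-ins for the real definitions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/*
 * Toy model of the set_ptes() contract -- not kernel code.  The PTE layout
 * is only RISC-V-flavoured: a PFN field starting at bit 10, flags below it.
 */
typedef struct { uint64_t val; } pte_t;
#define _PAGE_PFN_SHIFT 10

/* One call installs nr consecutive PTEs for nr consecutive page frames. */
static void set_ptes_model(pte_t *ptep, pte_t pteval, unsigned int nr)
{
        for (;;) {
                *ptep = pteval;
                if (--nr == 0)
                        break;
                ptep++;
                pteval.val += 1ULL << _PAGE_PFN_SHIFT;
        }
}

/* The old caller shape: one write per page. */
static void set_pte_at_model(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
}

int main(void)
{
        pte_t once[8], loop[8];
        pte_t first = { .val = (0x80200ULL << _PAGE_PFN_SHIFT) | 0xcb };

        set_ptes_model(once, first, 8);
        for (unsigned int i = 0; i < 8; i++) {
                pte_t p = { .val = first.val + ((uint64_t)i << _PAGE_PFN_SHIFT) };
                set_pte_at_model(&loop[i], p);
        }
        /* Same result whether the folio is mapped in one call or page by page. */
        assert(memcmp(once, loop, sizeof(once)) == 0);
        return 0;
}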

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8091b8bf4883..0d8c92c5dfb7 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -15,20 +15,19 @@ static inline void local_flush_icache_all(void)

#define PG_dcache_clean PG_arch_1

-static inline void flush_dcache_page(struct page *page)
+static inline void flush_dcache_folio(struct folio *folio)
{
- /*
- * HugeTLB pages are always fully mapped and only head page will be
- * set PG_dcache_clean (see comments in flush_icache_pte()).
- */
- if (PageHuge(page))
- page = compound_head(page);
-
- if (test_bit(PG_dcache_clean, &page->flags))
- clear_bit(PG_dcache_clean, &page->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
}
+#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
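
The folio conversion is what lets the PageHuge() special case above go away: page_folio() on any constituent page of a hugetlb (or other compound) page resolves to the same folio, so the flag is tested and cleared in exactly one place. A small user-space model of the flag protocol, assuming a toy struct folio with a flags word rather than the kernel's types:

#include <assert.h>
#include <stdbool.h>

/* Toy model of the per-folio PG_dcache_clean protocol -- not kernel code. */
#define PG_dcache_clean 0

struct folio { unsigned long flags; };
struct page  { struct folio *folio; };          /* page_folio() stand-in */

static void flush_dcache_folio_model(struct folio *folio)
{
        /* Writer side: the folio's contents changed, icache may be stale. */
        folio->flags &= ~(1UL << PG_dcache_clean);
}

static bool icache_flush_needed(struct folio *folio)
{
        /* Reader side (flush_icache_pte): flush once, then mark clean. */
        if (folio->flags & (1UL << PG_dcache_clean))
                return false;
        folio->flags |= 1UL << PG_dcache_clean;
        return true;
}

int main(void)
{
        struct folio f = { .flags = 0 };
        struct page head = { &f }, tail = { &f };       /* one folio, two pages */

        flush_dcache_folio_model(tail.folio);           /* no compound_head() dance */
        assert(icache_flush_needed(head.folio));        /* first exec mapping flushes */
        assert(!icache_flush_needed(tail.folio));       /* later mappings skip it */
        return 0;
}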
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2137e36595b3..c8f897ed5fd0 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -445,8 +445,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)


/* Commit new configuration to MMU hardware */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
{
/*
* The kernel assumes that TLBs don't cache invalid entries, but
@@ -455,8 +456,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
* Relying on flush_tlb_fix_spurious_fault would suffice, but
* the extra traps reduce performance. So, eagerly SFENCE.VMA.
*/
- local_flush_tlb_page(address);
+ while (nr--)
+ local_flush_tlb_page(address + nr * PAGE_SIZE);
}
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
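
For readers following the loop shape: while (nr--) walks the range from the last page down to the first, covering each page exactly once; the descending order is harmless since per-address SFENCE.VMA has no ordering requirement. A throwaway model of the addresses it touches (PAGE_SIZE and the flush stub here are placeholders, not the kernel's):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for local_flush_tlb_page(): just record the address. */
static void flush_page(unsigned long addr)
{
        printf("sfence.vma %#lx\n", addr);
}

/* Model of the update_mmu_cache_range() loop from the hunk above. */
static void flush_range(unsigned long address, unsigned int nr)
{
        while (nr--)
                flush_page(address + nr * PAGE_SIZE);
}

int main(void)
{
        /* Prints 0x403000, 0x402000, 0x401000, 0x400000: each page once,
         * highest address first. */
        flush_range(0x400000UL, 4);
        return 0;
}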
@@ -487,8 +491,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)

void flush_icache_pte(pte_t pte);

-static inline void __set_pte_at(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
flush_icache_pte(pteval);
@@ -496,17 +499,26 @@ static inline void __set_pte_at(struct mm_struct *mm,
set_pte(ptep, pteval);
}

-static inline void set_pte_at(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr)
{
- page_table_check_ptes_set(mm, addr, ptep, pteval, 1);
- __set_pte_at(mm, addr, ptep, pteval);
+ page_table_check_ptes_set(mm, addr, ptep, pteval, nr);
+
+ for (;;) {
+ __set_pte_at(ptep, pteval);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+ }
}
+#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- __set_pte_at(mm, addr, ptep, __pte(0));
+ __set_pte_at(ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
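
The pte_val(pteval) += 1 << _PAGE_PFN_SHIFT step works because a folio's pages have consecutive PFNs and the PFN occupies a contiguous field starting at bit 10, so adding one unit of that field is the same as re-encoding pfn + 1 with the same protections. A throwaway user-space check of that identity (simplified: this toy ignores any bits that may sit above the PFN field in a real PTE):

#include <assert.h>
#include <stdint.h>

/* Toy model of the RISC-V PTE PFN field -- illustrative only. */
typedef struct { uint64_t val; } pte_t;
#define _PAGE_PFN_SHIFT 10

static uint64_t pte_pfn_model(pte_t pte)
{
        return pte.val >> _PAGE_PFN_SHIFT;
}

static pte_t pfn_pte_model(uint64_t pfn, uint64_t prot)
{
        return (pte_t){ .val = (pfn << _PAGE_PFN_SHIFT) | prot };
}

int main(void)
{
        uint64_t prot = 0xeb;                   /* arbitrary permission bits */
        pte_t pte = pfn_pte_model(0x80123, prot);

        /* Adding 1 << _PAGE_PFN_SHIFT equals re-encoding pfn + 1. */
        pte_t stepped = { .val = pte.val + (1ULL << _PAGE_PFN_SHIFT) };
        pte_t rebuilt = pfn_pte_model(pte_pfn_model(pte) + 1, prot);

        assert(stepped.val == rebuilt.val);
        return 0;
}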
@@ -515,7 +527,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
pte_t entry, int dirty)
{
if (!pte_same(*ptep, entry))
- set_pte_at(vma->vm_mm, address, ptep, entry);
+ __set_pte_at(ptep, entry);
/*
* update_mmu_cache will unconditionally execute, handling both
* the case that the PTE changed and the spurious fault case.
@@ -688,14 +700,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, addr, pmdp, pmd);
- return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+ return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, addr, pudp, pud);
- return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+ return __set_pte_at((pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index fbc59b3f69f2..f1387272a551 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -82,18 +82,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
- struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(pte_page(pte));

- /*
- * HugeTLB pages are always fully mapped, so only setting head page's
- * PG_dcache_clean flag is enough.
- */
- if (PageHuge(page))
- page = compound_head(page);
-
- if (!test_bit(PG_dcache_clean, &page->flags)) {
+ if (!test_bit(PG_dcache_clean, &folio->flags)) {
flush_icache_all();
- set_bit(PG_dcache_clean, &page->flags);
+ set_bit(PG_dcache_clean, &folio->flags);
}
}
#endif /* CONFIG_MMU */

Sorry I missed this earlier. IIRC it ended up somewhere, but

Acked-by: Palmer Dabbelt <palmer@xxxxxxxxxxxx>

anyway. Thanks!