[PATCH RFC] riscv: Enable pud vmap support for Sv48

From: Liu Shixin
Date: Sat Jun 05 2021 - 01:17:20 EST


Enable huge-vmap mappings at the PUD level for Sv48 and define the page
table helpers this requires: pfn_pud(), _pud_pfn(), pud_set_huge(),
pud_clear_huge() and pud_free_pmd_page().

Signed-off-by: Liu Shixin <liushixin2@xxxxxxxxxx>
---
Since riscv does not support Sv48 yet, I tested PUD vmap on top of Alex's
patch "riscv: Implement sv48 support".

 arch/riscv/include/asm/pgtable-64.h | 10 ++++++++++
 arch/riscv/include/asm/vmalloc.h    |  8 +++++++-
 arch/riscv/mm/pgtable.c             | 39 +++++++++++++++++++++++++++++++++++
3 files changed, 56 insertions(+), 1 deletion(-)
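
For context, pud_set_huge() and pud_free_pmd_page() are called from the
generic huge-vmap path. Paraphrased from mm/vmalloc.c around v5.13 (the
details may differ), the caller installs a PUD leaf only after tearing
down any stale lower-level table left by a previous small-page mapping:

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int max_page_shift)
{
	/* A PUD leaf must be allowed, sized and aligned to PUD_SIZE. */
	if (max_page_shift < PUD_SHIFT || !arch_vmap_pud_supported(prot) ||
	    end - addr != PUD_SIZE || !IS_ALIGNED(addr, PUD_SIZE) ||
	    !IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	/* Free a stale PMD table before overwriting the entry. */
	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

This is also why pud_free_pmd_page() clears the PUD and flushes the TLB
before freeing the PMD and PTE pages: no CPU may still be able to walk
through the old table once its pages go back to the allocator.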

diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index e3b7c5dd6a80..8920321770c7 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,16 @@ static inline struct page *pud_page(pud_t pud)
 	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
+{
+	return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pud_pfn(pud_t pud)
+{
+	return pud_val(pud) >> _PAGE_PFN_SHIFT;
+}
+
 static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
 {
 	return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 8f17f421f80c..83464895f902 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -3,7 +3,13 @@
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 
-#define IOREMAP_MAX_ORDER (PMD_SHIFT)
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool __init arch_vmap_pud_supported(pgprot_t prot)
+{
+	return true;
+}
 
 #define arch_vmap_pmd_supported arch_vmap_pmd_supported
 static inline bool __init arch_vmap_pmd_supported(pgprot_t prot)
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 738dc6f3530f..ed13b80424e6 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -7,6 +7,45 @@
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 
+#ifndef __PAGETABLE_PUD_FOLDED
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);
+
+	set_pud(pud, new_pud);
+	return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+	if (!pud_leaf(READ_ONCE(*pud)))
+		return 0;
+	pud_clear(pud);
+	return 1;
+}
+#endif
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+	int i;
+
+	pmd = (pmd_t *)pud_page_vaddr(*pud);
+	pud_clear(pud);
+
+	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_none(pmd[i])) {
+			pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+			pte_free_kernel(NULL, pte);
+		}
+	}
+	pmd_free(NULL, pmd);
+	return 1;
+}
+
 #ifndef __PAGETABLE_PMD_FOLDED
 int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
 {
--
2.18.0.huawei.25