[PATCH v3 8/8] riscv: mm: Always use ASID to flush MM contexts

From: Samuel Holland
Date: Tue Nov 21 2023 - 20:08:44 EST


Even when the hardware does not support multiple ASIDs, using the
single-ASID variant of the sfence.vma instruction preserves TLB entries
for global (kernel) pages, so it is always more efficient to use the
single-ASID code path.
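
As a rough illustration (not part of this patch), the benefit comes from
the rs2 operand of sfence.vma: when rs2 holds an ASID, the fence applies
only to non-global translations for that address space, so global (G-bit)
kernel mappings stay cached. The sketch below mirrors the shape of the
kernel's local_flush_tlb_all_asid()/local_flush_tlb_page_asid() helpers
but is simplified, not verbatim kernel code:

#define FLUSH_TLB_NO_ASID	((unsigned long)-1)

/* Flush all non-global entries for one address space. */
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		/* rs1 = x0 (all addresses), rs2 = ASID: global entries survive */
		__asm__ __volatile__ ("sfence.vma x0, %0"
				      : : "r" (asid) : "memory");
	else
		/* No ASID known: flush everything, global entries included */
		__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

/* Flush one page of one address space. */
static inline void local_flush_tlb_page_asid(unsigned long addr,
					     unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				      : : "r" (addr), "r" (asid) : "memory");
	else
		__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}

When the ASID allocator is not in use, each mm is left with ASID 0 in
mm->context.id (see init_new_context()), so cntx2asid() still yields the
ASID that is actually programmed into satp and taking the ASID path
unconditionally remains correct.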

Signed-off-by: Samuel Holland <samuel.holland@xxxxxxxxxx>
---

(no changes since v2)

Changes in v2:
- Update both copies of __flush_tlb_range()

 arch/riscv/include/asm/mmu_context.h | 2 --
 arch/riscv/mm/context.c              | 3 +--
 arch/riscv/mm/tlbflush.c             | 5 ++---
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 3ca9b653df7d..20057085ab8a 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 static unsigned long num_asids;
 
 static atomic_long_t current_version;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 23409d70440f..d6619be10341 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -90,8 +90,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		/* check if the tlbflush needs to be sent to other CPUs */
 		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 
-		if (static_branch_unlikely(&use_asid_allocator))
-			asid = cntx2asid(atomic_long_read(&mm->context.id));
+		asid = cntx2asid(atomic_long_read(&mm->context.id));
 	} else {
 		cmask = cpu_online_mask;
 		broadcast = true;
@@ -122,7 +121,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 {
 	unsigned long asid = FLUSH_TLB_NO_ASID;
 
-	if (mm && static_branch_unlikely(&use_asid_allocator))
+	if (mm)
 		asid = cntx2asid(atomic_long_read(&mm->context.id));
 
 	local_flush_tlb_range_asid(start, size, stride, asid);
--
2.42.0