Re: [PATCH v2 07/23] mips: update_mmu_cache() can replace __update_tlb()

From: Hugh Dickins
Date: Thu Jun 15 2023 - 01:43:58 EST


On Wed, 14 Jun 2023, Hugh Dickins wrote:
> On Wed, 14 Jun 2023, Nathan Chancellor wrote:
> >
> > I just bisected a crash while powering down a MIPS machine in QEMU to
> > this change as commit 8044511d3893 ("mips: update_mmu_cache() can
> > replace __update_tlb()") in linux-next.
>
> Thank you, Nathan, that's very helpful indeed. This patch certainly knew
> that it wanted testing, and I'm glad to hear that it is now seeing some.
>
> While powering down? The messages below look like it was just coming up,
> but no doubt that's because you were bisecting (or because I'm unfamiliar
> with what messages to expect there). It's probably irrelevant information,
> but I wonder whether the (V)machine worked well enough for a while before
> you first powered down and spotted the problem, or whether it's never got
> much further than trying to run init (busybox)? I'm trying to get a feel
> for whether the problem occurs under common or uncommon conditions.
>
> > Unfortunately, I can still
> > reproduce it with the existing fix you have for this change on the
> > mailing list, which is present in next-20230614.
>
> Right, that later fix was only for a build warning, nothing functional
> (or at least I hoped that it wasn't making any functional difference).
>
> Thanks a lot for the detailed instructions below: unfortunately, those
> would draw me into a realm of testing I've never needed to enter before,
> so a lot of time spent on setup and learning. Usually, I just stare at
> the source.
>
> What this probably says is that I should revert most of my cleanup there,
> and keep as close to the existing code as possible. But some change is
> needed, and I may need to understand (or have a good guess at) what was
> going wrong, to decide what kind of retreat will be successful.
>
> Back to the source for a while: I hope I'll find examples in nearby MIPS
> kernel source (and git history), which will hint at the right way forward.
> Then I'll send you a patch against next-20230614 to try, when I'm reasonably
> confident that it's enough to satisfy my purpose, but likely not to waste
> your time.

I'm going to take advantage of your good nature by attaching
two alternative patches, either of which goes on top of next-20230614.

mips1.patch,
arch/mips/mm/tlb-r4k.c | 12 +-----------
1 file changed, 1 insertion(+), 11 deletions(-)

is by far my favourite. I couldn't see anything wrong with what's
already there for mips, but it seems possible (though I didn't find
an instance) that something somewhere calls update_mmu_cache_pmd() on
a page table. So mips1.patch restores the pmd_huge() check, and cleans
up further by removing the silly pgdp, p4dp, pudp, pmdp stuff: the
pointer has already been passed in by the caller, so why walk the tree
again? I should have done it this way before.
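
For reference, and why I trust that check: unless I'm misreading
arch/mips/mm/hugetlbpage.c, pmd_huge() on MIPS is just a test of the
_PAGE_HUGE software bit, so it is cheap, and it distinguishes a genuine
huge leaf entry from a pmd that merely points to a page table:

int pmd_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
}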

But if that doesn't work, then I'm afraid it will have to be
mips2.patch,
arch/mips/include/asm/pgtable.h | 15 ++++++++++++---
arch/mips/mm/tlb-r3k.c | 5 ++---
arch/mips/mm/tlb-r4k.c | 27 ++++++++++++++++++---------
3 files changed, 32 insertions(+), 15 deletions(-)

which reverts all of the original patch and its build warning fix,
and does a pte_unmap() to balance the silly pte_offset_map() there;
with an apologetic comment, since this is about the only place in
the tree where I have no idea what to do if ptep were NULL.
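
In case it helps to see what that comment leans on: the usual pattern
at a call site is roughly the below (just a sketch of the generic
sequence, not any particular caller), so the page table is mapped and
locked across the update_mmu_cache() call, and the pte_offset_map()
inside __update_tlb() ought not to find it gone.

	pte_t *ptep;
	spinlock_t *ptl;

	ptep = pte_offset_map_lock(mm, pmdp, address, &ptl);
	if (!ptep)
		return;		/* page table gone; caller retries the fault */
	set_pte_at(mm, address, ptep, entry);
	update_mmu_cache(vma, address, ptep);
	pte_unmap_unlock(ptep, ptl);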

I do hope that you find the first fixes the breakage; but if not, then
I even more fervently hope that the second will, despite my hating it.
Touch wood for the first, fingers crossed for the second, thanks,

Hugh

[mips1.patch]

--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -293,12 +293,6 @@ void local_flush_tlb_one(unsigned long page)
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
-#endif
unsigned long flags;
int idx, pid;

@@ -323,12 +317,8 @@ void update_mmu_cache(struct vm_area_struct *vma,
tlb_probe_hazard();
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- pgdp = pgd_offset(vma->vm_mm, address);
- p4dp = p4d_offset(pgdp, address);
- pudp = pud_offset(p4dp, address);
- pmdp = pmd_offset(pudp, address);
/* this could be a huge page */
- if (ptep == (pte_t *)pmdp) {
+ if (pmd_huge(*(pmd_t *)ptep)) {
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
lo = pte_to_entrylo(pte_val(*ptep));

[mips2.patch]

--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -565,8 +565,15 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
}
#endif

-extern void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep);
+extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ __update_tlb(vma, address, pte);
+}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
@@ -574,7 +581,9 @@ extern void update_mmu_cache(struct vm_area_struct *vma,
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
- update_mmu_cache(vma, address, (pte_t *)pmdp);
+ pte_t pte = *(pte_t *)pmdp;
+
+ __update_tlb(vma, address, pte);
}

/*
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -176,8 +176,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
}
}

-void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
unsigned long flags;
@@ -204,7 +203,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
BARRIER;
tlb_probe();
idx = read_c0_index();
- write_c0_entrylo0(pte_val(*ptep));
+ write_c0_entrylo0(pte_val(pte));
write_c0_entryhi(address | pid);
if (idx < 0) { /* BARRIER */
tlb_write_random();
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -290,16 +290,14 @@ void local_flush_tlb_one(unsigned long page)
* updates the TLB with the new pte(s), and another which also checks
* for the R4k "end of page" hardware bug and does the needy.
*/
-void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ unsigned long flags;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
-#endif
- unsigned long flags;
+ pte_t *ptep, *ptemap = NULL;
int idx, pid;

/*
@@ -318,19 +316,20 @@ void update_mmu_cache(struct vm_area_struct *vma,
pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
write_c0_entryhi(address | pid);
}
+ pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
- idx = read_c0_index();
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- pgdp = pgd_offset(vma->vm_mm, address);
p4dp = p4d_offset(pgdp, address);
pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
+ idx = read_c0_index();
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
- if (ptep == (pte_t *)pmdp) {
+ if (pmd_huge(*pmdp)) {
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
+ ptep = (pte_t *)pmdp;
lo = pte_to_entrylo(pte_val(*ptep));
write_c0_entrylo0(lo);
write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
@@ -345,6 +344,13 @@ void update_mmu_cache(struct vm_area_struct *vma,
} else
#endif
{
+ ptemap = ptep = pte_offset_map(pmdp, address);
+ /*
+ * update_mmu_cache() is called between pte_offset_map_lock()
+ * and pte_unmap_unlock(), so we can assume that ptep is not
+ * NULL here: and what should be done below if it were NULL?
+ */
+
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
@@ -372,6 +378,9 @@ void update_mmu_cache(struct vm_area_struct *vma,
tlbw_use_hazard();
htw_start();
flush_micro_tlb_vm(vma);
+
+ if (ptemap)
+ pte_unmap(ptemap);
local_irq_restore(flags);
}