[RFC PATCH v5 6/8] mm: tlb: Pass struct mmu_gather to flush_hugetlb_tlb_range

From: Zhenyu Ye
Date: Tue Mar 31 2020 - 10:30:15 EST


Preparation for passing struct mmu_gather to flush_tlb_range();
this will be used in subsequent patches.

Signed-off-by: Zhenyu Ye <yezhenyu2@xxxxxxxxxx>
---
arch/powerpc/include/asm/book3s/64/tlbflush.h | 3 ++-
mm/hugetlb.c | 17 ++++++++++++-----
2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 6445d179ac15..968f10ef3d51 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -57,7 +57,8 @@ static inline void flush_pmd_tlb_range(struct mmu_gather *tlb,
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
-static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+static inline void flush_hugetlb_tlb_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd8737a94bec..f913ce0b4831 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4441,7 +4441,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* ARCHes with special requirements for evicting HUGETLB backing TLB entries can
* implement this.
*/
-#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_hugetlb_tlb_range(tlb, vma, addr, end) \
+ flush_tlb_range(vma, addr, end)
#endif

unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -4455,6 +4456,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long pages = 0;
bool shared_pmd = false;
struct mmu_notifier_range range;
+ struct mmu_gather tlb;

/*
* In the case of shared PMDs, the area to flush could be beyond
@@ -4520,10 +4522,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* and that page table be reused and filled with junk. If we actually
* did unshare a page of pmds, flush the range corresponding to the pud.
*/
- if (shared_pmd)
- flush_hugetlb_tlb_range(vma, range.start, range.end);
- else
- flush_hugetlb_tlb_range(vma, start, end);
+ if (shared_pmd) {
+ tlb_gather_mmu(&tlb, mm, range.start, range.end);
+ flush_hugetlb_tlb_range(&tlb, vma, range.start, range.end);
+ tlb_finish_mmu(&tlb, range.start, range.end);
+ } else {
+ tlb_gather_mmu(&tlb, mm, start, end);
+ flush_hugetlb_tlb_range(&tlb, vma, start, end);
+ tlb_finish_mmu(&tlb, start, end);
+ }
/*
* No need to call mmu_notifier_invalidate_range() we are downgrading
* page table protection not changing it to point to a new page.
--
2.19.1