[RFC 05/20] mm/tlb: move BATCHED_UNMAP_TLB_FLUSH to tlb.h

From: Nadav Amit
Date: Sat Jan 30 2021 - 19:17:36 EST


From: Nadav Amit <namit@xxxxxxxxxx>

Arguably, tlb.h is the natural place for TLB-related code. In addition,
task_mmu (fs/proc/task_mmu.c) needs to be able to call
flush_tlb_batched_pending() and therefore cannot (or should not) include
mm/internal.h.

Move the declarations and inline stubs that are guarded by
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH from mm/internal.h to tlb.h.
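
For illustration, a minimal sketch of how a caller outside mm/ (for
example fs/proc/task_mmu.c) could pick up the declaration via
<asm/tlb.h> once this patch is applied. The helper below is
hypothetical and not part of this series:

	#include <asm/tlb.h>

	/* Hypothetical helper, for illustration only. */
	static void example_prepare_pte_walk(struct mm_struct *mm)
	{
		/*
		 * Perform any TLB flushes deferred by batched unmap
		 * before inspecting or rewriting PTEs of @mm.
		 */
		flush_tlb_batched_pending(mm);
	}

With the declarations in tlb.h, such a caller no longer needs to reach
into mm/internal.h, which is private to mm/.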

Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
include/asm-generic/tlb.h | 17 +++++++++++++++++
mm/internal.h | 16 ----------------
mm/mremap.c | 2 +-
mm/vmscan.c | 1 +
4 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index c2deec0b6919..517c89398c83 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -658,6 +658,23 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
static inline bool pte_may_need_flush(pte_t oldpte, pte_t newpte) { return true; }
#endif

+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+void try_to_unmap_flush(void);
+void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
+#else
+static inline void try_to_unmap_flush(void)
+{
+}
+static inline void try_to_unmap_flush_dirty(void)
+{
+}
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
+static inline void tlb_batch_init(void) { }
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */
diff --git a/mm/internal.h b/mm/internal.h
index 25d2b2439f19..d3860f9fbb83 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -585,22 +585,6 @@ struct tlbflush_unmap_batch;
*/
extern struct workqueue_struct *mm_percpu_wq;

-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-void try_to_unmap_flush(void);
-void try_to_unmap_flush_dirty(void);
-void flush_tlb_batched_pending(struct mm_struct *mm);
-#else
-static inline void try_to_unmap_flush(void)
-{
-}
-static inline void try_to_unmap_flush_dirty(void)
-{
-}
-static inline void flush_tlb_batched_pending(struct mm_struct *mm)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];
diff --git a/mm/mremap.c b/mm/mremap.c
index f554320281cc..57655d1b1031 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -26,7 +26,7 @@
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>

#include "internal.h"

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1b574ad199d..ee144c359b41 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -52,6 +52,7 @@

#include <asm/tlbflush.h>
#include <asm/div64.h>
+#include <asm/tlb.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
--
2.25.1