[PATCH v2 1/2] mm: factor out the numa mapping rebuilding into a new helper

From: Baolin Wang
Date: Fri Mar 29 2024 - 02:57:11 EST


In preparation for supporting NUMA balancing of large folios, factor out the
NUMA mapping rebuilding into a new helper.

Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
mm/memory.c | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
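Note (not part of the patch): below is a rough sketch of how a follow-up could
reuse this kind of helper once large folios are handled. The
numa_rebuild_large_mapping() name, the extra addr/ptep parameters passed to the
single-mapping helper and the range arithmetic are all hypothetical; a real
version would have to derive the range from the folio/PTE pfns and clamp it to
the VMA and the PMD table being operated on.

static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
				       struct folio *folio, bool writable)
{
	int nr = folio_nr_pages(folio);
	unsigned long start, addr;
	pte_t *ptep;

	/* Sketch only: assumes the folio is mapped naturally aligned in virtual space. */
	start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
	ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE;

	for (addr = start; addr < start + nr * PAGE_SIZE; addr += PAGE_SIZE, ptep++) {
		pte_t ptent = ptep_get(ptep);

		/* Only rebuild entries that are still NUMA-protected. */
		if (!pte_present(ptent) || !pte_protnone(ptent))
			continue;

		/* Assumes the helper gained explicit addr/ptep parameters. */
		numa_rebuild_single_mapping(vmf, vma, addr, ptep, writable);
	}
}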

diff --git a/mm/memory.c b/mm/memory.c
index 62ee4a15092a..c30fb4b95e15 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5054,6 +5054,20 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
 	return mpol_misplaced(folio, vmf, addr);
 }
 
+static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
+					bool writable)
+{
+	pte_t pte, old_pte;
+
+	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+	pte = pte_modify(old_pte, vma->vm_page_prot);
+	pte = pte_mkyoung(pte);
+	if (writable)
+		pte = pte_mkwrite(pte, vma);
+	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+}
+
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -5159,13 +5173,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * Make it present again, depending on how arch implements
 	 * non-accessible ptes, some can allow access by kernel mode.
 	 */
-	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
-	pte = pte_modify(old_pte, vma->vm_page_prot);
-	pte = pte_mkyoung(pte);
-	if (writable)
-		pte = pte_mkwrite(pte, vma);
-	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+	numa_rebuild_single_mapping(vmf, vma, writable);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	goto out;
 }
--
2.39.3