[PATCH v2 4/4] mm: migrate: change to return the number of pages migrated successfully

From: Baolin Wang
Date: Mon Aug 21 2023 - 20:54:41 EST


Change migrate_misplaced_page() to return the number of pages migrated
successfully, which is used to calculate how many pages failed to migrate
for batch migration. With NUMA balancing support for compound pages, it is
possible that only some of the pages are migrated successfully, so
migrate_misplaced_page() needs to return the number of pages that were
migrated successfully.

Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
 mm/huge_memory.c | 9 +++++----
 mm/memory.c      | 4 +++-
 mm/migrate.c     | 5 +----
 3 files changed, 9 insertions(+), 9 deletions(-)
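
[Not part of the patch, for illustration only: a minimal sketch of how a
batch-migration caller could use the new return value to account for
partial success, e.g. when only some subpages of a split compound page
reach the target node. The helper numa_migrate_batch_example() and its
nr_pages parameter are hypothetical; the migrate_misplaced_page()
signature follows the one used in this series.]

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical caller, not part of this patch. */
static void numa_migrate_batch_example(struct list_head *migratepages,
				       struct vm_area_struct *vma,
				       int page_nid, int target_nid,
				       int nr_pages)
{
	int nr_succeeded, nr_failed;

	/* Returns how many pages of the batch reached target_nid. */
	nr_succeeded = migrate_misplaced_page(migratepages, vma,
					      page_nid, target_nid);

	/* Pages that stayed behind on the source node. */
	nr_failed = nr_pages - nr_succeeded;
	if (nr_failed)
		pr_debug("NUMA migration: %d of %d pages stayed on node %d\n",
			 nr_failed, nr_pages, page_nid);
}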

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4401a3493544..951f73d6b5bf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1494,10 +1494,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int page_nid = NUMA_NO_NODE;
int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
- bool migrated = false, writable = false;
+ bool writable = false;
int flags = 0;
pg_data_t *pgdat;
LIST_HEAD(migratepages);
+ int nr_succeeded;

vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1554,9 +1555,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
}

list_add(&page->lru, &migratepages);
- migrated = migrate_misplaced_page(&migratepages, vma,
- page_nid, target_nid);
- if (migrated) {
+ nr_succeeded = migrate_misplaced_page(&migratepages, vma,
+ page_nid, target_nid);
+ if (nr_succeeded) {
flags |= TNF_MIGRATED;
page_nid = target_nid;
} else {
diff --git a/mm/memory.c b/mm/memory.c
index 9e417e8dd5d5..2773cd804ee9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4771,6 +4771,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
int flags = 0;
pg_data_t *pgdat;
LIST_HEAD(migratepages);
+ int nr_succeeded;

/*
* The "pte" at this point cannot be used safely without
@@ -4854,7 +4855,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)

list_add(&page->lru, &migratepages);
/* Migrate to the requested node */
- if (migrate_misplaced_page(&migratepages, vma, page_nid, target_nid)) {
+ nr_succeeded = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
+ if (nr_succeeded) {
page_nid = target_nid;
flags |= TNF_MIGRATED;
} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index fae7224b8e64..5435cfb225ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2523,7 +2523,6 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
int source_nid, int target_nid)
{
pg_data_t *pgdat = NODE_DATA(target_nid);
- int migrated = 1;
int nr_remaining;
unsigned int nr_succeeded;

@@ -2533,8 +2532,6 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
if (nr_remaining) {
if (!list_empty(migratepages))
putback_movable_pages(migratepages);
-
- migrated = 0;
}
if (nr_succeeded) {
count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
@@ -2543,7 +2540,7 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
nr_succeeded);
}
BUG_ON(!list_empty(migratepages));
- return migrated;
+ return nr_succeeded;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
--
2.39.3