[PATCH] mm: page_alloc: optimize free_unref_page_list()

From: Johannes Weiner
Date: Fri Sep 15 2023 - 16:03:24 EST


Move direct freeing of isolated pages from the preparation loop into the
lock-breaking block of the second loop. The first loop no longer needs to
look up the pageblock migratetype at all, saving an unnecessary
migratetype reassessment per page.

Minor comment and local variable scoping cleanups.

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
mm/page_alloc.c | 44 ++++++++++++++++++--------------------------
1 file changed, 18 insertions(+), 26 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bfffc1af94cd..665930ffe22a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2466,48 +2466,40 @@ void free_unref_page_list(struct list_head *list)
struct per_cpu_pages *pcp = NULL;
struct zone *locked_zone = NULL;
int batch_count = 0;
- int migratetype;
-
- /* Prepare pages for freeing */
- list_for_each_entry_safe(page, next, list, lru) {
- unsigned long pfn = page_to_pfn(page);
-
- if (!free_pages_prepare(page, 0, FPI_NONE)) {
- list_del(&page->lru);
- continue;
- }

- /*
- * Free isolated pages directly to the allocator, see
- * comment in free_unref_page.
- */
- migratetype = get_pfnblock_migratetype(page, pfn);
- if (unlikely(is_migrate_isolate(migratetype))) {
+ list_for_each_entry_safe(page, next, list, lru)
+ if (!free_pages_prepare(page, 0, FPI_NONE))
list_del(&page->lru);
- free_one_page(page_zone(page), page, pfn, 0, FPI_NONE);
- continue;
- }
- }

list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
+ int migratetype;

list_del(&page->lru);
migratetype = get_pfnblock_migratetype(page, pfn);

/*
- * Either different zone requiring a different pcp lock or
- * excessive lock hold times when freeing a large list of
- * pages.
+ * Zone switch, batch complete, or non-pcp freeing?
+ * Drop the pcp lock and evaluate.
*/
- if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
+ if (unlikely(zone != locked_zone ||
+ batch_count == SWAP_CLUSTER_MAX ||
+ is_migrate_isolate(migratetype))) {
if (pcp) {
pcp_spin_unlock(pcp);
pcp_trylock_finish(UP_flags);
+ locked_zone = NULL;
}

- batch_count = 0;
+ /*
+ * Free isolated pages directly to the
+ * allocator, see comment in free_unref_page.
+ */
+ if (is_migrate_isolate(migratetype)) {
+ free_one_page(zone, page, pfn, 0, FPI_NONE);
+ continue;
+ }

/*
* trylock is necessary as pages may be getting freed
@@ -2518,10 +2510,10 @@ void free_unref_page_list(struct list_head *list)
if (unlikely(!pcp)) {
pcp_trylock_finish(UP_flags);
free_one_page(zone, page, pfn, 0, FPI_NONE);
- locked_zone = NULL;
continue;
}
locked_zone = zone;
+ batch_count = 0;
}

/*
--
2.42.0