[PATCH] mm, compaction: make fast_isolate_around() robust against pfns from a wrong zone

From: Vlastimil Babka
Date: Tue Feb 16 2021 - 11:32:34 EST


The pfn that fast_isolate_around() receives is not guaranteed to belong to
cc->zone: a page taken from a free list may be part of a buddy that spans a
zone boundary. Deriving the pageblock's struct page with a plain
pfn_to_page() and clamping end_pfn only against zone_end_pfn() is therefore
insufficient — the scan could operate on pfns from the wrong zone.

Validate the pageblock with pageblock_pfn_to_page(), which checks that both
the start and end pfns are valid and within cc->zone, and bail out if it
returns NULL. Likewise, make fast_isolate_freepages() give up early when the
candidate page it found belongs to a zone other than cc->zone.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
mm/compaction.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 190ccdaa6c19..b75645e4678d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1288,7 +1288,7 @@ static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
unsigned long start_pfn, end_pfn;
- struct page *page = pfn_to_page(pfn);
+ struct page *page;

/* Do not search around if there are enough pages already */
if (cc->nr_freepages >= cc->nr_migratepages)
@@ -1300,7 +1300,11 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long

/* Pageblock boundaries */
start_pfn = pageblock_start_pfn(pfn);
- end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
+ end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
+
+ page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
+ if (!page)
+ return;

/* Scan before */
if (start_pfn != pfn) {
@@ -1486,7 +1490,7 @@ fast_isolate_freepages(struct compact_control *cc)
}

cc->total_free_scanned += nr_scanned;
- if (!page)
+ if (!page || page_zone(page) != cc->zone)
return cc->free_pfn;

low_pfn = page_to_pfn(page);
--
2.30.0