[RFC PATCH 25/26] mm: page_alloc: disallow fallbacks when 2M defrag is enabled

From: Johannes Weiner
Date: Tue Apr 18 2023 - 15:15:40 EST


Fallbacks are already unlikely due to watermarks being enforced
against MIGRATE_FREE blocks. Eliminate them altogether. This allows
compaction to look exclusively at movable blocks, reducing the number
of pageblocks it needs to scan on an ongoing basis.
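
With CONFIG_COMPACTION enabled, the end state (condensed from the hunks
below) is that the only fallback left for any migratetype is the
MIGRATE_FREE pool, and both compaction scanners boil down to a check of
the pageblock's migratetype (the migration scanner also keeps its
pageblock_skip_persistent() test):

	static int fallbacks[MIGRATE_TYPES][2] = {
		[MIGRATE_UNMOVABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
		[MIGRATE_MOVABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
		[MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
	};

	if (!is_migrate_movable(get_pageblock_migratetype(page)))
		continue;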

Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
 mm/compaction.c | 52 +++++--------------------------------------------
 mm/internal.h   |  2 +-
 mm/page_alloc.c |  8 ++++++++
 3 files changed, 14 insertions(+), 48 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index e33c99eb34a8..37dfd1878bef 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1258,46 +1258,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
 
-static bool suitable_migration_source(struct compact_control *cc,
-				      struct page *page)
-{
-	int block_mt;
-
-	if (pageblock_skip_persistent(page))
-		return false;
-
-	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
-		return true;
-
-	block_mt = get_pageblock_migratetype(page);
-
-	if (cc->migratetype == MIGRATE_MOVABLE)
-		return is_migrate_movable(block_mt);
-	else
-		return block_mt == cc->migratetype;
-}
-
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct compact_control *cc,
-				      struct page *page)
-{
-	int mt = get_pageblock_migratetype(page);
-
-	/* If the page is a large free page, then disallow migration */
-	if (mt == MIGRATE_FREE)
-		return false;
-
-	if (cc->ignore_block_suitable)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-	if (is_migrate_movable(mt))
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
 static inline unsigned int
 freelist_scan_limit(struct compact_control *cc)
 {
@@ -1620,7 +1580,7 @@ static void isolate_freepages(struct compact_control *cc)
 			continue;
 
 		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(cc, page))
+		if (!is_migrate_movable(get_pageblock_migratetype(page)))
 			continue;
 
 		/* If isolation recently failed, do not retry */
@@ -1927,14 +1887,12 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 			continue;
 
 		/*
-		 * For async direct compaction, only scan the pageblocks of the
-		 * same migratetype without huge pages. Async direct compaction
-		 * is optimistic to see if the minimum amount of work satisfies
-		 * the allocation. The cached PFN is updated as it's possible
-		 * that all remaining blocks between source and target are
+		 * The cached PFN is updated as it's possible that all
+		 * remaining blocks between source and target are
 		 * unsuitable and the compaction scanners fail to meet.
 		 */
-		if (!suitable_migration_source(cc, page)) {
+		if (pageblock_skip_persistent(page) ||
+		    !is_migrate_movable(get_pageblock_migratetype(page))) {
 			update_cached_migrate(cc, block_end_pfn);
 			continue;
 		}
diff --git a/mm/internal.h b/mm/internal.h
index 24f43f5db88b..1c0886c3ce0e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -741,7 +741,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
 #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_COMPACTION)
 #define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
 #else
 #define ALLOC_NOFRAGMENT 0x0
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f835a5548164..9db588a1de3b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2622,11 +2622,19 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  *
  * The other migratetypes do not have fallbacks.
  */
+#ifdef CONFIG_COMPACTION
+static int fallbacks[MIGRATE_TYPES][2] = {
+	[MIGRATE_UNMOVABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
+	[MIGRATE_MOVABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_TYPES },
+};
+#else
 static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE] = { MIGRATE_FREE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
 	[MIGRATE_MOVABLE] = { MIGRATE_FREE, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_FREE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
 };
+#endif
 
 #ifdef CONFIG_CMA
 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
--
2.39.2