[RFC PATCH 09/26] mm: page_alloc: move expand() above compaction_capture()

From: Johannes Weiner
Date: Tue Apr 18 2023 - 15:15:06 EST


The next patch will allow compaction to capture from
larger-than-requested page blocks and free the remainder.

Rearrange the code in advance so that the diff in the next patch is
easier to read. No functional change.

Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
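Below the fold, two standalone sketches for reviewers following the
move. First, the split-and-free behaviour that expand() implements;
toy_expand(), its pfn arithmetic and the printf bookkeeping are
illustrative stand-ins, not the kernel code:

/*
 * Toy model of expand(): split one order-`high` block down to the
 * requested order-`low`, handing each upper half back as a free
 * block of decreasing order.
 */
#include <stdio.h>

static void toy_expand(unsigned long pfn, int low, int high)
{
	unsigned long size = 1UL << high;	/* pages in the block */

	while (high > low) {
		high--;
		size >>= 1;
		/* the upper half becomes a free block of order `high` */
		printf("free remainder: pfn %lu, order %d\n",
		       pfn + size, high);
	}
	printf("allocated: pfn %lu, order %d\n", pfn, low);
}

int main(void)
{
	/* e.g. serve an order-2 request from an order-4 block */
	toy_expand(0, 2, 4);
	return 0;
}

Serving order 2 from order 4 frees the order-3 and order-2 upper
halves and returns the low order-2 chunk; this is the remainder
freeing that the next patch extends to captured blocks.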
mm/page_alloc.c | 186 ++++++++++++++++++++++++------------------------
1 file changed, 93 insertions(+), 93 deletions(-)
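Second, the filter that compaction_capture() applies, reduced to plain
C. The enum values and TOY_PAGEBLOCK_ORDER are stand-ins for the
kernel's migratetype and pageblock definitions, and this covers only
the accept/reject decision, not the pageblock-conversion step for
order >= pageblock_order:

/*
 * A free page is taken for the compacting allocation only if the
 * order matches exactly, the block is neither CMA nor isolated, and
 * a sub-pageblock grab would not pollute a movable pageblock.
 */
enum mt { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_CMA, MT_ISOLATE };
#define TOY_PAGEBLOCK_ORDER 9

static int may_capture(int order, int cc_order, enum mt migratetype)
{
	if (order != cc_order)
		return 0;	/* only the exact order being compacted for */
	if (migratetype == MT_CMA || migratetype == MT_ISOLATE)
		return 0;	/* never steal CMA or isolated regions */
	if (order < TOY_PAGEBLOCK_ORDER && migratetype == MT_MOVABLE)
		return 0;	/* don't pollute a movable pageblock */
	return 1;
}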

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e5996f8b4b4..cd86f80d7bbe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -950,61 +950,6 @@ static inline void set_buddy_order(struct page *page, unsigned int order)
__SetPageBuddy(page);
}

-#ifdef CONFIG_COMPACTION
-static inline struct capture_control *task_capc(struct zone *zone)
-{
- struct capture_control *capc = current->capture_control;
-
- return unlikely(capc && capc->cc) &&
- !(current->flags & PF_KTHREAD) &&
- !capc->page &&
- capc->cc->zone == zone ? capc : NULL;
-}
-
-static inline bool
-compaction_capture(struct capture_control *capc, struct page *page,
- int order, int migratetype)
-{
- if (!capc || order != capc->cc->order)
- return false;
-
- /* Do not accidentally pollute CMA or isolated regions*/
- if (is_migrate_cma(migratetype) ||
- is_migrate_isolate(migratetype))
- return false;
-
- if (order >= pageblock_order) {
- migratetype = capc->migratetype;
- change_pageblock_range(page, order, migratetype);
- } else if (migratetype == MIGRATE_MOVABLE) {
- /*
- * Do not let lower order allocations pollute a
- * movable pageblock. This might let an unmovable
- * request use a reclaimable pageblock and vice-versa
- * but no more than normal fallback logic which can
- * have trouble finding a high-order free page.
- */
- return false;
- }
-
- capc->page = page;
- return true;
-}
-
-#else
-static inline struct capture_control *task_capc(struct zone *zone)
-{
- return NULL;
-}
-
-static inline bool
-compaction_capture(struct capture_control *capc, struct page *page,
- int order, int migratetype)
-{
- return false;
-}
-#endif /* CONFIG_COMPACTION */
-
static inline void account_freepages(struct page *page, struct zone *zone,
int nr_pages, int migratetype)
{
@@ -1072,6 +1017,99 @@ static inline void del_page_from_free_list(struct page *page, struct zone *zone,
account_freepages(page, zone, -(1 << order), migratetype);
}

+/*
+ * The order of subdivision here is critical for the IO subsystem.
+ * Please do not alter this order without good reasons and regression
+ * testing. Specifically, as large blocks of memory are subdivided,
+ * the order in which smaller blocks are delivered depends on the order
+ * they're subdivided in this function. This is the primary factor
+ * influencing the order in which pages are delivered to the IO
+ * subsystem according to empirical testing, and this is also justified
+ * by considering the behavior of a buddy system containing a single
+ * large block of memory acted on by a series of small allocations.
+ * This behavior is a critical factor in sglist merging's success.
+ *
+ * -- nyc
+ */
+static inline void expand(struct zone *zone, struct page *page,
+ int low, int high, int migratetype)
+{
+ unsigned long size = 1 << high;
+
+ while (high > low) {
+ high--;
+ size >>= 1;
+ VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
+
+ /*
+ * Mark as guard pages (or page), that will allow to
+ * merge back to allocator when buddy will be freed.
+ * Corresponding page table entries will not be touched,
+ * pages will stay not present in virtual address space
+ */
+ if (set_page_guard(zone, &page[size], high))
+ continue;
+
+ add_to_free_list(&page[size], zone, high, migratetype, false);
+ set_buddy_order(&page[size], high);
+ }
+}
+
+#ifdef CONFIG_COMPACTION
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+ struct capture_control *capc = current->capture_control;
+
+ return unlikely(capc && capc->cc) &&
+ !(current->flags & PF_KTHREAD) &&
+ !capc->page &&
+ capc->cc->zone == zone ? capc : NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+ int order, int migratetype)
+{
+ if (!capc || order != capc->cc->order)
+ return false;
+
+ /* Do not accidentally pollute CMA or isolated regions*/
+ if (is_migrate_cma(migratetype) ||
+ is_migrate_isolate(migratetype))
+ return false;
+
+ if (order >= pageblock_order) {
+ migratetype = capc->migratetype;
+ change_pageblock_range(page, order, migratetype);
+ } else if (migratetype == MIGRATE_MOVABLE) {
+ /*
+ * Do not let lower order allocations pollute a
+ * movable pageblock. This might let an unmovable
+ * request use a reclaimable pageblock and vice-versa
+ * but no more than normal fallback logic which can
+ * have trouble finding a high-order free page.
+ */
+ return false;
+ }
+
+ capc->page = page;
+ return true;
+}
+
+#else
+static inline struct capture_control *task_capc(struct zone *zone)
+{
+ return NULL;
+}
+
+static inline bool
+compaction_capture(struct capture_control *capc, struct page *page,
+ int order, int migratetype)
+{
+ return false;
+}
+#endif /* CONFIG_COMPACTION */
+
/*
* If this is not the largest possible page, check if the buddy
* of the next-highest order is free. If it is, it's possible
@@ -2345,44 +2383,6 @@ void __init init_cma_reserved_pageblock(struct page *page)
}
#endif

-/*
- * The order of subdivision here is critical for the IO subsystem.
- * Please do not alter this order without good reasons and regression
- * testing. Specifically, as large blocks of memory are subdivided,
- * the order in which smaller blocks are delivered depends on the order
- * they're subdivided in this function. This is the primary factor
- * influencing the order in which pages are delivered to the IO
- * subsystem according to empirical testing, and this is also justified
- * by considering the behavior of a buddy system containing a single
- * large block of memory acted on by a series of small allocations.
- * This behavior is a critical factor in sglist merging's success.
- *
- * -- nyc
- */
-static inline void expand(struct zone *zone, struct page *page,
- int low, int high, int migratetype)
-{
- unsigned long size = 1 << high;
-
- while (high > low) {
- high--;
- size >>= 1;
- VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
-
- /*
- * Mark as guard pages (or page), that will allow to
- * merge back to allocator when buddy will be freed.
- * Corresponding page table entries will not be touched,
- * pages will stay not present in virtual address space
- */
- if (set_page_guard(zone, &page[size], high))
- continue;
-
- add_to_free_list(&page[size], zone, high, migratetype, false);
- set_buddy_order(&page[size], high);
- }
-}
-
static void check_new_page_bad(struct page *page)
{
if (unlikely(page->flags & __PG_HWPOISON)) {
--
2.39.2