[PATCH 04/12] hugetlb: Convert remove_pool_huge_page() to remove_pool_hugetlb_folio()

From: Mike Kravetz
Date: Fri Aug 25 2023 - 15:07:10 EST


From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>

Convert the callers to expect a folio and remove the unnecessary conversion
back to a struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
---
mm/hugetlb.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
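
[Note for reviewers, not part of the commit message: a minimal
before/after sketch of the calling convention, condensed from the
hunks below, showing why the conversion back to a struct page can
simply go away.]

	/* Before: the helper worked on a folio internally but handed
	 * its caller a page via "return &folio->page;". */
	struct page *page;

	page = remove_pool_huge_page(h, nodes_allowed, 0);
	if (page)
		list_add(&page->lru, &page_list);

	/* After: the caller receives the folio directly.  folio->lru
	 * occupies the same storage as the head page's page->lru, so
	 * the list handling is unchanged and no conversion back to a
	 * page is needed. */
	struct folio *folio;

	folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
	if (folio)
		list_add(&folio->lru, &page_list);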

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a5348dfada89..ec10e3804060 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1446,7 +1446,7 @@ static int hstate_next_node_to_alloc(struct hstate *h,
}

/*
- * helper for remove_pool_huge_page() - return the previously saved
+ * helper for remove_pool_hugetlb_folio() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
* next node id whether or not we find a free huge page to free so
* that the next attempt to free addresses the next node.
@@ -2217,9 +2217,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
* an additional call to free the page to low level allocators.
* Called with hugetlb_lock locked.
*/
-static struct page *remove_pool_huge_page(struct hstate *h,
- nodemask_t *nodes_allowed,
- bool acct_surplus)
+static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
+ nodemask_t *nodes_allowed, bool acct_surplus)
{
int nr_nodes, node;
struct folio *folio = NULL;
@@ -2239,7 +2238,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
}
}

- return &folio->page;
+ return folio;
}

/*
@@ -2593,7 +2592,6 @@ static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
- struct page *page;
LIST_HEAD(page_list);

lockdep_assert_held(&hugetlb_lock);
@@ -2614,15 +2612,17 @@ static void return_unused_surplus_pages(struct hstate *h,
* evenly across all nodes with memory. Iterate across these nodes
* until we can no longer free unreserved surplus pages. This occurs
* when the nodes with surplus pages have no free pages.
- * remove_pool_huge_page() will balance the freed pages across the
+ * remove_pool_hugetlb_folio() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
- if (!page)
+ struct folio *folio;
+
+ folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
+ if (!folio)
goto out;

- list_add(&page->lru, &page_list);
+ list_add(&folio->lru, &page_list);
}

out:
@@ -3417,7 +3417,6 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
- struct page *page;
LIST_HEAD(page_list);
NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

@@ -3537,11 +3536,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* Collect pages to be removed on list without dropping lock
*/
while (min_count < persistent_huge_pages(h)) {
- page = remove_pool_huge_page(h, nodes_allowed, 0);
- if (!page)
+ struct folio *folio;
+
+ folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
+ if (!folio)
break;

- list_add(&page->lru, &page_list);
+ list_add(&folio->lru, &page_list);
}
/* free the pages after dropping lock */
spin_unlock_irq(&hugetlb_lock);
--
2.41.0