[PATCH mm-unstable 8/8] mm/hugetlb: convert demote_free_huge_page to folios

From: Sidhartha Kumar
Date: Tue Jan 03 2023 - 14:17:04 EST

Change demote_free_huge_page() to demote_free_hugetlb_folio() and change
demote_pool_huge_page() to pass in a folio.
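
For illustration, the error path that returns a page to the free list
moves from page-based to folio-based calls like this (a minimal sketch
of the idiom, taken from the hunk below; not compilable on its own):

	/* before: page API */
	set_page_refcounted(page);
	add_hugetlb_folio(h, page_folio(page), false);

	/* after: folio API, reusing the folio we already hold */
	folio_ref_unfreeze(folio, 1);	/* refcount goes 0 -> 1 */
	add_hugetlb_folio(h, folio, false);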

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
---
mm/hugetlb.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2bb69b098117..a89728c6987d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3438,12 +3438,12 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
return 0;
}

-static int demote_free_huge_page(struct hstate *h, struct page *page)
+static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- int i, nid = page_to_nid(page);
+ int i, nid = folio_nid(folio);
struct hstate *target_hstate;
- struct folio *folio = page_folio(page);
struct page *subpage;
+ struct folio *subfolio;
int rc = 0;

target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
@@ -3451,18 +3451,18 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
remove_hugetlb_folio_for_demote(h, folio, false);
spin_unlock_irq(&hugetlb_lock);

- rc = hugetlb_vmemmap_restore(h, page);
+ rc = hugetlb_vmemmap_restore(h, &folio->page);
if (rc) {
- /* Allocation of vmemmmap failed, we can not demote page */
+ /* Allocation of vmemmap failed, we cannot demote the folio */
spin_lock_irq(&hugetlb_lock);
- set_page_refcounted(page);
- add_hugetlb_folio(h, page_folio(page), false);
+ folio_ref_unfreeze(folio, 1);
+ add_hugetlb_folio(h, folio, false);
return rc;
}

/*
* Use destroy_compound_hugetlb_folio_for_demote for all huge page
- * sizes as it will not ref count pages.
+ * sizes as it will not ref count folios.
*/
destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));

@@ -3477,15 +3477,15 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
mutex_lock(&target_hstate->resize_lock);
for (i = 0; i < pages_per_huge_page(h);
i += pages_per_huge_page(target_hstate)) {
- subpage = nth_page(page, i);
- folio = page_folio(subpage);
+ subpage = folio_page(folio, i);
+ subfolio = page_folio(subpage);
if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_folio_for_demote(folio,
+ prep_compound_gigantic_folio_for_demote(subfolio,
target_hstate->order);
else
prep_compound_page(subpage, target_hstate->order);
- set_page_private(subpage, 0);
- prep_new_hugetlb_folio(target_hstate, folio, nid);
+ folio_change_private(subfolio, NULL);
+ prep_new_hugetlb_folio(target_hstate, subfolio, nid);
free_huge_page(subpage);
}
mutex_unlock(&target_hstate->resize_lock);
@@ -3508,6 +3508,7 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
int nr_nodes, node;
struct page *page;
+ struct folio *folio;

lockdep_assert_held(&hugetlb_lock);

@@ -3521,8 +3522,8 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
if (PageHWPoison(page))
continue;
-
- return demote_free_huge_page(h, page);
+ folio = page_folio(page);
+ return demote_free_hugetlb_folio(h, folio);
}
}

--
2.39.0