Re: [PATCH mm-unstable 5/8] mm/hugetlb: increase use of folios in alloc_huge_page()

From: Mike Kravetz
Date: Fri Jan 06 2023 - 19:31:38 EST


On 01/03/23 13:13, Sidhartha Kumar wrote:
> Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma()
> and alloc_buddy_huge_page_with_mpol() to use folios

Nice that the only 'conversion' needed was eliminating the page-to-folio
and folio-to-page calls in those routines.
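
For anyone skimming the series, the round trip being dropped looks
roughly like this (just a sketch of the before/after shape, not the
exact kernel code):

    /* before: the helper flattened its folio to a page ... */
    page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
    /* ... and alloc_huge_page() immediately converted it back */
    folio = page_folio(page);

    /* after: the folio is passed through untouched */
    folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);

Same story for alloc_buddy_huge_page_with_mpol(), which no longer has
to return &folio->page.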

> so that alloc_huge_page()
> is cleaned up by operating on folios until its return.
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
> ---
>  include/linux/hugetlb_cgroup.h |  8 ++++----
>  mm/hugetlb.c                   | 33 ++++++++++++++++-----------------
>  mm/hugetlb_cgroup.c            |  8 ++------
>  3 files changed, 22 insertions(+), 27 deletions(-)

Thanks,

Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
--
Mike Kravetz
>
> diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
> index f706626a8063..3d82d91f49ac 100644
> --- a/include/linux/hugetlb_cgroup.h
> +++ b/include/linux/hugetlb_cgroup.h
> @@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
>                                               struct hugetlb_cgroup **ptr);
>  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
>                                           struct hugetlb_cgroup *h_cg,
> -                                         struct page *page);
> +                                         struct folio *folio);
>  extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
>                                                struct hugetlb_cgroup *h_cg,
> -                                              struct page *page);
> +                                              struct folio *folio);
>  extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
>                                            struct folio *folio);
>  extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
> @@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
> 
>  static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
>                                                  struct hugetlb_cgroup *h_cg,
> -                                                struct page *page)
> +                                                struct folio *folio)
>  {
>  }
> 
>  static inline void
>  hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
>                                    struct hugetlb_cgroup *h_cg,
> -                                  struct page *page)
> +                                  struct folio *folio)
>  {
>  }
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0b8bab52bc7e..640ca4eaccf2 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1209,7 +1209,7 @@ static unsigned long available_huge_pages(struct hstate *h)
>          return h->free_huge_pages - h->resv_huge_pages;
>  }
> 
> -static struct page *dequeue_huge_page_vma(struct hstate *h,
> +static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
>                                  struct vm_area_struct *vma,
>                                  unsigned long address, int avoid_reserve,
>                                  long chg)
> @@ -1253,7 +1253,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
>          }
> 
>          mpol_cond_put(mpol);
> -        return &folio->page;
> +        return folio;
> 
>  err:
>          return NULL;
> @@ -2305,7 +2305,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
>   * Use the VMA's mpolicy to allocate a huge page from the buddy.
>   */
>  static
> -struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
> +struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
>                  struct vm_area_struct *vma, unsigned long addr)
>  {
>          struct folio *folio = NULL;
> @@ -2328,7 +2328,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
>          if (!folio)
>                  folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
>          mpol_cond_put(mpol);
> -        return &folio->page;
> +        return folio;
>  }
> 
>  /* page migration callback function */
> @@ -2877,7 +2877,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
>  {
>          struct hugepage_subpool *spool = subpool_vma(vma);
>          struct hstate *h = hstate_vma(vma);
> -        struct page *page;
>          struct folio *folio;
>          long map_chg, map_commit;
>          long gbl_chg;
> @@ -2941,34 +2940,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
>           * from the global free pool (global change). gbl_chg == 0 indicates
>           * a reservation exists for the allocation.
>           */
> -        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
> -        if (!page) {
> +        folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
> +        if (!folio) {
>                  spin_unlock_irq(&hugetlb_lock);
> -                page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
> -                if (!page)
> +                folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
> +                if (!folio)
>                          goto out_uncharge_cgroup;
>                  spin_lock_irq(&hugetlb_lock);
>                  if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
> -                        SetHPageRestoreReserve(page);
> +                        folio_set_hugetlb_restore_reserve(folio);
>                          h->resv_huge_pages--;
>                  }
> -                list_add(&page->lru, &h->hugepage_activelist);
> -                set_page_refcounted(page);
> +                list_add(&folio->lru, &h->hugepage_activelist);
> +                folio_ref_unfreeze(folio, 1);
>                  /* Fall through */
>          }
> -        folio = page_folio(page);
> -        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
> +
> +        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
>          /* If allocation is not consuming a reservation, also store the
>           * hugetlb_cgroup pointer on the page.
>           */
>          if (deferred_reserve) {
>                  hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
> -                                                  h_cg, page);
> +                                                  h_cg, folio);
>          }
> 
>          spin_unlock_irq(&hugetlb_lock);
> 
> -        hugetlb_set_page_subpool(page, spool);
> +        hugetlb_set_folio_subpool(folio, spool);
> 
>          map_commit = vma_commit_reservation(h, vma, addr);
>          if (unlikely(map_chg > map_commit)) {
> @@ -2989,7 +2988,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
>                  hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
>                                  pages_per_huge_page(h), folio);
>          }
> -        return page;
> +        return &folio->page;
> 
>  out_uncharge_cgroup:
>          hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
> diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
> index d9e4425d81ac..dedd2edb076e 100644
> --- a/mm/hugetlb_cgroup.c
> +++ b/mm/hugetlb_cgroup.c
> @@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
> 
>  void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
>                                    struct hugetlb_cgroup *h_cg,
> -                                  struct page *page)
> +                                  struct folio *folio)
>  {
> -        struct folio *folio = page_folio(page);
> -
>          __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
>  }
> 
>  void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
>                                         struct hugetlb_cgroup *h_cg,
> -                                       struct page *page)
> +                                       struct folio *folio)
>  {
> -        struct folio *folio = page_folio(page);
> -
>          __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
>  }
> 
> --
> 2.39.0
>