Re: [PATCH v2] mm: memory: move mem_cgroup_charge() into alloc_anon_folio()

From: Ryan Roberts
Date: Thu Jan 18 2024 - 09:49:21 EST


On 17/01/2024 10:39, Kefeng Wang wrote:
> mem_cgroup_charge() uses the GFP flags in a fairly sophisticated way.
> In addition to checking gfpflags_allow_blocking(), it pays attention
> to __GFP_NORETRY and __GFP_RETRY_MAYFAIL to ensure that processes within
> this memcg do not exceed their quotas. Using the same GFP flags ensures
> that we handle large anonymous folios correctly, including falling back
> to smaller orders when there is plenty of memory available in the system
> but this memcg is close to its limits.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>

Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
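
One extra note for anyone skimming the thread: the practical effect of moving
the charge into alloc_anon_folio() is that a refused memcg charge at a high
order now just means retrying at the next smaller order, rather than failing
the fault outright. Below is a rough user-space sketch of that shape, purely
for illustration; try_charge() and alloc_order() are made-up stand-ins for
mem_cgroup_charge() and vma_alloc_folio(), not the kernel code itself.

/*
 * Illustrative user-space sketch only (not kernel code): try_charge() and
 * alloc_order() are made-up stand-ins for mem_cgroup_charge() and
 * vma_alloc_folio().  The point is the loop shape: a refused charge at a
 * large order falls back to the next smaller order instead of failing.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ	4096UL

/* Pretend quota: this "memcg" refuses any single charge above 8 pages. */
static bool try_charge(unsigned long nr_pages)
{
	return nr_pages <= 8;
}

static void *alloc_order(int order)
{
	return malloc(PAGE_SZ << order);
}

/* Walk orders from high to low, mirroring the loop in alloc_anon_folio(). */
static void *alloc_with_fallback(int max_order)
{
	for (int order = max_order; order >= 0; order--) {
		void *p = alloc_order(order);

		if (!p)
			continue;		/* allocation failed, go smaller */
		if (!try_charge(1UL << order)) {
			free(p);		/* charge refused, go smaller */
			continue;
		}
		printf("got order-%d (%lu bytes)\n", order, PAGE_SZ << order);
		return p;
	}
	return NULL;
}

int main(void)
{
	void *p = alloc_with_fallback(5);	/* ask for up to 32 pages */

	free(p);
	return 0;
}

Since the allocation and the charge now see the same gfp flags, both fall
back consistently, which is exactly the behaviour the changelog describes.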

> ---
> v2:
> - fix build when !CONFIG_TRANSPARENT_HUGEPAGE
> - update changelog as suggested by Matthew Wilcox
>
>  mm/memory.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 5e88d5379127..551f0b21bc42 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4153,8 +4153,8 @@ static bool pte_range_none(pte_t *pte, int nr_pages)
>
>  static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>  {
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	struct vm_area_struct *vma = vmf->vma;
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	unsigned long orders;
>  	struct folio *folio;
>  	unsigned long addr;
> @@ -4206,15 +4206,21 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>  		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
>  		folio = vma_alloc_folio(gfp, order, vma, addr, true);
>  		if (folio) {
> +			if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
> +				folio_put(folio);
> +				goto next;
> +			}
> +			folio_throttle_swaprate(folio, gfp);
>  			clear_huge_page(&folio->page, vmf->address, 1 << order);
>  			return folio;
>  		}
> +next:
>  		order = next_order(&orders, order);
>  	}
>
>  fallback:
>  #endif
> -	return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
> +	return folio_prealloc(vma->vm_mm, vma, vmf->address, true);
>  }
>
> /*
> @@ -4281,10 +4287,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>  	nr_pages = folio_nr_pages(folio);
>  	addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
>
> -	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
> -		goto oom_free_page;
> -	folio_throttle_swaprate(folio, GFP_KERNEL);
> -
>  	/*
>  	 * The memory barrier inside __folio_mark_uptodate makes sure that
>  	 * preceding stores to the page contents become visible before
> @@ -4338,8 +4340,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>  release:
>  	folio_put(folio);
>  	goto unlock;
> -oom_free_page:
> -	folio_put(folio);
>  oom:
>  	return VM_FAULT_OOM;
>  }