Re: [PATCH 1/1] mm/khugepaged: bypass unnecessary scans with MMF_DISABLE_THP check

From: Yang Shi
Date: Mon Jan 29 2024 - 13:53:28 EST


On Sun, Jan 28, 2024 at 9:46 PM Lance Yang <ioworker0@xxxxxxxxx> wrote:
>
> khugepaged scans the entire address space in the
> background for each given mm, looking for
> opportunities to collapse sequences of base pages
> into huge pages. However, when an mm is inserted
> into the mm_slots list and the MMF_DISABLE_THP
> flag is set later, this scanning becomes
> unnecessary for that mm and can be skipped to
> avoid redundant work, especially when the address
> space is large.
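
For context, MMF_DISABLE_THP is the per-mm flag behind
prctl(PR_SET_THP_DISABLE). A minimal userspace sketch of the scenario
described above, where THP is disabled for an mm that khugepaged may
already be tracking:

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		/* Set MMF_DISABLE_THP on the calling process's mm. */
		if (prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0))
			perror("prctl(PR_SET_THP_DISABLE)");

		/* PR_GET_THP_DISABLE returns 1 once the flag is set. */
		printf("THP disabled: %d\n",
		       prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0));
		return 0;
	}
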
>
> Introduce a check before each scan to test the
> MMF_DISABLE_THP flag for the given mm; if the
> flag is set, the scan is bypassed, improving the
> efficiency of khugepaged.
>
> Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
> ---
> mm/khugepaged.c | 18 ++++++++++++------
> 1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 2b219acb528e..d6a700834edc 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -410,6 +410,12 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm)
> return atomic_read(&mm->mm_users) == 0;
> }
>
> +static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
> +{
> + return hpage_collapse_test_exit(mm) ||
> + test_bit(MMF_DISABLE_THP, &mm->flags);
> +}
> +
> void __khugepaged_enter(struct mm_struct *mm)
> {
> struct khugepaged_mm_slot *mm_slot;
> @@ -1422,7 +1428,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
>
> lockdep_assert_held(&khugepaged_mm_lock);
>
> - if (hpage_collapse_test_exit(mm)) {
> + if (hpage_collapse_test_exit_or_disable(mm)) {
> /* free mm_slot */
> hash_del(&slot->hash);
> list_del(&slot->mm_node);
> @@ -2360,7 +2366,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> goto breakouterloop_mmap_lock;
>
> progress++;
> - if (unlikely(hpage_collapse_test_exit(mm)))
> + if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> goto breakouterloop;
>
> vma_iter_init(&vmi, mm, khugepaged_scan.address);
> @@ -2368,7 +2374,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> unsigned long hstart, hend;
>
> cond_resched();
> - if (unlikely(hpage_collapse_test_exit(mm))) {
> + if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {

The later thp_vma_allowable_order() already checks whether
MMF_DISABLE_THP is set, and hugepage_vma_revalidate() repeats the
same check after re-acquiring mmap_lock. The check in khugepaged
should already be serialized against prctl, which takes mmap_lock in
write mode.
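
For reference, this is roughly the relevant check in
__thp_vma_allowable_orders() (mm/huge_memory.c), quoting from memory
of a recent tree; the exact shape varies by version:

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;

hugepage_vma_revalidate() funnels into the same check via
thp_vma_allowable_order(), so a racing prctl(PR_SET_THP_DISABLE) is
caught again under the re-acquired mmap_lock.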

> progress++;
> break;
> }
> @@ -2390,7 +2396,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> bool mmap_locked = true;
>
> cond_resched();
> - if (unlikely(hpage_collapse_test_exit(mm)))
> + if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
> goto breakouterloop;
>
> VM_BUG_ON(khugepaged_scan.address < hstart ||
> @@ -2408,7 +2414,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> fput(file);
> if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
> mmap_read_lock(mm);
> - if (hpage_collapse_test_exit(mm))
> + if (hpage_collapse_test_exit_or_disable(mm))
> goto breakouterloop;
> *result = collapse_pte_mapped_thp(mm,
> khugepaged_scan.address, false);
> @@ -2450,7 +2456,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
> * Release the current mm_slot if this mm is about to die, or
> * if we scanned all vmas of this mm.
> */
> - if (hpage_collapse_test_exit(mm) || !vma) {
> + if (hpage_collapse_test_exit_or_disable(mm) || !vma) {
> /*
> * Make sure that if mm_users is reaching zero while
> * khugepaged runs here, khugepaged_exit will find
> --
> 2.33.1
>