[PATCH v4 6/7] hugetlb: parallelize 2M hugetlb allocation and initialization

From: Gang Li
Date: Thu Jan 18 2024 - 07:40:53 EST


By distributing both the allocation and the initialization tasks across
multiple threads, the initialization of 2M hugetlb pages becomes
faster, thereby improving boot speed.
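
The speedup comes from padata's multithreaded job interface
(include/linux/padata.h), extended earlier in this series with a
numa_aware flag: the caller describes a [start, start + size) range and
a per-chunk worker, and padata_do_multithreaded() splits the range into
chunks, runs the worker on them from several helper threads, and waits
for completion. A minimal sketch of the pattern (worker name and tuning
values are illustrative, not part of this patch):

    #include <linux/padata.h>

    /* Hypothetical worker: handles the chunk [start, end). */
    static void __init demo_worker(unsigned long start, unsigned long end,
                                   void *arg)
    {
            unsigned long i;

            for (i = start; i < end; i++)
                    ;       /* per-item work goes here */
    }

    static void __init demo_run(unsigned long nr_items)
    {
            struct padata_mt_job job = {
                    .thread_fn   = demo_worker,
                    .fn_arg      = NULL,           /* passed through to the worker */
                    .start       = 0,
                    .size        = nr_items,
                    .align       = 1,              /* no chunk-alignment requirement */
                    .min_chunk   = nr_items / 8,   /* illustrative per-thread minimum */
                    .max_threads = 8,              /* illustrative thread cap */
                    .numa_aware  = true,           /* spread helper threads across nodes */
            };

            padata_do_multithreaded(&job);  /* returns after all chunks finish */
    }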

Here are some test results:
test                 no patch (ms)   patched (ms)   saved
-----------------    -------------   ------------   ------
256c2t(4 node) 2M             3336           1051   68.52%
128c1t(2 node) 2M             1943            716   63.15%
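
The timings cover boot-time reservation of 2M pages, i.e. the path
exercised by a kernel command line such as the following (the page
count is illustrative, not the count used in the tests above):

    hugepagesz=2M hugepages=12288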

Signed-off-by: Gang Li <gang.li@xxxxxxxxx>
Tested-by: David Rientjes <rientjes@xxxxxxxxxx>
---
mm/hugetlb.c | 70 ++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 52 insertions(+), 18 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index effe5539e545..9b348ba418f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -35,6 +35,7 @@
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
+#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -3510,43 +3511,76 @@ static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
}
}

-static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
+static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
{
- unsigned long i;
+ struct hstate *h = (struct hstate *)arg;
+ int i, num = end - start;
+ nodemask_t node_alloc_noretry;
+ unsigned long flags;
+ int next_node = 0;

- for (i = 0; i < h->max_huge_pages; ++i) {
- if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
+ /* Bit mask controlling how hard we retry per-node allocations. */
+ nodes_clear(node_alloc_noretry);
+
+ for (i = 0; i < num; ++i) {
+ struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+ &node_alloc_noretry, &next_node);
+ if (!folio)
break;
+ spin_lock_irqsave(&hugetlb_lock, flags);
+ __prep_account_new_huge_page(h, folio_nid(folio));
+ enqueue_hugetlb_folio(h, folio);
+ spin_unlock_irqrestore(&hugetlb_lock, flags);
cond_resched();
}
+}

- return i;
+static void __init hugetlb_vmemmap_optimize_node(unsigned long start, unsigned long end, void *arg)
+{
+ struct hstate *h = (struct hstate *)arg;
+ int nid = start;
+
+ hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
}

-static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
+static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
{
unsigned long i;
- struct folio *folio;
- LIST_HEAD(folio_list);
- nodemask_t node_alloc_noretry;
-
- /* Bit mask controlling how hard we retry per-node allocations.*/
- nodes_clear(node_alloc_noretry);

for (i = 0; i < h->max_huge_pages; ++i) {
- folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
- &node_alloc_noretry);
- if (!folio)
+ if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
break;
- list_add(&folio->lru, &folio_list);
cond_resched();
}

- prep_and_add_allocated_folios(h, &folio_list);
-
return i;
}

+static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
+{
+ struct padata_mt_job job = {
+ .fn_arg = h,
+ .align = 1,
+ .numa_aware = true
+ };
+
+ job.thread_fn = hugetlb_alloc_node;
+ job.start = 0;
+ job.size = h->max_huge_pages;
+ job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
+ job.max_threads = num_node_state(N_MEMORY) * 2;
+ padata_do_multithreaded(&job);
+
+ job.thread_fn = hugetlb_vmemmap_optimize_node;
+ job.start = 0;
+ job.size = num_node_state(N_MEMORY);
+ job.min_chunk = 1;
+ job.max_threads = num_node_state(N_MEMORY);
+ padata_do_multithreaded(&job);
+
+ return h->nr_huge_pages;
+}
+
/*
* NOTE: this routine is called in different contexts for gigantic and
* non-gigantic pages.
--
2.20.1