[PATCH v2 2/7] mm/memory_hotplug: avoid poisoning memmap during mhp addition phase

From: Sumanth Korikkar
Date: Thu Nov 23 2023 - 04:24:21 EST


When a memory block is marked inaccessible, avoid page_init_poison() on
the memmap during the memory hotplug addition phase. Instead, perform it
later, in the memory hotplug online phase, once the memory is physically
accessible.

When the memory block is marked accessible (the default when no
mhp_flags are passed), page init poisoning of the memmap is performed in
sparse_add_section().

Page init poisoning of the memmap is performed section by section, with
a cond_resched() in between. This mirrors the approach of commit
d33695b16a9f ("mm/memory_hotplug: poison memmap in
remove_pfn_range_from_zone()").

Architectures can pass the MHP_OFFLINE_INACCESSIBLE mhp_flag to
add_memory() to mark a memory block as initially inaccessible during the
memory hotplug addition phase.
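
A minimal caller sketch (hypothetical, for illustration only: nid, start
and size are placeholder values, and MHP_OFFLINE_INACCESSIBLE is assumed
to be used together with MHP_MEMMAP_ON_MEMORY, as elsewhere in this
series):

	int rc;

	/*
	 * Standby memory that is physically inaccessible until onlined:
	 * place the memmap on the hotplugged range and mark the block
	 * inaccessible, so the memmap is not poisoned (or otherwise
	 * touched) during the addition phase.
	 */
	rc = add_memory(nid, start, size,
			MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE);
	if (rc)
		pr_err("add_memory() failed: %d\n", rc);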

Signed-off-by: Sumanth Korikkar <sumanthk@xxxxxxxxxxxxx>
---
 drivers/base/memory.c          |  3 ++-
 include/linux/memory_hotplug.h |  2 +-
 mm/memory_hotplug.c            | 27 ++++++++++++++++++++++++++-
 mm/sparse.c                    |  3 ++-
 4 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 51915d5c3f88..cbff43b2ef44 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -209,7 +209,8 @@ static int memory_block_online(struct memory_block *mem)
 
 	mem_hotplug_begin();
 	if (nr_vmemmap_pages) {
-		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
+						zone, mem->inaccessible);
 		if (ret)
 			goto out;
 	}
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8988cd5ad55d..791bc019e992 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -164,7 +164,7 @@ extern void adjust_present_page_count(struct page *page,
				      long nr_pages);
 /* VM interface that may be used by firmware interface */
 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
-				     struct zone *zone);
+				     struct zone *zone, bool mhp_off_inaccessible);
 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1e02eba166b0..ac7cfc09502d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1083,8 +1083,25 @@ void adjust_present_page_count(struct page *page, struct memory_group *group,
 		group->present_kernel_pages += nr_pages;
 }
 
+static void page_init_poison_with_resched(unsigned long start_pfn, unsigned long nr_pages)
+{
+	const unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long pfn, cur_nr_pages;
+
+	/* Poison struct pages because they are now uninitialized again. */
+	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+		cond_resched();
+
+		/* Select all remaining pages up to the next section boundary */
+		cur_nr_pages =
+			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+		page_init_poison(pfn_to_page(pfn),
+				 sizeof(struct page) * cur_nr_pages);
+	}
+}
+
 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
-			      struct zone *zone)
+			      struct zone *zone, bool mhp_off_inaccessible)
 {
 	unsigned long end_pfn = pfn + nr_pages;
 	int ret, i;
@@ -1092,6 +1109,14 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
 	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
 	if (ret)
 		return ret;
+	/*
+	 * The memory block is accessible at this stage, so poison the
+	 * struct pages now. If the memory block was accessible during the
+	 * memory hotplug addition phase, page poisoning has already been
+	 * performed in sparse_add_section().
+	 */
+	if (mhp_off_inaccessible)
+		page_init_poison_with_resched(pfn, nr_pages);
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
 
 	for (i = 0; i < nr_pages; i++)
diff --git a/mm/sparse.c b/mm/sparse.c
index 77d91e565045..3991c717b769 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -907,7 +907,8 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
-	page_init_poison(memmap, sizeof(struct page) * nr_pages);
+	if (!altmap || !altmap->inaccessible)
+		page_init_poison(memmap, sizeof(struct page) * nr_pages);
 
 	ms = __nr_to_section(section_nr);
 	set_section_nid(section_nr, nid);
--
2.39.2