[PATCH 03/24] mm/swap: move no readahead swapin code to a standalone helper

From: Kairui Song
Date: Sun Nov 19 2023 - 14:48:27 EST


From: Kairui Song <kasong@xxxxxxxxxxx>

No functional change; simply move the routine into a standalone helper so
it can be reused later. The error path handling is copied from the
"out_page" label to keep the code change minimal and easy to review.
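
With the helper in place, the SWP_SYNCHRONOUS_IO fast path in
do_swap_page() reduces to a single call. A condensed sketch of the
resulting call site, taken from the mm/memory.c hunk below:

	if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
	    __swap_count(entry) == 1) {
		/* skip swapcache and readahead */
		page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
		if (page)
			folio = page_folio(page);
	} else {
		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
		...
	}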

Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
mm/memory.c | 33 +++++----------------------------
mm/swap.h | 2 ++
mm/swap_state.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 55 insertions(+), 28 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 70ffa867b1be..fba4a5229163 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3794,7 +3794,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
swp_entry_t entry;
pte_t pte;
vm_fault_t ret = 0;
- void *shadow = NULL;

if (!pte_unmap_same(vmf))
goto out;
@@ -3858,33 +3857,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
- /* skip swapcache */
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
- vma, vmf->address, false);
- if (folio) {
- __folio_set_locked(folio);
- __folio_set_swapbacked(folio);
-
- if (mem_cgroup_swapin_charge_folio(folio,
- vma->vm_mm, GFP_KERNEL,
- entry)) {
- ret = VM_FAULT_OOM;
- goto out_page;
- }
- mem_cgroup_swapin_uncharge_swap(entry);
-
- shadow = get_shadow_from_swap_cache(entry);
- if (shadow)
- workingset_refault(folio, shadow);
-
- folio_add_lru(folio);
- page = &folio->page;
-
- /* To provide entry to swap_readpage() */
- folio->swap = entry;
- swap_readpage(page, true, NULL);
- folio->private = NULL;
- }
+ /* skip swapcache and readahead */
+ page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ vmf);
+ if (page)
+ folio = page_folio(page);
} else {
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf);
diff --git a/mm/swap.h b/mm/swap.h
index 73c332ee4d91..ea4be4791394 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -56,6 +56,8 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
+struct page *swapin_no_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);

static inline unsigned int folio_swap_flags(struct folio *folio)
{
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 85d9e5806a6a..ac4fa404eaa7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -853,6 +853,54 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
return page;
}

+/**
+ * swapin_no_readahead - swap in a page, skipping the swap cache and readahead
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page once the swap entry has been read in, or NULL
+ * if folio allocation or the memcg charge fails.
+ */
+struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct page *page = NULL;
+ struct folio *folio;
+ void *shadow = NULL;
+
+ folio = vma_alloc_folio(gfp_mask, 0,
+ vma, vmf->address, false);
+ if (folio) {
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+
+ if (mem_cgroup_swapin_charge_folio(folio,
+ vma->vm_mm, GFP_KERNEL,
+ entry)) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return NULL;
+ }
+ mem_cgroup_swapin_uncharge_swap(entry);
+
+ shadow = get_shadow_from_swap_cache(entry);
+ if (shadow)
+ workingset_refault(folio, shadow);
+
+ folio_add_lru(folio);
+
+ /* To provide entry to swap_readpage() */
+ folio->swap = entry;
+ page = &folio->page;
+ swap_readpage(page, true, NULL);
+ folio->private = NULL;
+ }
+
+ return page;
+}
+
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
--
2.42.0