[PATCH RFC v2 10/27] mm: Call arch_swap_prepare_to_restore() before arch_swap_restore()

From: Alexandru Elisei
Date: Sun Nov 19 2023 - 11:59:00 EST


arm64 uses arch_swap_restore() to restore saved tags before the page is
swapped in, and the hook is called in atomic context (with the ptl held).

Introduce arch_swap_prepare_to_restore(), which allows an architecture to
perform extra work during swap-in, outside of a critical section.

Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
---
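As an illustration only (not part of this patch, and not necessarily how
arm64 will end up using the hook), an architecture that needs to do
sleepable work before arch_swap_restore() would define
__HAVE_ARCH_SWAP_PREPARE_TO_RESTORE in its asm/pgtable.h and provide its
own implementation along these lines; arch_reserve_metadata_storage() below
is a made-up helper for the example:

#define __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
static inline vm_fault_t arch_swap_prepare_to_restore(swp_entry_t entry,
						       struct folio *folio)
{
	/*
	 * Called before the ptl is taken, so sleeping and memory
	 * allocation are allowed here, unlike in arch_swap_restore().
	 * Returning a VM_FAULT_* code makes the caller back out of the
	 * swap-in.
	 */
	if (arch_reserve_metadata_storage(folio))	/* hypothetical helper */
		return VM_FAULT_OOM;

	return 0;
}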
 include/linux/pgtable.h | 7 +++++++
 mm/memory.c             | 4 ++++
 mm/shmem.c              | 9 +++++++++
 mm/swapfile.c           | 7 +++++++
 4 files changed, 27 insertions(+)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b1001ce361ac..ffdb9b6bed6c 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -938,6 +938,13 @@ static inline void arch_swap_invalidate_area(int type)
 }
 #endif
 
+#ifndef __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
+static inline vm_fault_t arch_swap_prepare_to_restore(swp_entry_t entry, struct folio *folio)
+{
+	return 0;
+}
+#endif
+
 #ifndef __HAVE_ARCH_SWAP_RESTORE
 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 1f18ed4a5497..e137f7673749 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3957,6 +3957,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 	folio_throttle_swaprate(folio, GFP_KERNEL);
 
+	ret = arch_swap_prepare_to_restore(entry, folio);
+	if (ret)
+		goto out_page;
+
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
diff --git a/mm/shmem.c b/mm/shmem.c
index 71ce5fe5c779..0449c03dbdfd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1840,6 +1840,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	struct swap_info_struct *si;
 	struct folio *folio = NULL;
 	swp_entry_t swap;
+	vm_fault_t ret;
 	int error;
 
 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
@@ -1888,6 +1889,14 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	}
 	folio_wait_writeback(folio);
 
+	ret = arch_swap_prepare_to_restore(swap, folio);
+	if (ret) {
+		if (fault_type)
+			*fault_type = ret;
+		error = -EINVAL;
+		goto unlock;
+	}
+
 	/*
 	 * Some architectures may have to restore extra metadata to the
 	 * folio after reading from swap.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4bc70f459164..9983dffce47b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1746,6 +1746,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
 	bool hwpoisoned = PageHWPoison(page);
+	vm_fault_t err;
 	int ret = 1;
 
 	swapcache = page;
@@ -1779,6 +1780,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto setpte;
 	}
 
+	err = arch_swap_prepare_to_restore(entry, page_folio(page));
+	if (err) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Some architectures may have to restore extra metadata to the page
 	 * when reading from swap. This metadata may be indexed by swap entry
--
2.42.1