[PATCH 04/24] mm/swap: avoid setting page lock bit and doing extra unlock check

From: Kairui Song
Date: Sun Nov 19 2023 - 14:48:27 EST


From: Kairui Song <kasong@xxxxxxxxxxx>

When swapping in a page, mem_cgroup_swapin_charge_folio() is called on a
newly allocated folio. Nothing else references the folio at that point, so
there is no need to set the lock bit before charging it. Charging before
locking also avoids the extra unlock on the error path.
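
For reference, a simplified sketch of the resulting ordering in
__read_swap_cache_async() (error labels and unrelated steps abbreviated):

	folio = ...;					/* freshly allocated, unreferenced */
	if (!folio)
		goto fail_put_swap;
	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_put_folio;			/* no lock held, a plain folio_put() suffices */
	...
	__folio_set_locked(folio);			/* lock only once the swap entry is ours */
	__folio_set_swapbacked(folio);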

Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
mm/swap_state.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index ac4fa404eaa7..45dd8b7c195d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -458,6 +458,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
mpol, ilx, numa_node_id());
if (!folio)
goto fail_put_swap;
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
+ goto fail_put_folio;

/*
* Swap entry may have been freed since our caller observed it.
@@ -483,13 +485,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* The swap entry is ours to swap in. Prepare the new page.
*/
-
__folio_set_locked(folio);
__folio_set_swapbacked(folio);

- if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
- goto fail_unlock;
-
/* May fail (-ENOMEM) if XArray node allocation failed. */
if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
@@ -510,6 +508,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
fail_unlock:
put_swap_folio(folio, entry);
folio_unlock(folio);
+fail_put_folio:
folio_put(folio);
fail_put_swap:
put_swap_device(si);
@@ -873,16 +872,15 @@ struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
vma, vmf->address, false);
if (folio) {
- __folio_set_locked(folio);
- __folio_set_swapbacked(folio);
-
- if (mem_cgroup_swapin_charge_folio(folio,
- vma->vm_mm, GFP_KERNEL,
- entry)) {
- folio_unlock(folio);
+ if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+ GFP_KERNEL, entry)) {
folio_put(folio);
return NULL;
}
+
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+
mem_cgroup_swapin_uncharge_swap(entry);

shadow = get_shadow_from_swap_cache(entry);
--
2.42.0