[PATCH v4 4/6] mm/zswap: cleanup zswap_load()

From: Chengming Zhou
Date: Tue Dec 26 2023 - 10:56:22 EST


After the common decompress part was moved into __zswap_load(), we can
clean up zswap_load() a little.

Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Acked-by: Chris Li <chrisl@xxxxxxxxxx> (Google)
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
mm/zswap.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
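
For reference, the function reads as follows after this patch. This is a
condensed sketch, not compilable on its own: the entry lookup, the
!entry->length (same-filled) condition and the LRU handling in the
non-exclusive branch are not visible in the hunks below and are assumed
from the surrounding zswap code; error paths are elided.

bool zswap_load(struct folio *folio)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/* ... look up the entry for this offset, return false if absent ... */

	if (!entry->length) {
		/* same-filled page: reconstruct the pattern directly */
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	} else {
		/* compressed page: common decompress helper */
		__zswap_load(entry, page);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	spin_lock(&tree->lock);
	if (zswap_exclusive_loads_enabled) {
		/* exclusive load: drop the zswap copy */
		zswap_invalidate_entry(tree, entry);
		folio_mark_dirty(folio);
	} else if (entry->length) {
		/* ... LRU handling, elided in the hunks below ... */
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return true;
}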

diff --git a/mm/zswap.c b/mm/zswap.c
index 3433bd6b3cef..86886276cb81 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1759,7 +1759,6 @@ bool zswap_load(struct folio *folio)
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
 	u8 *dst;
-	bool ret;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
@@ -1776,19 +1775,16 @@ bool zswap_load(struct folio *folio)
 		dst = kmap_local_page(page);
 		zswap_fill_page(dst, entry->value);
 		kunmap_local(dst);
-		ret = true;
-		goto stats;
+	} else {
+		__zswap_load(entry, page);
 	}
 
-	__zswap_load(entry, page);
-	ret = true;
-stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
 	spin_lock(&tree->lock);
-	if (ret && zswap_exclusive_loads_enabled) {
+	if (zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		folio_mark_dirty(folio);
 	} else if (entry->length) {
@@ -1798,7 +1794,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	return ret;
+	return true;
 }
 
 void zswap_invalidate(int type, pgoff_t offset)

--
b4 0.10.1