[PATCH v5 3/5] mm/zswap: cleanup zswap_load()

From: Chengming Zhou
Date: Thu Dec 28 2023 - 04:47:26 EST


After the common decompress part was moved into __zswap_load(), zswap_load()
itself can be cleaned up a little: every path sets the local 'ret' to true,
so the variable and the 'stats' label can both be dropped.
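
The shape of the change is the classic removal of a ret/goto pair that can
only ever hold one value. A minimal, self-contained sketch of the same
refactor in plain C (fill_page() and decompress_page() are hypothetical
stand-ins for zswap_fill_page() and __zswap_load(), not the kernel helpers,
and the event accounting is reduced to a printf):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins, not the kernel helpers. */
static void fill_page(int value)       { printf("fill %d\n", value); }
static void decompress_page(int value) { printf("decompress %d\n", value); }

/* Before: 'ret' is assigned true on every path, so it carries no
 * information and only exists to feed the 'stats' goto. */
static bool load_before(int length, int value)
{
	bool ret;

	if (!length) {
		fill_page(value);
		ret = true;
		goto stats;
	}
	decompress_page(value);
	ret = true;
stats:
	printf("count ZSWPIN\n");
	return ret;
}

/* After: straight-line flow, return the constant directly. */
static bool load_after(int length, int value)
{
	if (length)
		decompress_page(value);
	else
		fill_page(value);
	printf("count ZSWPIN\n");
	return true;
}

int main(void)
{
	load_before(0, 42);	/* zero-filled page path */
	load_after(8, 42);	/* decompress path */
	return 0;
}

Once the function can only return true, the exclusive-load check in the
hunk below also loses its 'ret &&' guard.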

Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Acked-by: Chris Li <chrisl@xxxxxxxxxx> (Google)
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
mm/zswap.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index b25d7d03851d..618989463535 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1760,7 +1760,6 @@ bool zswap_load(struct folio *folio)
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
 	u8 *dst;
-	bool ret;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
@@ -1773,23 +1772,20 @@ bool zswap_load(struct folio *folio)
 	}
 	spin_unlock(&tree->lock);
 
-	if (!entry->length) {
+	if (entry->length)
+		__zswap_load(entry, page);
+	else {
 		dst = kmap_local_page(page);
 		zswap_fill_page(dst, entry->value);
 		kunmap_local(dst);
-		ret = true;
-		goto stats;
 	}
 
-	__zswap_load(entry, page);
-	ret = true;
-stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
 	spin_lock(&tree->lock);
-	if (ret && zswap_exclusive_loads_enabled) {
+	if (zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		folio_mark_dirty(folio);
 	} else if (entry->length) {
@@ -1799,7 +1795,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	return ret;
+	return true;
 }
 
 void zswap_invalidate(int type, pgoff_t offset)

--
b4 0.10.1