[PATCH v3 11/14] mm/ksm: remove get_ksm_page and related info

From: alexs
Date: Mon Mar 25 2024 - 15:31:53 EST


From: "Alex Shi (tencent)" <alexs@xxxxxxxxxx>

Now that all callers have been converted to ksm_get_folio, sync up the
remaining references with ksm_get_folio and remove get_ksm_page.
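
For reference, a minimal sketch of what a converted caller looks like
(illustrative only, modeled on the remove_stable_node() hunk below; the
function and flag names are the ones already introduced earlier in this
series):

	struct folio *folio;

	folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
	if (!folio) {
		/* stable node was stale; ksm_get_folio() already removed it */
		return 0;
	}

	/* on success the folio is locked and holds an extra reference */
	folio_unlock(folio);
	folio_put(folio);

A caller that still needs a struct page can take &folio->page, which is
all the removed get_ksm_page() wrapper did.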

Signed-off-by: Alex Shi (tencent) <alexs@xxxxxxxxxx>
Cc: Izik Eidus <izik.eidus@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Chris Wright <chrisw@xxxxxxxxxxxx>
---
 mm/ksm.c     | 34 +++++++++++++---------------------
 mm/migrate.c |  2 +-
 2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index e92445f29685..0ad02524e363 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -890,14 +890,14 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
free_stable_node(stable_node);
}

-enum get_ksm_page_flags {
+enum ksm_get_folio_flags {
GET_KSM_PAGE_NOLOCK,
GET_KSM_PAGE_LOCK,
GET_KSM_PAGE_TRYLOCK
};

/*
- * get_ksm_page: checks if the page indicated by the stable node
+ * ksm_get_folio: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
* In which case we can trust the content of the page, and it
* returns the gotten page; but if the page has now been zapped,
@@ -916,7 +916,7 @@ enum get_ksm_page_flags {
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
- enum get_ksm_page_flags flags)
+ enum ksm_get_folio_flags flags)
{
struct folio *folio;
void *expected_mapping;
@@ -990,14 +990,6 @@ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
return NULL;
}

-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
- enum get_ksm_page_flags flags)
-{
- struct folio *folio = ksm_get_folio(stable_node, flags);
-
- return &folio->page;
-}
-
/*
* Removing rmap_item from stable or unstable tree.
* This function will clean the information from the stable/unstable tree.
@@ -1127,7 +1119,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
folio = ksm_get_folio(stable_node, GET_KSM_PAGE_LOCK);
if (!folio) {
/*
- * get_ksm_page did remove_node_from_stable_tree itself.
+ * ksm_get_folio did remove_node_from_stable_tree itself.
*/
return 0;
}
@@ -1140,7 +1132,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
err = -EBUSY;
if (!folio_mapped(folio)) {
/*
- * The stable node did not yet appear stale to get_ksm_page(),
+ * The stable node did not yet appear stale to ksm_get_folio(),
* since that allows for an unmapped ksm folio to be recognized
* right up until it is freed; but the node is safe to remove.
* This folio might be in an LRU cache waiting to be freed,
@@ -1657,7 +1649,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
* We must walk all stable_node_dup to prune the stale
* stable nodes during lookup.
*
- * get_ksm_page can drop the nodes from the
+ * ksm_get_folio can drop the nodes from the
* stable_node->hlist if they point to freed pages
* (that's why we do a _safe walk). The "dup"
* stable_node parameter itself will be freed from
@@ -1764,7 +1756,7 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl
}

/*
- * Like for get_ksm_page, this function can free the *_stable_node and
+ * Like for ksm_get_folio, this function can free the *_stable_node and
* *_stable_node_dup if the returned tree_page is NULL.
*
* It can also free and overwrite *_stable_node with the found
@@ -1786,7 +1778,7 @@ static void *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
- return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+ return ksm_get_folio(stable_node, GET_KSM_PAGE_NOLOCK);
}
/*
* _stable_node_dup set to NULL means the stable_node
@@ -1900,7 +1892,7 @@ static void *stable_tree_search(struct page *page)
if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * ksm_get_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -2133,7 +2125,7 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * ksm_get_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -3245,7 +3237,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
/*
* newfolio->mapping was set in advance; now we need smp_wmb()
* to make sure that the new stable_node->kpfn is visible
- * to get_ksm_page() before it can see that folio->mapping
+ * to ksm_get_folio() before it can see that folio->mapping
* has gone stale (or that folio_test_swapcache has been cleared).
*/
smp_wmb();
@@ -3272,7 +3264,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
if (stable_node->kpfn >= start_pfn &&
stable_node->kpfn < end_pfn) {
/*
- * Don't get_ksm_page, page has already gone:
+ * Don't ksm_get_folio, page has already gone:
* which is why we keep kpfn instead of page*
*/
remove_node_from_stable_tree(stable_node);
@@ -3360,7 +3352,7 @@ static int ksm_memory_callback(struct notifier_block *self,
* Most of the work is done by page migration; but there might
* be a few stable_nodes left over, still pointing to struct
* pages which have been offlined: prune those from the tree,
- * otherwise get_ksm_page() might later try to access a
+ * otherwise ksm_get_folio() might later try to access a
* non-existent struct page.
*/
ksm_check_stable_tree(mn->start_pfn,
diff --git a/mm/migrate.c b/mm/migrate.c
index 73a052a382f1..9f0494fd902c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -616,7 +616,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_migrate_ksm(newfolio, folio);
/*
* Please do not reorder this without considering how mm/ksm.c's
- * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
+ * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
*/
if (folio_test_swapcache(folio))
folio_clear_swapcache(folio);
--
2.43.0