[PATCH 57/75] mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()

From: Matthew Wilcox (Oracle)
Date: Fri Feb 04 2022 - 15:03:46 EST


Add back page_lock_anon_vma_read() as a wrapper around the new
folio_lock_anon_vma_read(). This saves a few calls to compound_head().
If any callers were passing a tail page before, the lookup would have
failed to lock the anon VMA, as page->mapping is not valid for tail pages.
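
To make the tail-page hazard concrete, here is an illustrative sketch (not
part of the patch; page_folio() and folio_lock_anon_vma_read() are the real
APIs, the surrounding code is invented for illustration):

	/*
	 * A compound page's tail pages do not carry a usable ->mapping;
	 * only the folio (i.e. the head page) does. page_folio() resolves
	 * any page, head or tail, to its folio, which is exactly what the
	 * compat wrapper below does before the anon_vma lookup.
	 */
	struct folio *folio = page_folio(page);	/* head, even for a tail page */
	struct anon_vma *av = folio_lock_anon_vma_read(folio);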

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/rmap.h |  1 +
 mm/folio-compat.c    |  5 +++++
 mm/memory-failure.c  |  3 ++-
 mm/rmap.c            | 12 ++++++------
 4 files changed, 14 insertions(+), 7 deletions(-)
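
A note on the shape of the function being converted: condensed, the control
flow is roughly the following (a sketch only; error paths and the blocking
slow path are elided, see the mm/rmap.c hunks below for the real code):

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;		/* not anonymous memory */
	if (!folio_mapped(folio))
		goto out;		/* no longer mapped, no anon_vma to take */

	anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
	if (down_read_trylock(&anon_vma->root->rwsem)) {
		/* fast path: rwsem taken while still under RCU */
	} else {
		/* slow path: pin the anon_vma, drop RCU, then block on it */
	}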

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 85d17a38642c..71798112a575 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -269,6 +269,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
  * Called by memory-failure.c to kill processes.
  */
 struct anon_vma *page_lock_anon_vma_read(struct page *page);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 3804fd8c1f20..e04fba5e45e5 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -185,3 +185,8 @@ void page_mlock(struct page *page)
 {
 	folio_mlock(page_folio(page));
 }
+
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
+{
+	return folio_lock_anon_vma_read(page_folio(page));
+}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1c7a71b5248e..ed1a47d9c35d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				int force_early)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
 
-	av = page_lock_anon_vma_read(page);
+	av = folio_lock_anon_vma_read(folio);
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index ffc1b2f0cf24..ba65d5d3eb5a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -526,28 +526,28 @@ struct anon_vma *page_get_anon_vma(struct page *page)
  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  * reference like with page_get_anon_vma() and then block on the mutex.
  */
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
 {
 	struct anon_vma *anon_vma = NULL;
 	struct anon_vma *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
-	if (!page_mapped(page))
+	if (!folio_mapped(folio))
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	root_anon_vma = READ_ONCE(anon_vma->root);
 	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
-		 * If the page is still mapped, then this anon_vma is still
+		 * If the folio is still mapped, then this anon_vma is still
 		 * its anon_vma, and holding the mutex ensures that it will
 		 * not go away, see anon_vma_free().
 		 */
-		if (!page_mapped(page)) {
+		if (!folio_mapped(folio)) {
 			up_read(&root_anon_vma->rwsem);
 			anon_vma = NULL;
 		}
@@ -560,7 +560,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 		goto out;
 	}
 
-	if (!page_mapped(page)) {
+	if (!folio_mapped(folio)) {
 		rcu_read_unlock();
 		put_anon_vma(anon_vma);
 		return NULL;
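
As before, a successful lookup must eventually be paired with
page_unlock_anon_vma_read(). The memory-failure caller converted above keeps
that shape; in outline (illustrative only, the real code is in
collect_procs_anon()):

	av = folio_lock_anon_vma_read(folio);
	if (av == NULL)		/* not actually mapped anymore */
		return;
	/* ... collect the tasks mapping the folio ... */
	page_unlock_anon_vma_read(av);
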
--
2.34.1