[PATCH] mm: Complete documenting the use of lru for pgd_list

From: Ira Weiny
Date: Wed Apr 29 2020 - 14:10:59 EST


struct page provides pgd_list as a documenting alias of lru for pages
that are used as page global directories.  Convert the remaining x86
users of page->lru in the pgd_list code to the pgd_list name so the
code self-documents that these pages are PGD pages, not LRU pages.
No functional change intended.

Signed-off-by: Ira Weiny <ira.weiny@xxxxxxxxx>
---
arch/x86/mm/fault.c | 2 +-
arch/x86/mm/init_64.c | 4 ++--
arch/x86/mm/pat/set_memory.c | 2 +-
arch/x86/mm/pgtable.c | 4 ++--
arch/x86/xen/mmu_pv.c | 4 ++--
5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a51df516b87b..f07d477f8787 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -203,7 +203,7 @@ static void vmalloc_sync(void)
struct page *page;

spin_lock(&pgd_lock);
- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
spinlock_t *pgt_lock;

/* the pgt_lock only for Xen */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3b289c2f75cd..e2ae3618a65d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -140,7 +140,7 @@ static void sync_global_pgds_l5(unsigned long start, unsigned long end)
continue;

spin_lock(&pgd_lock);
- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
pgd_t *pgd;
spinlock_t *pgt_lock;

@@ -181,7 +181,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
continue;

spin_lock(&pgd_lock);
- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
pgd_t *pgd;
p4d_t *p4d;
spinlock_t *pgt_lock;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 59eca6a94ce7..a1edfc593141 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -723,7 +723,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
if (!SHARED_KERNEL_PMD) {
struct page *page;

- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8f4255662c5a..28ea8cc3f3a2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -87,14 +87,14 @@ static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);

- list_add(&page->lru, &pgd_list);
+ list_add(&page->pgd_list, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);

- list_del(&page->lru);
+ list_del(&page->pgd_list);
page->pt_mm = NULL;
}

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index bbba8b17829a..df6592be3208 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -844,7 +844,7 @@ void xen_mm_pin_all(void)

spin_lock(&pgd_lock);

- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
if (!PagePinned(page)) {
__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
SetPageSavePinned(page);
@@ -963,7 +963,7 @@ void xen_mm_unpin_all(void)

spin_lock(&pgd_lock);

- list_for_each_entry(page, &pgd_list, lru) {
+ list_for_each_entry(page, &pgd_list, pgd_list) {
if (PageSavePinned(page)) {
BUG_ON(!PagePinned(page));
__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
--
2.25.1