[PATCH 5.9 082/105] xen: don't use page->lru for ZONE_DEVICE memory

From: Greg Kroah-Hartman
Date: Mon Dec 14 2020 - 13:25:55 EST


From: Juergen Gross <jgross@xxxxxxxx>

commit ee32f32335e8c7f6154bf397f4ac9b6175b488a8 upstream.

Commit 9e2369c06c8a18 ("xen: add helpers to allocate unpopulated
memory") introduced usage of ZONE_DEVICE memory for foreign memory
mappings.

Unfortunately this collides with using page->lru for Xen backend
private page caches.

Fix that by using page->zone_device_data instead.

Cc: <stable@xxxxxxxxxxxxxxx> # 5.9
Fixes: 9e2369c06c8a18 ("xen: add helpers to allocate unpopulated memory")
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Boris Ostrovsky <boris.ostrovksy@xxxxxxxxxx>
Reviewed-by: Jason Andryuk <jandryuk@xxxxxxxxx>
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
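The change boils down to chaining cached pages through page->zone_device_data instead of page->lru: for ZONE_DEVICE pages the lru list head shares its storage in struct page with the ZONE_DEVICE bookkeeping, so linking such pages via lru collides with that state, while zone_device_data is the field left free for the page's owner. The stand-alone sketch below (illustration only, not kernel code; struct demo_page and struct demo_cache are hypothetical stand-ins for struct page and struct gnttab_page_cache) models the LIFO singly linked cache that the new cache_enq()/cache_deq() helpers implement for the CONFIG_XEN_UNPOPULATED_ALLOC case; the #else variants keep the previous list_head based cache, which stays safe when unpopulated-alloc (and hence ZONE_DEVICE memory) is not in use.

/*
 * Illustration only, not part of the patch: a user-space model of the
 * LIFO page cache the new helpers build on page->zone_device_data.
 * struct demo_page is a hypothetical stand-in for struct page.
 */
#include <assert.h>
#include <stddef.h>

struct demo_page {
	void *zone_device_data;		/* next page in the cache, or NULL */
};

struct demo_cache {
	struct demo_page *pages;	/* head of the singly linked cache */
};

static void cache_enq(struct demo_cache *cache, struct demo_page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}

static struct demo_page *cache_deq(struct demo_cache *cache)
{
	struct demo_page *page = cache->pages;

	cache->pages = page->zone_device_data;
	return page;
}

int main(void)
{
	struct demo_cache cache = { .pages = NULL };
	struct demo_page a, b;

	cache_enq(&cache, &a);
	cache_enq(&cache, &b);
	assert(cache_deq(&cache) == &b);	/* most recently added first */
	assert(cache_deq(&cache) == &a);
	assert(cache.pages == NULL);
	return 0;
}
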
drivers/xen/grant-table.c | 65 +++++++++++++++++++++++++++++++++++-----
drivers/xen/unpopulated-alloc.c | 20 ++++++------
include/xen/grant_table.h | 4 ++
3 files changed, 73 insertions(+), 16 deletions(-)

--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -813,10 +813,63 @@ int gnttab_alloc_pages(int nr_pages, str
 }
 EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+static inline void cache_init(struct gnttab_page_cache *cache)
+{
+	cache->pages = NULL;
+}
+
+static inline bool cache_empty(struct gnttab_page_cache *cache)
+{
+	return !cache->pages;
+}
+
+static inline struct page *cache_deq(struct gnttab_page_cache *cache)
+{
+	struct page *page;
+
+	page = cache->pages;
+	cache->pages = page->zone_device_data;
+
+	return page;
+}
+
+static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
+{
+	page->zone_device_data = cache->pages;
+	cache->pages = page;
+}
+#else
+static inline void cache_init(struct gnttab_page_cache *cache)
+{
+	INIT_LIST_HEAD(&cache->pages);
+}
+
+static inline bool cache_empty(struct gnttab_page_cache *cache)
+{
+	return list_empty(&cache->pages);
+}
+
+static inline struct page *cache_deq(struct gnttab_page_cache *cache)
+{
+	struct page *page;
+
+	page = list_first_entry(&cache->pages, struct page, lru);
+	list_del(&page->lru);
+
+	return page;
+}
+
+static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
+{
+	list_add(&page->lru, &cache->pages);
+}
+#endif
+
 void gnttab_page_cache_init(struct gnttab_page_cache *cache)
 {
 	spin_lock_init(&cache->lock);
-	INIT_LIST_HEAD(&cache->pages);
+	cache_init(cache);
 	cache->num_pages = 0;
 }
 EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
@@ -827,13 +880,12 @@ int gnttab_page_cache_get(struct gnttab_

 	spin_lock_irqsave(&cache->lock, flags);
 
-	if (list_empty(&cache->pages)) {
+	if (cache_empty(cache)) {
 		spin_unlock_irqrestore(&cache->lock, flags);
 		return gnttab_alloc_pages(1, page);
 	}
 
-	page[0] = list_first_entry(&cache->pages, struct page, lru);
-	list_del(&page[0]->lru);
+	page[0] = cache_deq(cache);
 	cache->num_pages--;
 
 	spin_unlock_irqrestore(&cache->lock, flags);
@@ -851,7 +903,7 @@ void gnttab_page_cache_put(struct gnttab
 	spin_lock_irqsave(&cache->lock, flags);
 
 	for (i = 0; i < num; i++)
-		list_add(&page[i]->lru, &cache->pages);
+		cache_enq(cache, page[i]);
 	cache->num_pages += num;
 
 	spin_unlock_irqrestore(&cache->lock, flags);
@@ -867,8 +919,7 @@ void gnttab_page_cache_shrink(struct gnt
 	spin_lock_irqsave(&cache->lock, flags);
 
 	while (cache->num_pages > num) {
-		page[i] = list_first_entry(&cache->pages, struct page, lru);
-		list_del(&page[i]->lru);
+		page[i] = cache_deq(cache);
 		cache->num_pages--;
 		if (++i == ARRAY_SIZE(page)) {
 			spin_unlock_irqrestore(&cache->lock, flags);
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -12,7 +12,7 @@
 #include <xen/xen.h>
 
 static DEFINE_MUTEX(list_lock);
-static LIST_HEAD(page_list);
+static struct page *page_list;
 static unsigned int list_count;
 
 static int fill_list(unsigned int nr_pages)
@@ -75,7 +75,8 @@ static int fill_list(unsigned int nr_pag
 		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
 
 		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
-		list_add(&pg->lru, &page_list);
+		pg->zone_device_data = page_list;
+		page_list = pg;
 		list_count++;
 	}

@@ -101,12 +102,10 @@ int xen_alloc_unpopulated_pages(unsigned
 	}
 
 	for (i = 0; i < nr_pages; i++) {
-		struct page *pg = list_first_entry_or_null(&page_list,
-							   struct page,
-							   lru);
+		struct page *pg = page_list;
 
 		BUG_ON(!pg);
-		list_del(&pg->lru);
+		page_list = pg->zone_device_data;
 		list_count--;
 		pages[i] = pg;

@@ -117,7 +116,8 @@ int xen_alloc_unpopulated_pages(unsigned
 				unsigned int j;
 
 				for (j = 0; j <= i; j++) {
-					list_add(&pages[j]->lru, &page_list);
+					pages[j]->zone_device_data = page_list;
+					page_list = pages[j];
 					list_count++;
 				}
 				goto out;
@@ -143,7 +143,8 @@ void xen_free_unpopulated_pages(unsigned

 	mutex_lock(&list_lock);
 	for (i = 0; i < nr_pages; i++) {
-		list_add(&pages[i]->lru, &page_list);
+		pages[i]->zone_device_data = page_list;
+		page_list = pages[i];
 		list_count++;
 	}
 	mutex_unlock(&list_lock);
@@ -172,7 +173,8 @@ static int __init init(void)
 			struct page *pg =
 				pfn_to_page(xen_extra_mem[i].start_pfn + j);
 
-			list_add(&pg->lru, &page_list);
+			pg->zone_device_data = page_list;
+			page_list = pg;
 			list_count++;
 		}
 	}
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -200,7 +200,11 @@ void gnttab_free_pages(int nr_pages, str

 struct gnttab_page_cache {
 	spinlock_t lock;
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+	struct page *pages;
+#else
 	struct list_head pages;
+#endif
 	unsigned int num_pages;
 };
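
For context, a hedged usage sketch of how a backend driver might use the gnttab_page_cache helpers whose internals this patch reworks: the function names come from the hunks above, the prototypes (truncated in the hunk headers) are inferred from the call sites, and the cache_demo_* module wrapper is purely illustrative.

/* Hedged sketch, not from the patch: prototypes inferred from the hunks above. */
#include <linux/module.h>
#include <xen/grant_table.h>

static struct gnttab_page_cache cache;

static int __init cache_demo_init(void)
{
	struct page *page;
	int ret;

	gnttab_page_cache_init(&cache);

	/* Get one page: reuses a cached page or falls back to gnttab_alloc_pages(). */
	ret = gnttab_page_cache_get(&cache, &page);
	if (ret)
		return ret;

	/* ... map a foreign grant into the page, use it, unmap it ... */

	/* Return the page to the cache instead of freeing it. */
	gnttab_page_cache_put(&cache, &page, 1);

	/* Keep at most 16 spare pages cached. */
	gnttab_page_cache_shrink(&cache, 16);

	return 0;
}

static void __exit cache_demo_exit(void)
{
	/* Drop every cached page before the module goes away. */
	gnttab_page_cache_shrink(&cache, 0);
}

module_init(cache_demo_init);
module_exit(cache_demo_exit);
MODULE_LICENSE("GPL");

Since gnttab_page_cache_get() and gnttab_page_cache_put() funnel through the new cache_* helpers, the same caller code works unchanged with either the zone_device_data based cache or the list_head fallback.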