[PATCH RFC 06/20] kasan: introduce kasan_mempool_poison_pages

From: Andrey Konovalov
Date: Mon Nov 06 2023 - 15:11:51 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Introduce and document a kasan_mempool_poison_pages hook to be used
by the mempool code instead of kasan_poison_pages.

Compared to kasan_poison_pages, the new hook:

1. For the tag-based modes, skips checking and poisoning allocations that
were not tagged due to sampling.

2. Checks for double-free and invalid-free bugs.

In the future, kasan_poison_pages can also be updated to handle #2, but
that is out of scope for this series. A brief usage sketch follows.
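
For illustration, a subsystem that caches page allocations could use the
new hook roughly as follows. This is a minimal sketch only: the pages_cache
structure and pages_cache_put function are hypothetical, and the actual
mempool wiring lands later in this series.

#include <linux/kasan.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical cache of reusable page allocations (illustrative only). */
struct pages_cache {
	struct list_head pages;
};

static void pages_cache_put(struct pages_cache *cache, struct page *page,
			    unsigned int order)
{
	/*
	 * kasan_mempool_poison_pages() checks for double-free and
	 * invalid-free bugs and poisons the memory. If it returns false,
	 * a bug was detected, and the pages must not be reused.
	 */
	if (!kasan_mempool_poison_pages(page, order))
		return;

	/* Safe to stash the pages for later reuse. */
	list_add(&page->lru, &cache->pages);
}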

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
 include/linux/kasan.h | 27 +++++++++++++++++++++++++++
 mm/kasan/common.c     | 23 +++++++++++++++++++++++
 2 files changed, 50 insertions(+)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index c5fe303bc1c2..de2a695ad34d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -212,6 +212,29 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+				  unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+						       unsigned int order)
+{
+	if (kasan_enabled())
+		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+	return true;
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
 /**
  * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
@@ -326,6 +349,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+	return true;
+}
 static inline bool kasan_mempool_poison_object(void *ptr)
 {
 	return true;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 033c860afe51..9ccc78b20cf2 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -416,6 +416,29 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+				  unsigned long ip)
+{
+	unsigned long *ptr;
+
+	if (unlikely(PageHighMem(page)))
+		return true;
+
+	/* Bail out if allocation was excluded due to sampling. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
+		return true;
+
+	ptr = page_address(page);
+
+	if (check_page_allocation(ptr, ip))
+		return false;
+
+	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
+
+	return true;
+}
+
 bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 {
 	struct folio *folio;
--
2.25.1