[PATCH RFC 08/20] kasan: clean up __kasan_mempool_poison_object

From: andrey.konovalov
Date: Mon Nov 06 2023 - 15:11:53 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

Reorganize the code and reword the comment in
__kasan_mempool_poison_object to improve readability.

Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
---
mm/kasan/common.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
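
For reference, the function as it reads with this patch applied
(reconstructed from the hunk below):

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	slab = folio_slab(folio);
	return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
}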

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6283f0206ef6..7c28d0a5af2c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -447,27 +447,22 @@ void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
- struct folio *folio;
-
- folio = virt_to_folio(ptr);
+ struct folio *folio = virt_to_folio(ptr);
+ struct slab *slab;

/*
- * Even though this function is only called for kmem_cache_alloc and
- * kmalloc backed mempool allocations, those allocations can still be
- * !PageSlab() when the size provided to kmalloc is larger than
- * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ * This function can be called for large kmalloc allocations that get
+ * their memory from page_alloc. Thus, the folio might not be a slab.
*/
if (unlikely(!folio_test_slab(folio))) {
if (check_page_allocation(ptr, ip))
return false;
kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
return true;
- } else {
- struct slab *slab = folio_slab(folio);
-
- return !____kasan_slab_free(slab->slab_cache, ptr, ip,
- false, false);
}
+
+ slab = folio_slab(folio);
+ return !____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
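
As a side note for reviewers, not part of the commit: the rewritten
comment covers mempool elements whose memory comes from page_alloc
rather than slab. Below is a minimal sketch of a pool that takes that
path, assuming a kmalloc-backed mempool whose pre-allocated elements
get poisoned through the mempool KASAN hook; the module boilerplate
and the names big_pool and big_pool_example_init are illustrative
only:

#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

static mempool_t *big_pool;	/* hypothetical example pool */

static int __init big_pool_example_init(void)
{
	/*
	 * Element sizes above KMALLOC_MAX_CACHE_SIZE make kmalloc fall
	 * back to page_alloc, so virt_to_folio() on an element yields a
	 * non-slab folio. The two elements pre-allocated here are thus
	 * expected to be poisoned via the !folio_test_slab() branch above.
	 */
	big_pool = mempool_create_kmalloc_pool(2, KMALLOC_MAX_CACHE_SIZE + 1);
	if (!big_pool)
		return -ENOMEM;

	mempool_destroy(big_pool);
	return 0;
}
module_init(big_pool_example_init);
MODULE_LICENSE("GPL");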
--
2.25.1