[PATCH] mm/slub, kasan: fix checking page_alloc allocations on free

From: andrey.konovalov
Date: Tue Aug 10 2021 - 19:47:16 EST


From: Andrey Konovalov <andreyknvl@xxxxxxxxx>

A fix for the stat counters, commit f227f0faf63b ("slub: fix unreclaimable
slab stat for bulk free"), used page_address(page) as the kfree_hook()
argument instead of the object pointer. While the change is technically
correct, it breaks KASAN's ability to detect improper (unaligned) pointers
passed to kfree() and causes the kmalloc_pagealloc_invalid_free test to fail.
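
The check that gets defeated compares the pointer being freed against the
start of the underlying compound page; once free_nonslab_page() passes
page_address(page), that comparison can never fail. Roughly (a simplified
sketch of the KASAN invalid-free check for page_alloc-backed objects, not
the exact mm/kasan code):

	if (ptr != page_address(virt_to_head_page(ptr)))
		/* never triggers when ptr is already page_address(page) */
		kasan_report_invalid_free(ptr, ip);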

This patch changes free_nonslab_page() to take the object pointer and pass
it to kfree_hook() instead of page_address(page), as was done before that
fix.
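
For reference, the failing test does approximately the following
(paraphrased from lib/test_kasan.c): allocate an object large enough that
kmalloc() falls back to the page allocator, then free it through an offset
pointer and expect an invalid-free report:

	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;	/* served by page_alloc */
	char *ptr = kmalloc(size, GFP_KERNEL);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));	/* unaligned pointer */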

Fixes: f227f0faf63b ("slub: fix unreclaimable slab stat for bulk free")
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxx>
---
mm/slub.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index af984e4990e8..56079dd33c74 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3236,12 +3236,12 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(void *object, struct page *page)
 {
 	unsigned int order = compound_order(page);
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
-	kfree_hook(page_address(page));
+	kfree_hook(object);
 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
 	__free_pages(page, order);
 }
@@ -3282,7 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page);
+			free_nonslab_page(object, page);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4258,7 +4258,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page);
+		free_nonslab_page(object, page);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
--
2.25.1