[PATCH v2 4/7] slab: Add __alloc_size attributes for better bounds checking

From: Kees Cook
Date: Wed Aug 18 2021 - 17:40:48 EST

As already done in GrapheneOS, add the __alloc_size attribute for
regular kmalloc interfaces, to provide additional hinting for better
bounds checking, assisting CONFIG_FORTIFY_SOURCE and other compiler
optimizations.
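
For reference, here is a minimal userspace sketch (not part of this
patch; my_alloc() and my_calloc() are hypothetical stand-ins for
kmalloc() and kcalloc()) of what the attribute lets the compiler see:

#include <stdlib.h>
#include <string.h>

/* Single size argument, as with kmalloc(): */
__attribute__((malloc, alloc_size(1)))
void *my_alloc(size_t size)
{
	return malloc(size);
}

/* Element count and element size, as with kcalloc(): */
__attribute__((malloc, alloc_size(1, 2)))
void *my_calloc(size_t n, size_t size)
{
	return calloc(n, size);
}

void example(void)
{
	char *buf = my_alloc(16);

	if (!buf)
		return;

	/*
	 * Thanks to alloc_size(1), __builtin_object_size(buf, 1)
	 * evaluates to 16 instead of -1 ("unknown"), so a fortified
	 * memset() can flag this overflow at compile time (or trap
	 * at run time) rather than silently corrupting the 16 bytes
	 * beyond the allocation:
	 */
	memset(buf, 0, 32);

	free(buf);
}

Built with something like "gcc -O2 -D_FORTIFY_SOURCE=2 -Wall", the
memset() above draws a compile-time warning (or a __memset_chk()
abort at run time); without the attribute the object size is unknown
and the overflow goes unnoticed.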

Co-developed-by: Daniel Micay <danielmicay@xxxxxxxxx>
Signed-off-by: Daniel Micay <danielmicay@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: linux-mm@xxxxxxxxx
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
include/linux/slab.h | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 10fd0a8c816a..6ce826d8194d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -181,7 +181,7 @@ int kmem_cache_shrink(struct kmem_cache *s);
/*
* Common kmalloc functions provided by all allocators
*/
-__must_check
+__must_check __alloc_size(2)
void *krealloc(const void *objp, size_t new_size, gfp_t flags);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
@@ -426,6 +426,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

+__alloc_size(1)
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_kmalloc_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
@@ -450,6 +451,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}

#ifdef CONFIG_NUMA
+__alloc_size(1)
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
__assume_slab_alignment __malloc;
@@ -574,6 +576,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
+__alloc_size(1)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
@@ -596,6 +599,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}

+__alloc_size(1)
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
@@ -620,6 +624,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1, 2)
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -638,7 +643,7 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-__must_check
+__must_check __alloc_size(2, 3)
static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
gfp_t flags)
{
@@ -656,6 +661,7 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1, 2)
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | __GFP_ZERO);
@@ -685,6 +691,7 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
return __kmalloc_node(bytes, flags, node);
}

+__alloc_size(1, 2)
static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
@@ -718,6 +725,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
+__alloc_size(1)
static inline void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | __GFP_ZERO);
@@ -729,25 +737,31 @@ static inline void *kzalloc(size_t size, gfp_t flags)
* @flags: the type of memory to allocate (see kmalloc).
* @node: memory node from which to allocate
*/
+__alloc_size(1)
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | __GFP_ZERO, node);
}

+__alloc_size(1)
extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+__alloc_size(1)
static inline void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
+__alloc_size(1)
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
+__alloc_size(1)
static inline void *kvzalloc(size_t size, gfp_t flags)
{
return kvmalloc(size, flags | __GFP_ZERO);
}

+__alloc_size(1, 2)
static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
@@ -758,11 +772,13 @@ static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
return kvmalloc(bytes, flags);
}

+__alloc_size(1, 2)
static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

+__alloc_size(3)
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
gfp_t flags);
extern void kvfree(const void *addr);
--
2.30.2