[tip: locking/kcsan] kcsan: Prefer __always_inline for fast-path

From: tip-bot2 for Marco Elver
Date: Tue Mar 24 2020 - 05:11:27 EST


The following commit has been merged into the locking/kcsan branch of tip:

Commit-ID: 5c361425744d1e3b03d835dde659708683ca27d1
Gitweb: https://git.kernel.org/tip/5c361425744d1e3b03d835dde659708683ca27d1
Author: Marco Elver <elver@xxxxxxxxxx>
AuthorDate: Tue, 07 Jan 2020 17:31:04 +01:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Sat, 21 Mar 2020 09:40:19 +01:00

kcsan: Prefer __always_inline for fast-path

Prefer __always_inline for fast-path functions that are called outside
of user_access_save, to avoid generating UACCESS warnings when
optimizing for size (CC_OPTIMIZE_FOR_SIZE). This also avoids future
surprises with compiler versions that change the inlining heuristic,
even when optimizing for performance.
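To illustrate the failure mode, here is a minimal, hypothetical sketch
(not taken from the patch); fast_path_helper() and check_access_sketch()
are made-up names standing in for helpers such as kcsan_is_atomic() and
the KCSAN instrumentation entry points. Under CC_OPTIMIZE_FOR_SIZE, a
plain 'static inline' helper may be emitted out of line, and objtool
then warns about the resulting call instruction in a region it
considers to run with UACCESS enabled, since only the entry points
themselves are treated as uaccess-safe:

	/* Hypothetical example, not part of this patch. */
	#include <linux/types.h>

	/*
	 * With only 'static inline', -Os may turn this into a real
	 * function call; __always_inline forces the body to be folded
	 * into every caller.
	 */
	static __always_inline bool fast_path_helper(const volatile void *ptr)
	{
		return ptr == NULL; /* stands in for a cheap fast-path check */
	}

	/*
	 * Stands in for an instrumentation entry point that may run
	 * while user access is enabled.
	 */
	void check_access_sketch(const volatile void *ptr)
	{
		if (fast_path_helper(ptr)) /* inlined: no out-of-line call */
			return;
		/* ... slow path ... */
	}

With __always_inline, the helper's body is guaranteed to land inside
its caller regardless of optimization level, so no offending call
instruction exists for objtool to flag.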

Reported-by: Randy Dunlap <rdunlap@xxxxxxxxxxxxx>
Acked-by: Randy Dunlap <rdunlap@xxxxxxxxxxxxx> # build-tested
Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Link: http://lkml.kernel.org/r/58708908-84a0-0a81-a836-ad97e33dbb62@xxxxxxxxxxxxx
---
 kernel/kcsan/atomic.h   |  2 +-
 kernel/kcsan/core.c     | 18 +++++++++---------
 kernel/kcsan/encoding.h | 14 +++++++-------
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
index 576e03d..a9c1930 100644
--- a/kernel/kcsan/atomic.h
+++ b/kernel/kcsan/atomic.h
@@ -18,7 +18,7 @@
  * than cast to volatile. Eventually, we hope to be able to remove this
  * function.
  */
-static inline bool kcsan_is_atomic(const volatile void *ptr)
+static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
 {
 	/* only jiffies for now */
 	return ptr == &jiffies;
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 3314fc2..4d4ab5c 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -78,10 +78,10 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
  */
 static DEFINE_PER_CPU(long, kcsan_skip);
 
-static inline atomic_long_t *find_watchpoint(unsigned long addr,
-                                             size_t size,
-                                             bool expect_write,
-                                             long *encoded_watchpoint)
+static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+                                                      size_t size,
+                                                      bool expect_write,
+                                                      long *encoded_watchpoint)
 {
 	const int slot = watchpoint_slot(addr);
 	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
@@ -146,7 +146,7 @@ insert_watchpoint(unsigned long addr, size_t size, bool is_write)
  * 2. the thread that set up the watchpoint already removed it;
  * 3. the watchpoint was removed and then re-used.
  */
-static inline bool
+static __always_inline bool
 try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
 {
 	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
@@ -160,7 +160,7 @@ static inline bool remove_watchpoint(atomic_long_t *watchpoint)
 	return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
 }
 
-static inline struct kcsan_ctx *get_ctx(void)
+static __always_inline struct kcsan_ctx *get_ctx(void)
 {
 	/*
 	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
@@ -169,7 +169,7 @@ static inline struct kcsan_ctx *get_ctx(void)
 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
 }
 
-static inline bool is_atomic(const volatile void *ptr)
+static __always_inline bool is_atomic(const volatile void *ptr)
 {
 	struct kcsan_ctx *ctx = get_ctx();
 
@@ -193,7 +193,7 @@ static inline bool is_atomic(const volatile void *ptr)
 	return kcsan_is_atomic(ptr);
 }
 
-static inline bool should_watch(const volatile void *ptr, int type)
+static __always_inline bool should_watch(const volatile void *ptr, int type)
 {
 	/*
 	 * Never set up watchpoints when memory operations are atomic.
@@ -226,7 +226,7 @@ static inline void reset_kcsan_skip(void)
 	this_cpu_write(kcsan_skip, skip_count);
 }
 
-static inline bool kcsan_is_enabled(void)
+static __always_inline bool kcsan_is_enabled(void)
 {
 	return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
 }
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
index b63890e..f03562a 100644
--- a/kernel/kcsan/encoding.h
+++ b/kernel/kcsan/encoding.h
@@ -59,10 +59,10 @@ encode_watchpoint(unsigned long addr, size_t size, bool is_write)
 		      (addr & WATCHPOINT_ADDR_MASK));
 }
 
-static inline bool decode_watchpoint(long watchpoint,
-                                     unsigned long *addr_masked,
-                                     size_t *size,
-                                     bool *is_write)
+static __always_inline bool decode_watchpoint(long watchpoint,
+                                              unsigned long *addr_masked,
+                                              size_t *size,
+                                              bool *is_write)
 {
 	if (watchpoint == INVALID_WATCHPOINT ||
 	    watchpoint == CONSUMED_WATCHPOINT)
@@ -78,13 +78,13 @@ static inline bool decode_watchpoint(long watchpoint,
 /*
  * Return watchpoint slot for an address.
  */
-static inline int watchpoint_slot(unsigned long addr)
+static __always_inline int watchpoint_slot(unsigned long addr)
 {
 	return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS;
 }
 
-static inline bool matching_access(unsigned long addr1, size_t size1,
-                                   unsigned long addr2, size_t size2)
+static __always_inline bool matching_access(unsigned long addr1, size_t size1,
+                                            unsigned long addr2, size_t size2)
 {
 	unsigned long end_range1 = addr1 + size1 - 1;
 	unsigned long end_range2 = addr2 + size2 - 1;