[for-next][PATCH 09/20] tracepoint: Optimize the critical region of mutex_lock in tracepoint_module_coming()

From: Steven Rostedt
Date: Tue Sep 27 2022 - 12:02:35 EST


From: Zhen Lei <thunder.leizhen@xxxxxxxxxx>

The memory allocation of 'tp_mod' does not require mutex_lock()
protection, so move it outside the critical region.
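
In other words, the change follows the usual pattern of keeping only the
shared-state update under the lock and doing the (possibly sleeping,
possibly failing) allocation beforehand. A minimal userspace sketch of
that pattern is below; the names struct item, item_list, list_mutex and
item_add() are purely illustrative and not taken from the kernel source:

	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	struct item {
		struct item *next;
		int value;
	};

	static struct item *item_list;	/* shared state */
	static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

	static int item_add(int value)
	{
		struct item *it;

		/*
		 * The allocation can fail or block; it does not touch
		 * shared state, so there is no need to hold the lock
		 * across it.
		 */
		it = malloc(sizeof(*it));
		if (!it)
			return -ENOMEM;
		it->value = value;

		/* The critical region covers only the shared-list update. */
		pthread_mutex_lock(&list_mutex);
		it->next = item_list;
		item_list = it;
		pthread_mutex_unlock(&list_mutex);

		return 0;
	}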

Link: https://lkml.kernel.org/r/20220914061416.1630-1-thunder.leizhen@xxxxxxxxxx

Signed-off-by: Zhen Lei <thunder.leizhen@xxxxxxxxxx>
Signed-off-by: Steven Rostedt (Google) <rostedt@xxxxxxxxxxx>
---
kernel/tracepoint.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index ef42c1a11920..f23144af5743 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -640,7 +640,6 @@ static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
 static int tracepoint_module_coming(struct module *mod)
 {
 	struct tp_module *tp_mod;
-	int ret = 0;
 
 	if (!mod->num_tracepoints)
 		return 0;
@@ -652,19 +651,18 @@ static int tracepoint_module_coming(struct module *mod)
 	 */
 	if (trace_module_has_bad_taint(mod))
 		return 0;
-	mutex_lock(&tracepoint_module_list_mutex);
+
 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
-	if (!tp_mod) {
-		ret = -ENOMEM;
-		goto end;
-	}
+	if (!tp_mod)
+		return -ENOMEM;
 	tp_mod->mod = mod;
+
+	mutex_lock(&tracepoint_module_list_mutex);
 	list_add_tail(&tp_mod->list, &tracepoint_module_list);
 	blocking_notifier_call_chain(&tracepoint_notify_list,
 			MODULE_STATE_COMING, tp_mod);
-end:
 	mutex_unlock(&tracepoint_module_list_mutex);
-	return ret;
+	return 0;
 }
 
 static void tracepoint_module_going(struct module *mod)
--
2.35.1