Re: WARNING in tracepoint_probe_register_prio

From: Jens Axboe
Date: Sat Nov 04 2017 - 13:26:28 EST


On 10/31/2017 05:14 PM, Steven Rostedt wrote:
> On Tue, 31 Oct 2017 16:30:41 -0600
> Jens Axboe <axboe@xxxxxxxxx> wrote:
>
>> This code dates back to:
>>
>> commit c71a896154119f4ca9e89d6078f5f63ad60ef199
>> Author: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
>> Date: Fri Jan 23 12:06:27 2009 -0200
>>
>> blktrace: add ftrace plugin
>>
>> so not really a recent regression :-)
>
> How many people run two instances of blktrace? ;->
> Love fuzzers!

The core code is fine; the bug is actually in sg, which added ioctl hooks
for both setup/teardown and start/stop of tracing. Those hooks call into
the blktrace helpers without taking the queue's blk_trace_mutex, so they
can race with the same operations issued through the block device node.
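
For reference, this is roughly what the sg side looks like (paraphrased
from the blktrace cases in sg_ioctl() in drivers/scsi/sg.c from memory,
so treat it as a sketch rather than the literal code): each case calls
straight into the exported blktrace entry points with no
blk_trace_mutex held.

	/* sketch of the blktrace cases in sg_ioctl(), not verbatim */
	case BLKTRACESETUP:
		return blk_trace_setup(sdp->device->request_queue,
				       sdp->disk->disk_name,
				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
				       NULL, (char __user *)arg);
	case BLKTRACESTART:
		return blk_trace_startstop(sdp->device->request_queue, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);

With the locking moved into the exported functions, these call sites
become safe without having to touch sg itself.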

The below should do the trick: the exported blk_trace_setup(),
blk_trace_startstop(), and blk_trace_remove() entry points now take
q->blk_trace_mutex themselves, while blk_trace_ioctl(), which already
holds that mutex, is switched to unlocked __ variants. It's a fix for
this commit:

commit 6da127ad0918f93ea93678dad62ce15ffed18797
Author: Christof Schmitt <christof.schmitt@xxxxxxxxxx>
Date: Fri Jan 11 10:09:43 2008 +0100

blktrace: Add blktrace ioctls to SCSI generic devices


diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 45a3928544ce..ea57dd94b2b2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -336,7 +336,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	blk_unregister_tracepoints();
 }
 
-int blk_trace_remove(struct request_queue *q)
+static int __blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
 
@@ -349,6 +349,17 @@ int blk_trace_remove(struct request_queue *q)
 
 	return 0;
 }
+
+int blk_trace_remove(struct request_queue *q)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_remove(q);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_remove);
 
 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -550,9 +561,8 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	return ret;
 }
 
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-		    struct block_device *bdev,
-		    char __user *arg)
+static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+			     struct block_device *bdev, char __user *arg)
 {
 	struct blk_user_trace_setup buts;
 	int ret;
@@ -571,6 +581,19 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	}
 	return 0;
 }
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+		    struct block_device *bdev,
+		    char __user *arg)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_setup(q, name, dev, bdev, arg);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
@@ -607,7 +630,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 }
 #endif
 
-int blk_trace_startstop(struct request_queue *q, int start)
+static int __blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
 	struct blk_trace *bt = q->blk_trace;
@@ -646,6 +669,17 @@ int blk_trace_startstop(struct request_queue *q, int start)
 
 	return ret;
 }
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_startstop(q, start);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
 /*
@@ -676,7 +710,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	switch (cmd) {
 	case BLKTRACESETUP:
 		bdevname(bdev, b);
-		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 		break;
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 	case BLKTRACESETUP32:
@@ -687,10 +721,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	case BLKTRACESTART:
 		start = 1;
 	case BLKTRACESTOP:
-		ret = blk_trace_startstop(q, start);
+		ret = __blk_trace_startstop(q, start);
 		break;
 	case BLKTRACETEARDOWN:
-		ret = blk_trace_remove(q);
+		ret = __blk_trace_remove(q);
 		break;
 	default:
 		ret = -ENOTTY;
@@ -708,10 +742,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  **/
 void blk_trace_shutdown(struct request_queue *q)
 {
+	mutex_lock(&q->blk_trace_mutex);
+
 	if (q->blk_trace) {
-		blk_trace_startstop(q, 0);
-		blk_trace_remove(q);
+		__blk_trace_startstop(q, 0);
+		__blk_trace_remove(q);
 	}
+
+	mutex_unlock(&q->blk_trace_mutex);
 }
 
 #ifdef CONFIG_BLK_CGROUP

--
Jens Axboe