Re: [PATCH] tracing: Add trace_trigger kernel command line option

From: Paul E. McKenney
Date: Thu Oct 20 2022 - 13:55:25 EST


On Thu, Oct 20, 2022 at 01:05:28PM -0400, Steven Rostedt wrote:
> On Thu, 20 Oct 2022 12:33:57 -0400
> Steven Rostedt <rostedt@xxxxxxxxxxx> wrote:
>
> > diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> > index c03fd7037add..79ac31a6a87b 100644
> > --- a/kernel/trace/trace.c
> > +++ b/kernel/trace/trace.c
> > @@ -2748,9 +2748,12 @@ void trace_buffered_event_disable(void)
> >  			       disable_trace_buffered_event, NULL, 1);
> >  	preempt_enable();
> >  
> > +	if (!irqs_disabled())
> > +		printk("IRQS ENABLED!! before %s:%d\n", __func__, __LINE__);
> >  	/* Wait for all current users to finish */
> > -	if (!early_boot_irqs_disabled)
> > -		synchronize_rcu();
> > +	synchronize_rcu();
> > +	if (!irqs_disabled())
> > +		printk("IRQS ENABLED!! after %s:%d\n", __func__, __LINE__);
> >  
> >  	for_each_tracing_cpu(cpu) {
> >  		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
>
>
> Continuing with the above printk()s, I found the culprit:
>
> synchronize_rcu() {
> 	rcu_poll_gp_seq_start_unlocked() {
> 		struct rcu_node *rnp = rcu_get_root();
>
> 		if (rcu_init_invoked()) {
> 			lockdep_assert_irqs_enabled();
> 			raw_spin_lock_irq_rcu_node(rnp);
> 		}
> 		rcu_poll_gp_seq_start(snap);
> 		if (rcu_init_invoked())
> 			raw_spin_unlock_irq_rcu_node(rnp); <<-- Interrupts enabled here
> 	}

Thank you for digging into this!

Does the following help?
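
For context: raw_spin_lock_irq_rcu_node()/raw_spin_unlock_irq_rcu_node()
use the _irq spinlock variants, and the unlock side unconditionally
re-enables interrupts, discarding whatever IRQ state the caller had.
The patch below switches to the irqsave/irqrestore variants, which
restore the caller's state instead. A minimal sketch of the difference
(illustrative only, not from the patch; sketch_irq_variants() is a
made-up name):

static void sketch_irq_variants(raw_spinlock_t *lock)
{
	unsigned long flags;

	/* _irq variants: disable on lock, unconditionally enable on unlock. */
	raw_spin_lock_irq(lock);
	raw_spin_unlock_irq(lock);	/* IRQs now on, even if the caller
					 * had them off, as in early boot */

	/* irqsave/irqrestore variants: save, then restore, the caller's state. */
	raw_spin_lock_irqsave(lock, flags);
	raw_spin_unlock_irqrestore(lock, flags);
}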

Thanx, Paul

------------------------------------------------------------------------

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index abc615808b6e8..45ad9ef00d647 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1425,30 +1425,32 @@ static void rcu_poll_gp_seq_end(unsigned long *snap)
 // where caller does not hold the root rcu_node structure's lock.
 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
 {
+	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root();
 
 	if (rcu_init_invoked()) {
 		lockdep_assert_irqs_enabled();
-		raw_spin_lock_irq_rcu_node(rnp);
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rcu_poll_gp_seq_start(snap);
 	if (rcu_init_invoked())
-		raw_spin_unlock_irq_rcu_node(rnp);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 // Make the polled API aware of the end of a grace period, but where
 // caller does not hold the root rcu_node structure's lock.
 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
 {
+	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root();
 
 	if (rcu_init_invoked()) {
 		lockdep_assert_irqs_enabled();
-		raw_spin_lock_irq_rcu_node(rnp);
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rcu_poll_gp_seq_end(snap);
 	if (rcu_init_invoked())
-		raw_spin_unlock_irq_rcu_node(rnp);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
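
With the irqrestore variants, an early-boot caller such as
trace_buffered_event_disable() should see its IRQ state preserved
across the grace-period wait. A sketch of the invariant being restored
(illustrative only; this check is not part of the patch):

	bool was_disabled = irqs_disabled();

	synchronize_rcu();	/* may take/release the root rcu_node lock */
	WARN_ON_ONCE(irqs_disabled() != was_disabled);	/* should no longer fire */

Note that flags is only written and read when rcu_init_invoked() returns
true, so the conditional lock/unlock pair keeps the save and the restore
matched.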