Re: [PATCH 1/1] genirq/proc: Try to jump over the unallocated irq hole whenever possible

From: Huang Adrian
Date: Thu Mar 28 2024 - 06:03:24 EST


On Wed, Mar 27, 2024 at 7:36 AM Thomas Gleixner <tglx@xxxxxxxxxxxxx> wrote:
>
> > @@ -19,6 +19,10 @@ static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
> >  	(*pos)++;
> >  	if (*pos > nr_irqs)
> >  		return NULL;
> > +
> > +	if (!irq_to_desc(*pos))
> > +		*pos = irq_get_next_irq(*pos);
>
> How is irq_get_next_irq() valid without either holding the sparse irq
> lock or RCU read lock?

Indeed, thanks for pointing this out. How about the patch below?

---
diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
index cb0edc7cb..111ea8a3c 100644
--- a/fs/proc/interrupts.c
+++ b/fs/proc/interrupts.c
@@ -19,6 +19,12 @@ static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
 	(*pos)++;
 	if (*pos > nr_irqs)
 		return NULL;
+
+	rcu_read_lock();
+	if (!irq_to_desc(*pos))
+		*pos = irq_get_next_irq(*pos);
+	rcu_read_unlock();
+
 	return pos;
 }
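
In case it helps review, the idea in int_seq_next() above is just the usual
"advance the cursor, then jump over unallocated holes while the lookup is
protected" iterator pattern. A minimal userspace sketch of that pattern
(all names here, table, table_lock, next_allocated, are made up for
illustration, and a pthread rwlock merely stands in for rcu_read_lock()):

/*
 * Userspace sketch only, not kernel code: advance the cursor, then jump
 * over unallocated holes under the read-side lock protecting the lookup.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NR_SLOTS 1024

static void *table[NR_SLOTS];		/* sparse table, NULL means "hole" */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Return the next allocated index >= pos, or NR_SLOTS if there is none. */
static size_t next_allocated(size_t pos)
{
	while (pos < NR_SLOTS && !table[pos])
		pos++;
	return pos;
}

/* seq_file-style ->next(): step the cursor and skip over any hole. */
static size_t seq_next(size_t pos)
{
	pos++;
	if (pos >= NR_SLOTS)
		return NR_SLOTS;

	pthread_rwlock_rdlock(&table_lock);	/* stand-in for rcu_read_lock() */
	if (!table[pos])
		pos = next_allocated(pos);
	pthread_rwlock_unlock(&table_lock);

	return pos;
}

int main(void)
{
	static int dummy;

	table[3] = table[7] = table[42] = &dummy;	/* pretend-allocated slots */

	for (size_t pos = seq_next(0); pos < NR_SLOTS; pos = seq_next(pos))
		printf("allocated slot: %zu\n", pos);

	return 0;
}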


> But that aside you are missing a massive performance problem in the
> generic show_interrupts() code:
>
> 	if (desc->kstat_irqs) {
> 		for_each_online_cpu(j)
> 			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
> 	}
>
> 	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
> 		goto outsparse;
>

Yes, good point. The change is shown below.
BTW, I also reworked the loop that prints each CPU's irq stat, so the
percpu 'kstat_irqs' pointer is checked once instead of on every iteration.

If you're OK with this, I'll send out the series.

---
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 623b8136e..bfa341fac 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -461,7 +461,7 @@ int show_interrupts(struct seq_file *p, void *v)
 {
 	static int prec;
 
-	unsigned long flags, any_count = 0;
+	unsigned long flags, print_irq = 1;
 	int i = *(loff_t *) v, j;
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -488,18 +488,28 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc || irq_settings_is_hidden(desc))
 		goto outsparse;
 
-	if (desc->kstat_irqs) {
-		for_each_online_cpu(j)
-			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
+	if ((!desc->action || irq_desc_is_chained(desc)) && desc->kstat_irqs) {
+		print_irq = 0;
+		for_each_online_cpu(j) {
+			if (data_race(*per_cpu_ptr(desc->kstat_irqs, j))) {
+				print_irq = 1;
+				break;
+			}
+		}
 	}
 
-	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+	if (!print_irq)
 		goto outsparse;
 
 	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", desc->kstat_irqs ?
-			   *per_cpu_ptr(desc->kstat_irqs, j) : 0);
+
+	if (desc->kstat_irqs) {
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", data_race(*per_cpu_ptr(desc->kstat_irqs, j)));
+	} else {
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", 0);
+	}
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (desc->irq_data.chip) {

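For reference, the win in show_interrupts() above comes from two things:
the per-CPU scan now runs only for descriptors where its result can
actually suppress the line, and it stops at the first non-zero counter
instead of OR-accumulating all of them. A minimal userspace sketch of
that second part (the array and function names are made up for
illustration, nothing kernel-specific):

/*
 * Userspace sketch only: OR-accumulating every per-CPU counter versus
 * bailing out at the first non-zero one, as the early-exit loop above does.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Old style: touches every counter even when the first one is non-zero. */
static bool any_nonzero_accumulate(const unsigned int *counters, size_t nr_cpus)
{
	unsigned int any = 0;

	for (size_t i = 0; i < nr_cpus; i++)
		any |= counters[i];
	return any != 0;
}

/* New style: stops at the first hit, so active lines exit almost immediately. */
static bool any_nonzero_early_exit(const unsigned int *counters, size_t nr_cpus)
{
	for (size_t i = 0; i < nr_cpus; i++) {
		if (counters[i])
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int counters[8] = { 5, 0, 0, 0, 0, 0, 0, 0 };

	printf("accumulate: %d, early-exit: %d\n",
	       any_nonzero_accumulate(counters, 8),
	       any_nonzero_early_exit(counters, 8));
	return 0;
}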

-- Adrian