Re: [PATCH] irqchip/sifive-plic: Add support for multiple PLICs

From: Atish Patra
Date: Tue Dec 10 2019 - 01:30:20 EST


On Fri, 2019-12-06 at 09:32 +0000, Marc Zyngier wrote:
> [Fixing Palmer's email address]
>
> On 2019-12-06 02:31, Atish Patra wrote:
> > Currently, the PLIC driver supports only one PLIC on the board.
> > However, multiple PLICs can be present on a two-socket RISC-V system.
> >
> > Modify the driver so that each PLIC handler has information about
> > its own PLIC registers and an irqdomain associated with it.
> >
> > Tested on a two-socket RISC-V system based on the VCU118 FPGA,
> > connected via the OmniXtend protocol.
> >
> > Signed-off-by: Atish Patra <atish.patra@xxxxxxx>
> > Signed-off-by: Anup Patel <anup.patel@xxxxxxx>
>
> There seems to be some confusion here about who the author of the
> patch is.
> If this is a co-development, please use the appropriate tag.
>

OK. I will fix this in the next version.
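For reference, the tag block would then look something like this (per
Documentation/process/submitting-patches.rst, Co-developed-by must be
immediately followed by the co-author's Signed-off-by, with the
submitter's Signed-off-by last):

Co-developed-by: Anup Patel <anup.patel@xxxxxxx>
Signed-off-by: Anup Patel <anup.patel@xxxxxxx>
Signed-off-by: Atish Patra <atish.patra@xxxxxxx>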

> > ---
> > drivers/irqchip/irq-sifive-plic.c | 81 +++++++++++++++++++------------
> > 1 file changed, 51 insertions(+), 30 deletions(-)
> >
> > diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
> > index c72c036aea76..aea1f2f0f0d5 100644
> > --- a/drivers/irqchip/irq-sifive-plic.c
> > +++ b/drivers/irqchip/irq-sifive-plic.c
> > @@ -55,7 +55,11 @@
> > #define CONTEXT_THRESHOLD 0x00
> > #define CONTEXT_CLAIM 0x04
> >
> > -static void __iomem *plic_regs;
> > +struct plic_hw {
> > + struct cpumask lmask;
> > + struct irq_domain *irqdomain;
> > + void __iomem *regs;
> > +};
>
> The '_hw' suffix is a bit unfortunate, as this structure mostly
> contains SW constructs. Maybe something more general like 'context'
> would be more appropriate.
>

Sure. I will rename it to something more meaningful.

> > struct plic_handler {
> > bool present;
> > @@ -66,6 +70,7 @@ struct plic_handler {
> > */
> > raw_spinlock_t enable_lock;
> > void __iomem *enable_base;
> > + struct plic_hw *hw;
> > };
> > static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
> >
> > @@ -84,31 +89,40 @@ static inline void plic_toggle(struct plic_handler *handler,
> > }
> >
> > static inline void plic_irq_toggle(const struct cpumask *mask,
> > - int hwirq, int enable)
> > + struct irq_data *d, int enable)
> > {
> > int cpu;
> > + struct plic_hw *hw = d->domain->host_data;
>
> The usual construct is to transfer the domain->host_data pointer to
> the irq_data->chip_data pointer at map() time, using
> irq_set_chip_data().
>
> You can then retrieve the pointer with irq_get_chip_data(), and save
> yourselves some pointer chasing.
>

Sure. I will do that.
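Something roughly along these lines (untested sketch, just to confirm I
understood the suggestion; 'plic_priv' is only a placeholder for
whatever the renamed plic_hw structure, and the handler->hw field, end
up being called):

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
	/* stash the per-PLIC private data in the irq at map() time */
	irq_set_chip_data(irq, d->host_data);
	irq_set_noprobe(irq);
	return 0;
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	/* retrieve it directly instead of chasing d->domain->host_data */
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}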

> > - writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
> > + writel(enable, hw->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
> > for_each_cpu(cpu, mask) {
> > struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
> >
> > - if (handler->present)
> > - plic_toggle(handler, hwirq, enable);
> > + if (handler->present &&
> > + cpumask_test_cpu(cpu, &handler->hw->lmask))
> > + plic_toggle(handler, d->hwirq, enable);
> > }
> > }
> >
> > static void plic_irq_enable(struct irq_data *d)
> > {
> > - unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> > - cpu_online_mask);
> > + struct cpumask amask;
> > + unsigned int cpu;
> > + struct plic_hw *hw = d->domain->host_data;
> > +
> > + cpumask_and(&amask, &hw->lmask, cpu_online_mask);
> > + cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
> > + &amask);
> > if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
> > return;
> > - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
> > + plic_irq_toggle(cpumask_of(cpu), d, 1);
> > }
> >
> > static void plic_irq_disable(struct irq_data *d)
> > {
> > - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
> > + struct plic_hw *hw = d->domain->host_data;
> > +
> > + plic_irq_toggle(&hw->lmask, d, 0);
> > }
> >
> > #ifdef CONFIG_SMP
> > @@ -116,18 +130,22 @@ static int plic_set_affinity(struct irq_data *d,
> > const struct cpumask *mask_val, bool force)
> > {
> > unsigned int cpu;
> > + struct cpumask amask;
> > + struct plic_hw *hw = d->domain->host_data;
> > +
> > + cpumask_and(&amask, &hw->lmask, mask_val);
>
> So this means that an interrupt cannot move between sockets?

Unfortunately, that's correct for the first version of the experimental
platform. Hopefully, future platforms will have better support.

> How is that going to work with CPU hotplug? This seems like
> a pretty bad restriction for anything but the most basic
> experimental platform.
>
> > if (force)
> > - cpu = cpumask_first(mask_val);
> > + cpu = cpumask_first(&amask);
> > else
> > - cpu = cpumask_any_and(mask_val, cpu_online_mask);
> > + cpu = cpumask_any_and(&amask, cpu_online_mask);
> >
> > if (cpu >= nr_cpu_ids)
> > return -EINVAL;
> >
> > if (!irqd_irq_disabled(d)) {
> > - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
> > - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
> > + plic_irq_toggle(&hw->lmask, d, 0);
> > + plic_irq_toggle(cpumask_of(cpu), d, 1);
> > }
> >
> > irq_data_update_effective_affinity(d, cpumask_of(cpu));
> > @@ -163,8 +181,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
> > .xlate = irq_domain_xlate_onecell,
> > };
> >
> > -static struct irq_domain *plic_irqdomain;
> > -
> > /*
> > * Handling an interrupt is a two-step process: first you claim the interrupt
> > * by reading the claim register, then you complete the interrupt by writing
> > @@ -181,7 +197,7 @@ static void plic_handle_irq(struct pt_regs *regs)
> >
> > csr_clear(sie, SIE_SEIE);
> > while ((hwirq = readl(claim))) {
> > - int irq = irq_find_mapping(plic_irqdomain, hwirq);
> > + int irq = irq_find_mapping(handler->hw->irqdomain, hwirq);
> >
> > if (unlikely(irq <= 0))
> > pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
> > @@ -212,15 +228,17 @@ static int __init plic_init(struct device_node *node,
> > {
> > int error = 0, nr_contexts, nr_handlers = 0, i;
> > u32 nr_irqs;
> > + struct plic_hw *hw;
> >
> > - if (plic_regs) {
> > - pr_warn("PLIC already present.\n");
> > - return -ENXIO;
> > - }
> > + hw = kzalloc(sizeof(*hw), GFP_KERNEL);
> > + if (!hw)
> > + return -ENOMEM;
> >
> > - plic_regs = of_iomap(node, 0);
> > - if (WARN_ON(!plic_regs))
> > - return -EIO;
> > + hw->regs = of_iomap(node, 0);
> > + if (WARN_ON(!hw->regs)) {
> > + error = -EIO;
> > + goto out_freehw;
> > + }
> >
> > error = -EINVAL;
> > of_property_read_u32(node, "riscv,ndev", &nr_irqs);
> > @@ -234,9 +252,9 @@ static int __init plic_init(struct device_node *node,
> > goto out_iounmap;
> >
> > error = -ENOMEM;
> > - plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
> > - &plic_irqdomain_ops, NULL);
> > - if (WARN_ON(!plic_irqdomain))
> > + hw->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
> > + &plic_irqdomain_ops, hw);
> > + if (WARN_ON(!hw->irqdomain))
> > goto out_iounmap;
> >
> > for (i = 0; i < nr_contexts; i++) {
> > @@ -279,13 +297,14 @@ static int __init plic_init(struct device_node *node,
> > goto done;
> > }
> >
> > + cpumask_set_cpu(cpu, &hw->lmask);
> > handler->present = true;
> > handler->hart_base =
> > - plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
> > + hw->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
> > raw_spin_lock_init(&handler->enable_lock);
> > handler->enable_base =
> > - plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
> > -
> > + hw->regs + ENABLE_BASE + i * ENABLE_PER_HART;
> > + handler->hw = hw;
> > done:
> > /* priority must be > threshold to trigger an interrupt */
> > writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
> > @@ -300,7 +319,9 @@ static int __init plic_init(struct device_node *node,
> > return 0;
> >
> > out_iounmap:
> > - iounmap(plic_regs);
> > + iounmap(hw->regs);
> > +out_freehw:
> > + kfree(hw);
> > return error;
> > }
>
> This otherwise seems like a very straightforward change.
>
Thanks for the review. I will send out the v2 soon.

> Thanks,
>
> M.

--
Regards,
Atish