[PATCH v7 12/30] LoongArch: KVM: Implement vcpu interrupt operations

From: Tianrui Zhao
Date: Mon Apr 17 2023 - 07:23:01 EST


Implement vcpu interrupt operations such as vcpu set irq and
vcpu clear irq. The pending interrupt number is parsed from the
vcpu irq bitmap and injected into the guest either with
set_gcsr_estat() (timer, IPI and software interrupts) or via the
GINTC CSR (hardware interrupt lines IP0-IP7).
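
As an illustration only (not part of this patch), userspace is
expected to reach these paths through the KVM_INTERRUPT vcpu ioctl
handled below. The sketch assumes the struct kvm_loongarch_interrupt
layout (cpu/irq fields) added by the uapi patch earlier in this
series; toggle_guest_irq() is a hypothetical helper. A positive irq
number queues an interrupt for the target vcpu, a negative one
dequeues it, and cpu == -1 targets the vcpu the file descriptor
refers to:

/*
 * Illustrative userspace sketch, not part of this patch.  Assumes the
 * struct kvm_loongarch_interrupt definition from the uapi header added
 * earlier in this series and an already created vCPU file descriptor.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int toggle_guest_irq(int vcpu_fd, int line)
{
	struct kvm_loongarch_interrupt intr = {
		.cpu = -1,	/* -1: deliver to the vCPU this fd refers to */
		.irq = line,	/* positive irq number queues the interrupt */
	};

	if (ioctl(vcpu_fd, KVM_INTERRUPT, &intr) < 0)
		return -1;

	intr.irq = -line;	/* negative irq number dequeues it again */
	return ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
}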

Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
---
 arch/loongarch/kvm/interrupt.c | 126 +++++++++++++++++++++++++++++++++
 arch/loongarch/kvm/vcpu.c      |  45 ++++++++++++
 2 files changed, 171 insertions(+)
create mode 100644 arch/loongarch/kvm/interrupt.c

diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
new file mode 100644
index 000000000000..02267a71d1aa
--- /dev/null
+++ b/arch/loongarch/kvm/interrupt.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <asm/kvm_vcpu.h>
+
+static unsigned int int_to_coreint[LOONGARCH_EXC_MAX] = {
+	[LARCH_INT_TIMER] = CPU_TIMER,
+	[LARCH_INT_IPI] = CPU_IPI,
+	[LARCH_INT_SIP0] = CPU_SIP0,
+	[LARCH_INT_SIP1] = CPU_SIP1,
+	[LARCH_INT_IP0] = CPU_IP0,
+	[LARCH_INT_IP1] = CPU_IP1,
+	[LARCH_INT_IP2] = CPU_IP2,
+	[LARCH_INT_IP3] = CPU_IP3,
+	[LARCH_INT_IP4] = CPU_IP4,
+	[LARCH_INT_IP5] = CPU_IP5,
+	[LARCH_INT_IP6] = CPU_IP6,
+	[LARCH_INT_IP7] = CPU_IP7,
+};
+
+static int _kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+	unsigned int irq = 0;
+
+	clear_bit(priority, &vcpu->arch.irq_pending);
+	if (priority < LOONGARCH_EXC_MAX)
+		irq = int_to_coreint[priority];
+
+	switch (priority) {
+	case LARCH_INT_TIMER:
+	case LARCH_INT_IPI:
+	case LARCH_INT_SIP0:
+	case LARCH_INT_SIP1:
+		set_gcsr_estat(irq);
+		break;
+
+	case LARCH_INT_IP0:
+	case LARCH_INT_IP1:
+	case LARCH_INT_IP2:
+	case LARCH_INT_IP3:
+	case LARCH_INT_IP4:
+	case LARCH_INT_IP5:
+	case LARCH_INT_IP6:
+	case LARCH_INT_IP7:
+		set_csr_gintc(irq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+static int _kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+	unsigned int irq = 0;
+
+	clear_bit(priority, &vcpu->arch.irq_clear);
+	if (priority < LOONGARCH_EXC_MAX)
+		irq = int_to_coreint[priority];
+
+	switch (priority) {
+	case LARCH_INT_TIMER:
+	case LARCH_INT_IPI:
+	case LARCH_INT_SIP0:
+	case LARCH_INT_SIP1:
+		clear_gcsr_estat(irq);
+		break;
+
+	case LARCH_INT_IP0:
+	case LARCH_INT_IP1:
+	case LARCH_INT_IP2:
+	case LARCH_INT_IP3:
+	case LARCH_INT_IP4:
+	case LARCH_INT_IP5:
+	case LARCH_INT_IP6:
+	case LARCH_INT_IP7:
+		clear_csr_gintc(irq);
+		break;
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+void _kvm_deliver_intr(struct kvm_vcpu *vcpu)
+{
+	unsigned long *pending = &vcpu->arch.irq_pending;
+	unsigned long *pending_clr = &vcpu->arch.irq_clear;
+	unsigned int priority;
+
+	if (!(*pending) && !(*pending_clr))
+		return;
+
+	if (*pending_clr) {
+		priority = __ffs(*pending_clr);
+		while (priority <= LOONGARCH_EXC_IPNUM) {
+			_kvm_irq_clear(vcpu, priority);
+			priority = find_next_bit(pending_clr,
+					BITS_PER_BYTE * sizeof(*pending_clr),
+					priority + 1);
+		}
+	}
+
+	if (*pending) {
+		priority = __ffs(*pending);
+		while (priority <= LOONGARCH_EXC_IPNUM) {
+			_kvm_irq_deliver(vcpu, priority);
+			priority = find_next_bit(pending,
+					BITS_PER_BYTE * sizeof(*pending),
+					priority + 1);
+		}
+	}
+}
+
+int _kvm_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return test_bit(LARCH_INT_TIMER, &vcpu->arch.irq_pending);
+}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index d1e8cd402900..ee648bdb657c 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -305,6 +305,51 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
preempt_enable();
}

+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+		struct kvm_loongarch_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+	struct kvm_vcpu *dvcpu = NULL;
+
+	if (irq->cpu == -1)
+		dvcpu = vcpu;
+	else
+		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
+
+	if (intr > 0)
+		_kvm_queue_irq(dvcpu, intr);
+	else if (intr < 0)
+		_kvm_dequeue_irq(dvcpu, -intr);
+	else {
+		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+			irq->cpu, irq->irq);
+		return -EINVAL;
+	}
+
+	kvm_vcpu_kick(dvcpu);
+	return 0;
+}
+
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+		unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+
+	if (ioctl == KVM_INTERRUPT) {
+		struct kvm_loongarch_interrupt irq;
+
+		if (copy_from_user(&irq, argp, sizeof(irq)))
+			return -EFAULT;
+		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+			irq.irq);
+
+		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+	}
+
+	return -ENOIOCTLCMD;
+}
+
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
return 0;
--
2.31.1