Re: [PATCH v12 07/31] LoongArch: KVM: Implement vcpu run interface

From: bibo, mao
Date: Mon Jun 05 2023 - 08:53:39 EST




在 2023/5/30 09:51, Tianrui Zhao 写道:
> Implement vcpu run interface, handling mmio, iocsr reading fault
> and deliver interrupt, lose fpu before vcpu enter guest.
>
> Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
> ---
> arch/loongarch/kvm/vcpu.c | 86 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 86 insertions(+)
>
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 24b5b00266a1..eca8b96a3e6e 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -17,6 +17,44 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
> return 0;
> }
>
> +/* Returns 1 if the guest TLB may be clobbered */
> +static int _kvm_check_requests(struct kvm_vcpu *vcpu, int cpu)
> +{
> + int ret = 0;
> +
> + if (!kvm_request_pending(vcpu))
> + return 0;
> +
> + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
> + /* Drop vpid for this vCPU */
> + vcpu->arch.vpid = 0;
> + /* This will clobber guest TLB contents too */
> + ret = 1;
> + }
> +
> + return ret;
> +}
Can the `cpu` parameter of _kvm_check_requests() be removed, since it is not used in the function?

Regards
Bibo, Mao
> +
> +static void kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
> +{
> + int cpu;
> +
> + /*
> + * handle vcpu timer, interrupts, check requests and
> + * check vmid before vcpu enter guest
> + */
> + kvm_acquire_timer(vcpu);
> + _kvm_deliver_intr(vcpu);
> + /* make sure the vcpu mode has been written */
> + smp_store_mb(vcpu->mode, IN_GUEST_MODE);
> + cpu = smp_processor_id();
> + _kvm_check_requests(vcpu, cpu);
> + _kvm_check_vmid(vcpu, cpu);
> + vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
> + /* clear KVM_LARCH_CSR as csr will change when enter guest */
> + vcpu->arch.aux_inuse &= ~KVM_LARCH_CSR;
> +}
> +
> int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> {
> unsigned long timer_hz;
> @@ -86,3 +124,51 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
> context->last_vcpu = NULL;
> }
> }
> +
> +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> +{
> + int r = -EINTR;
> + struct kvm_run *run = vcpu->run;
> +
> + vcpu_load(vcpu);
> +
> + kvm_sigset_activate(vcpu);
> +
> + if (vcpu->mmio_needed) {
> + if (!vcpu->mmio_is_write)
> + _kvm_complete_mmio_read(vcpu, run);
> + vcpu->mmio_needed = 0;
> + }
> +
> + if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
> + if (!run->iocsr_io.is_write)
> + _kvm_complete_iocsr_read(vcpu, run);
> + }
> +
> + /* clear exit_reason */
> + run->exit_reason = KVM_EXIT_UNKNOWN;
> + if (run->immediate_exit)
> + goto out;
> +
> + lose_fpu(1);
> +
> + local_irq_disable();
> + guest_timing_enter_irqoff();
> +
> + kvm_pre_enter_guest(vcpu);
> + trace_kvm_enter(vcpu);
> +
> + guest_state_enter_irqoff();
> + r = kvm_loongarch_ops->enter_guest(run, vcpu);
> +
> + /* guest_state_exit_irqoff() already done. */
> + trace_kvm_out(vcpu);
> + guest_timing_exit_irqoff();
> + local_irq_enable();
> +
> +out:
> + kvm_sigset_deactivate(vcpu);
> +
> + vcpu_put(vcpu);
> + return r;
> +}