Re: [PATCH v4 6/7] KVM: X86: TSCDEADLINE MSR emulation fastpath

From: Wanpeng Li
Date: Tue Apr 28 2020 - 06:05:51 EST


On Tue, 28 Apr 2020 at 17:59, <linxl3@xxxxxxxxxx> wrote:
>
> On Tuesday, 28 Apr 2020 at 14:23, Wanpeng Li <kernellwp@xxxxxxxxx> wrote:
> >
> > From: Wanpeng Li <wanpengli@xxxxxxxxxxx>
> >
> > This patch implements a TSCDEADLINE MSR emulation fastpath: after a wrmsr
> > tscdeadline vmexit, handle the write as soon as possible and vmentry
> > immediately, skipping the full kvm exit handling when possible.
> >
> > Tested-by: Haiwei Li <lihaiwei@xxxxxxxxxxx>
> > Cc: Haiwei Li <lihaiwei@xxxxxxxxxxx>
> > Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
> > ---
> >  arch/x86/kvm/lapic.c   | 18 ++++++++++++------
> >  arch/x86/kvm/vmx/vmx.c | 12 ++++++++----
> >  arch/x86/kvm/x86.c     | 30 ++++++++++++++++++++++++------
> >  3 files changed, 44 insertions(+), 16 deletions(-)
> >
> > diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> > index 38f7dc9..3589237 100644
> > --- a/arch/x86/kvm/lapic.c
> > +++ b/arch/x86/kvm/lapic.c
> > @@ -1593,7 +1593,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
> >          }
> >  }
> >
> > -static void apic_timer_expired(struct kvm_lapic *apic)
> > +static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
> >  {
> >          struct kvm_vcpu *vcpu = apic->vcpu;
> >          struct kvm_timer *ktimer = &apic->lapic_timer;
> > @@ -1604,6 +1604,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
> >          if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
> >                  ktimer->expired_tscdeadline = ktimer->tscdeadline;
> >
> > +        if (!from_timer_fn && vcpu->arch.apicv_active) {
> > +                WARN_ON(kvm_get_running_vcpu() != vcpu);
> > +                kvm_apic_inject_pending_timer_irqs(apic);
> > +                return;
> > +        }
> > +
> >          if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
> >                  if (apic->lapic_timer.timer_advance_ns)
> >                          __kvm_wait_lapic_expire(vcpu);
> > @@ -1643,7 +1649,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
> >                  expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
> >                  hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
> >          } else
> > -                apic_timer_expired(apic);
> > +                apic_timer_expired(apic, false);
> >
> >          local_irq_restore(flags);
> >  }
> > @@ -1751,7 +1757,7 @@ static void start_sw_period(struct kvm_lapic *apic)
> >
> >          if (ktime_after(ktime_get(),
> >                          apic->lapic_timer.target_expiration)) {
> > -                apic_timer_expired(apic);
> > +                apic_timer_expired(apic, false);
> >
> >                  if (apic_lvtt_oneshot(apic))
> >                          return;
> > @@ -1813,7 +1819,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
> >                  if (atomic_read(&ktimer->pending)) {
> >                          cancel_hv_timer(apic);
> >                  } else if (expired) {
> > -                        apic_timer_expired(apic);
> > +                        apic_timer_expired(apic, false);
> >                          cancel_hv_timer(apic);
> >                  }
> >          }
> > @@ -1863,7 +1869,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
> >                  goto out;
> >          WARN_ON(swait_active(&vcpu->wq));
> >          cancel_hv_timer(apic);
> > -        apic_timer_expired(apic);
> > +        apic_timer_expired(apic, false);
> >
> >          if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
> >                  advance_periodic_target_expiration(apic);
> > @@ -2369,7 +2375,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
> >          struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
> >          struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
> >
> > -        apic_timer_expired(apic);
> > +        apic_timer_expired(apic, true);
> >
> >          if (lapic_is_periodic(apic)) {
> >                  advance_periodic_target_expiration(apic);
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index ce19b0e..bb5c4f1 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -5994,7 +5994,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
> >          if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
> >                  kvm_skip_emulated_instruction(vcpu);
> Can we move this kvm_skip_emulated_instruction() call into handle_fastpath_set_msr_irqoff()? That would keep the style consistent.
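>
> Something along these lines, perhaps (a rough, untested sketch only;
> handle_fastpath_set_tscdeadline() and EXIT_FASTPATH_CONT_RUN are just
> placeholders for whatever helper and return value this series uses,
> and the other MSR cases are omitted):
>
> fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
> {
>         u32 msr = kvm_rcx_read(vcpu);
>         u64 data = kvm_read_edx_eax(vcpu);
>         fastpath_t ret = EXIT_FASTPATH_NONE;
>
>         switch (msr) {
>         case MSR_IA32_TSCDEADLINE:
>                 /* placeholder for the tscdeadline helper this series adds */
>                 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
>                         /* skip the WRMSR here instead of in vmx_handle_exit() */
>                         kvm_skip_emulated_instruction(vcpu);
>                         ret = EXIT_FASTPATH_CONT_RUN;
>                 }
>                 break;
>         default:
>                 break;
>         }
>
>         return ret;
> }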

It may have other users sooner or later.

Wanpeng