[PATCH v3 4/5] KVM: X86: TSCDEADLINE MSR emulation fastpath

From: Wanpeng Li
Date: Fri Apr 24 2020 - 02:23:13 EST


From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

This patch implements a tscdeadline MSR emulation fastpath: after a
WRMSR to MSR_IA32_TSCDEADLINE causes a vmexit, handle the write as
soon as possible and re-enter the guest immediately, skipping the
rest of the generic KVM exit handling when it is safe to do so.

Tested-by: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Cc: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
arch/x86/kvm/lapic.c | 18 ++++++++++++------
arch/x86/kvm/x86.c | 30 ++++++++++++++++++++++++------
2 files changed, 36 insertions(+), 12 deletions(-)
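
In outline, the fastpath added here behaves like the sketch below (a
simplified illustration rather than the literal code in this diff;
fastpath_wrmsr() is a made-up name for the sketch only, and
EXIT_FASTPATH_CONT_RUN is introduced earlier in this series):

	/* Sketch: handle a WRMSR vmexit without leaving irqs-off context. */
	static enum exit_fastpath_completion fastpath_wrmsr(struct kvm_vcpu *vcpu)
	{
		switch (kvm_rcx_read(vcpu)) {	/* RCX holds the MSR index */
		case MSR_IA32_TSCDEADLINE:
			/* Reprogram the deadline timer and re-enter the guest. */
			kvm_set_lapic_tscdeadline_msr(vcpu, kvm_read_edx_eax(vcpu));
			return EXIT_FASTPATH_CONT_RUN;
		default:
			return EXIT_FASTPATH_NONE;	/* take the full exit path */
		}
	}

The sketch omits the guard checks in the real code below; the point is
that a tscdeadline write, which a guest using the LAPIC timer in
TSC-deadline mode issues every time it arms the timer, stays on the
fast irqs-off path in the common case.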

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 38f7dc9..3589237 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1593,7 +1593,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
}
}

-static void apic_timer_expired(struct kvm_lapic *apic)
+static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
struct kvm_vcpu *vcpu = apic->vcpu;
struct kvm_timer *ktimer = &apic->lapic_timer;
@@ -1604,6 +1604,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
ktimer->expired_tscdeadline = ktimer->tscdeadline;

+ if (!from_timer_fn && vcpu->arch.apicv_active) {
+ WARN_ON(kvm_get_running_vcpu() != vcpu);
+ kvm_apic_inject_pending_timer_irqs(apic);
+ return;
+ }
+
if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
if (apic->lapic_timer.timer_advance_ns)
__kvm_wait_lapic_expire(vcpu);
@@ -1643,7 +1649,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
} else
- apic_timer_expired(apic);
+ apic_timer_expired(apic, false);

local_irq_restore(flags);
}
@@ -1751,7 +1757,7 @@ static void start_sw_period(struct kvm_lapic *apic)

if (ktime_after(ktime_get(),
apic->lapic_timer.target_expiration)) {
- apic_timer_expired(apic);
+ apic_timer_expired(apic, false);

if (apic_lvtt_oneshot(apic))
return;
@@ -1813,7 +1819,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
if (atomic_read(&ktimer->pending)) {
cancel_hv_timer(apic);
} else if (expired) {
- apic_timer_expired(apic);
+ apic_timer_expired(apic, false);
cancel_hv_timer(apic);
}
}
@@ -1863,7 +1869,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
goto out;
WARN_ON(swait_active(&vcpu->wq));
cancel_hv_timer(apic);
- apic_timer_expired(apic);
+ apic_timer_expired(apic, false);

if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic);
@@ -2369,7 +2375,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

- apic_timer_expired(apic);
+ apic_timer_expired(apic, true);

if (lapic_is_periodic(apic)) {
advance_periodic_target_expiration(apic);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4561104..99061ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1616,27 +1616,45 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
return 1;
}

+static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
+{
+ if (!kvm_x86_ops.set_hv_timer ||
+ kvm_mwait_in_guest(vcpu->kvm) ||
+ kvm_can_post_timer_interrupt(vcpu))
+ return 1;
+
+ kvm_set_lapic_tscdeadline_msr(vcpu, data);
+ return 0;
+}
+
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
u32 msr = kvm_rcx_read(vcpu);
u64 data;
- int ret = 0;
+ enum exit_fastpath_completion ret = EXIT_FASTPATH_NONE;

switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
data = kvm_read_edx_eax(vcpu);
- ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
+ if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data))
+ ret = EXIT_FASTPATH_SKIP_EMUL_INS;
+ break;
+ case MSR_IA32_TSCDEADLINE:
+ data = kvm_read_edx_eax(vcpu);
+ if (!handle_fastpath_set_tscdeadline(vcpu, data))
+ ret = EXIT_FASTPATH_CONT_RUN;
break;
default:
- return EXIT_FASTPATH_NONE;
+ ret = EXIT_FASTPATH_NONE;
}

- if (!ret) {
+ if (ret != EXIT_FASTPATH_NONE) {
trace_kvm_msr_write(msr, data);
- return EXIT_FASTPATH_SKIP_EMUL_INS;
+ if (ret == EXIT_FASTPATH_CONT_RUN)
+ kvm_skip_emulated_instruction(vcpu);
}

- return EXIT_FASTPATH_NONE;
+ return ret;
}
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);

--
2.7.4
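
For reference, EXIT_FASTPATH_CONT_RUN only pays off if the vendor
vmexit path acts on it instead of returning to the generic run loop.
A minimal sketch of a consumer, assuming the plumbing from the earlier
patches in this series (hypothetical and simplified, not the actual
vmx.c code):

	/* Sketch: irqs-off handling of a WRMSR-induced vmexit. */
	for (;;) {
		enum exit_fastpath_completion fastpath;

		/* ... vmentry/vmexit happens here ... */
		fastpath = handle_fastpath_set_msr_irqoff(vcpu);
		if (fastpath == EXIT_FASTPATH_CONT_RUN)
			continue;	/* re-enter the guest immediately */
		break;			/* fall back to the full exit handler */
	}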