[PATCH 02/13] KVM: nSVM: don't call nested_sync_control_from_vmcb02 on each VM exit

From: Maxim Levitsky
Date: Thu Nov 17 2022 - 09:35:46 EST


Calling nested_sync_control_from_vmcb02 on each VM exit (nested or not)
was an attempt to keep the int_ctl and event injection fields in the
vmcb12 cache up to date on every VM exit.

However, no other field of the vmcb12 cache is kept up to date this way;
for consistency, do the sync only on a nested VM exit, and write the
values directly into vmcb12 instead of into the cache.

No functional change intended.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
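Note for reviewers: a pseudocode sketch of the control flow change. The
call sites below are simplified from the diff, not the actual kernel
code:

Before (on every physical VM exit while L2 is running):

	svm_vcpu_run():
		if (is_guest_mode(vcpu))
			nested_sync_control_from_vmcb02(svm);	/* vmcb02 -> cache */
	nested_svm_vmexit():
		vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;	/* cache -> vmcb12 */

After (only when actually exiting to L1):

	nested_svm_vmexit():
		nested_sync_control_from_vmcb02(svm, vmcb12);	/* vmcb02 -> vmcb12 */
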
 arch/x86/kvm/svm/nested.c | 17 ++++++++---------
 arch/x86/kvm/svm/svm.c    |  2 --
 arch/x86/kvm/svm/svm.h    |  1 -
 3 files changed, 8 insertions(+), 12 deletions(-)
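
Also for reference, a minimal standalone sketch of the masked int_ctl
read-back that this patch moves into nested_svm_vmexit(). This is not
the kernel code: sync_int_ctl() is a hypothetical helper, the V_IRQ
special-casing between the two nested.c hunks is omitted, and the bit
values are redefined here only so the sketch compiles on its own (they
are assumed to match V_TPR_MASK/V_IRQ_MASK/V_GIF_MASK in
arch/x86/include/asm/svm.h):

#include <stdint.h>

#define V_TPR_MASK 0x0f
#define V_IRQ_MASK (1 << 8)
#define V_GIF_MASK (1 << 9)

/*
 * Merge the bits the CPU may have written into vmcb02's int_ctl back
 * into the value L1 set in vmcb12, leaving all other bits untouched.
 * V_GIF is synced only when vGIF is exposed to L1 (nested_vgif stands
 * in for nested_vgif_enabled()).
 */
static uint32_t sync_int_ctl(uint32_t vmcb12_int_ctl,
			     uint32_t vmcb02_int_ctl, int nested_vgif)
{
	uint32_t mask = V_IRQ_MASK | V_TPR_MASK;

	if (nested_vgif)
		mask |= V_GIF_MASK;

	return (vmcb12_int_ctl & ~mask) | (vmcb02_int_ctl & mask);
}

With the patch applied, this merge runs once per nested VM exit against
vmcb12 directly, instead of once per physical VM exit against the
svm->nested.ctl cache.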

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 43cc4a5d22e012..91a51e75717dca 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -407,11 +407,12 @@ void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
  * Synchronize fields that are written by the processor, so that
  * they can be copied back into the vmcb12.
  */
-void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
+static void nested_sync_control_from_vmcb02(struct vcpu_svm *svm,
+					    struct vmcb *vmcb12)
 {
 	u32 mask;
-	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
-	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
+	vmcb12->control.event_inj = svm->vmcb->control.event_inj;
+	vmcb12->control.event_inj_err = svm->vmcb->control.event_inj_err;
 
 	/* Only a few fields of int_ctl are written by the processor. */
 	mask = V_IRQ_MASK | V_TPR_MASK;
@@ -431,8 +432,8 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
 	if (nested_vgif_enabled(svm))
 		mask |= V_GIF_MASK;
 
-	svm->nested.ctl.int_ctl &= ~mask;
-	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
+	vmcb12->control.int_ctl &= ~mask;
+	vmcb12->control.int_ctl |= svm->vmcb->control.int_ctl & mask;
 }
 
 /*
@@ -985,13 +986,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
 		nested_save_pending_event_to_vmcb12(svm, vmcb12);
 
+	nested_sync_control_from_vmcb02(svm, vmcb12);
+
 	if (svm->nrips_enabled)
 		vmcb12->control.next_rip = vmcb02->control.next_rip;
 
-	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
-	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
-	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
-
 	if (!kvm_pause_in_guest(vcpu->kvm)) {
 		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 527f18d8cc4489..03acbe8ff34edb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4016,8 +4016,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	svm->next_rip = 0;
 	if (is_guest_mode(vcpu)) {
-		nested_sync_control_from_vmcb02(svm);
-
 		/* Track VMRUNs that have made past consistency checking */
 		if (svm->nested.nested_run_pending &&
 		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 199a2ecef1cec6..f5383104d00580 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -618,7 +618,6 @@ void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
 				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
 				    struct vmcb_save_area *save);
-void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
 
--
2.34.3