Re: [PATCH v3 2/4] KVM: x86: add more information to the kvm_entry tracepoint

From: Paolo Bonzini
Date: Fri Nov 24 2023 - 11:08:37 EST


On 9/28/23 12:36, Maxim Levitsky wrote:
Add VMX/SVM-specific interrupt injection info to the kvm_entry tracepoint.
Also add a flag showing whether an immediate VM exit is set to happen
after the entry.
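
With the new fields, a kvm_entry trace line could look roughly like this
(values are illustrative only: a #PF injection with a pending error code
and an immediate exit requested):

  kvm_entry: vcpu 0, rip 0xffffffff812345a0 inj 0x80000b0e inj_error_code 0x00000006 [req_imm_exit]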

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 5 ++++-
arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
arch/x86/kvm/trace.h | 15 +++++++++++++--
arch/x86/kvm/vmx/vmx.c | 12 ++++++++++++
5 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index f654a7f4cc8c0c..346fed6e3c33aa 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -99,6 +99,7 @@ KVM_X86_OP(get_l2_tsc_multiplier)
KVM_X86_OP(write_tsc_offset)
KVM_X86_OP(write_tsc_multiplier)
KVM_X86_OP(get_exit_info)
+KVM_X86_OP(get_entry_info)
KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
KVM_X86_OP(sched_in)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 383a1d0cc0743b..321721813474f7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1679,13 +1679,16 @@ struct kvm_x86_ops {
void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
/*
- * Retrieve somewhat arbitrary exit information. Intended to
+ * Retrieve somewhat arbitrary exit/entry information. Intended to
* be used only from within tracepoints or error paths.
*/
void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2,
u32 *exit_int_info, u32 *exit_int_info_err_code);
+ void (*get_entry_info)(struct kvm_vcpu *vcpu,
+ u32 *inj_info, u32 *inj_info_error_code);
+
int (*check_intercept)(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 60b130b7f9d510..cd65c04be3d0e2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3504,6 +3504,22 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
*error_code = 0;
}
+static void svm_get_entry_info(struct kvm_vcpu *vcpu,
+ u32 *inj_info,
+ u32 *inj_info_error_code)
+{
+ struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+ *inj_info = control->event_inj;
+
+ if ((*inj_info & SVM_EXITINTINFO_VALID) &&
+ (*inj_info & SVM_EXITINTINFO_VALID_ERR))
+ *inj_info_error_code = control->event_inj_err;
+ else
+ *inj_info_error_code = 0;
+
+}
+
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4992,6 +5008,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
.get_exit_info = svm_get_exit_info,
+ .get_entry_info = svm_get_entry_info,
.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 83843379813ee3..28e8a63368cc02 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -21,14 +21,25 @@ TRACE_EVENT(kvm_entry,
TP_STRUCT__entry(
__field( unsigned int, vcpu_id )
__field( unsigned long, rip )
- ),
+ __field( u32, inj_info )
+ __field( u32, inj_info_err )
+ __field( bool, req_imm_exit )
+ ),
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->rip = kvm_rip_read(vcpu);
+ __entry->req_imm_exit = vcpu->arch.req_immediate_exit;
+
+ static_call(kvm_x86_get_entry_info)(vcpu,
+ &__entry->inj_info,
+ &__entry->inj_info_err);
),
- TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
+ TP_printk("vcpu %u, rip 0x%lx inj 0x%08x inj_error_code 0x%08x%s",
+ __entry->vcpu_id, __entry->rip,
+ __entry->inj_info, __entry->inj_info_err,
+ __entry->req_imm_exit ? " [req_imm_exit]" : "")
);
/*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index eb7e42235e8811..9dd13f52d4999c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6156,6 +6156,17 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
}
}
+static void vmx_get_entry_info(struct kvm_vcpu *vcpu,
+ u32 *inj_info,
+ u32 *inj_info_error_code)
+{
+ *inj_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+ if (is_exception_with_error_code(*inj_info))
+ *inj_info_error_code = vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
+ else
+ *inj_info_error_code = 0;
+}
+
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
if (vmx->pml_pg) {
@@ -8297,6 +8308,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_mt_mask = vmx_get_mt_mask,
.get_exit_info = vmx_get_exit_info,
+ .get_entry_info = vmx_get_entry_info,
.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>