Re: [PATCH v18 007/121] KVM: VMX: Reorder vmx initialization with kvm vendor initialization

From: Yuan Yao
Date: Mon Jan 29 2024 - 03:57:43 EST


On Mon, Jan 22, 2024 at 03:52:43PM -0800, isaku.yamahata@xxxxxxxxx wrote:
> From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
>
> To match the vmx_exit() cleanup order. Now that vmx_init() is called
> before kvm_x86_vendor_init(), vmx_init() can initialize
> loaded_vmcss_on_cpu. Opportunistically move that initialization back
> into vmx_init().
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> ---
> v18:
> - move the loaded_vmcss_on_cpu initialization to vmx_init().
> - fix error path of vt_init(). by Chao and Binbin
> ---
> arch/x86/kvm/vmx/main.c | 17 +++++++----------
> arch/x86/kvm/vmx/vmx.c | 6 ++++--
> arch/x86/kvm/vmx/x86_ops.h | 2 --
> 3 files changed, 11 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 18cecf12c7c8..443db8ec5cd5 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -171,7 +171,7 @@ struct kvm_x86_init_ops vt_init_ops __initdata = {
> static int __init vt_init(void)
> {
> unsigned int vcpu_size, vcpu_align;
> - int cpu, r;
> + int r;
>
> if (!kvm_is_vmx_supported())
> return -EOPNOTSUPP;
> @@ -182,18 +182,14 @@ static int __init vt_init(void)
> */
> hv_init_evmcs();
>
> - /* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
> - for_each_possible_cpu(cpu)
> - INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> -
> - r = kvm_x86_vendor_init(&vt_init_ops);
> - if (r)
> - return r;
> -
> r = vmx_init();
> if (r)
> goto err_vmx_init;
>
> + r = kvm_x86_vendor_init(&vt_init_ops);

Doing kvm_x86_vendor_init() *after* vmx_init() leads to
"enable_ept" being used before it is set to 0 in some cases.

vmx_init() depends on the "enable_ept" variable in these two places:
  vmx_setup_l1d_flush()
  allow_smaller_maxphyaddr = true;

And "enable_ept" can be set to 0 in:
  kvm_x86_vendor_init()
    vmx_hardware_setup()
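
For reference, a rough sketch of the hazard (paraphrasing the relevant
code, not a verbatim quote; note vmx_setup_l1d_flush() only consults
enable_ept when X86_BUG_L1TF is present):

	/* vmx_init(), now called first, consumes enable_ept: */
	int __init vmx_init(void)
	{
		...
		/* reads enable_ept on L1TF-affected CPUs */
		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
		...
		/* reads enable_ept before vendor init may clear it */
		if (!enable_ept)
			allow_smaller_maxphyaddr = true;
		...
	}

	/* kvm_x86_vendor_init() -> vmx_hardware_setup() runs afterwards: */
	__init int vmx_hardware_setup(void)
	{
		...
		if (!cpu_has_vmx_ept() || ...)
			enable_ept = 0;	/* too late for vmx_init() above */
		...
	}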

> + if (r)
> + goto err_vendor_init;
> +
> /*
> * Common KVM initialization _must_ come last, after this, /dev/kvm is
> * exposed to userspace!
> @@ -207,9 +203,10 @@ static int __init vt_init(void)
> return 0;
>
> err_kvm_init:
> + kvm_x86_vendor_exit();
> +err_vendor_init:
> vmx_exit();
> err_vmx_init:
> - kvm_x86_vendor_exit();
> return r;
> }
> module_init(vt_init);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 8efb956591d5..3f4dad3acb13 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -477,7 +477,7 @@ DEFINE_PER_CPU(struct vmcs *, current_vmcs);
> * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
> * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
> */
> -DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
> +static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
>
> static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
> static DEFINE_SPINLOCK(vmx_vpid_lock);
> @@ -8528,8 +8528,10 @@ int __init vmx_init(void)
> if (r)
> return r;
>
> - for_each_possible_cpu(cpu)
> + for_each_possible_cpu(cpu) {
> + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> pi_init_cpu(cpu);
> + }
>
> cpu_emergency_register_virt_callback(vmx_emergency_disable);
>
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index b936388853ab..bca2d27b3dfd 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -14,8 +14,6 @@ static inline __init void hv_init_evmcs(void) {}
> static inline void hv_reset_evmcs(void) {}
> #endif /* IS_ENABLED(CONFIG_HYPERV) */
>
> -DECLARE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
> -
> bool kvm_is_vmx_supported(void);
> int __init vmx_init(void);
> void vmx_exit(void);
> --
> 2.25.1
>
>
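If the new ordering is kept, one possible (hypothetical, untested)
shape would be to hoist the enable_ept-dependent pieces out of
vmx_init() and run them only after kvm_x86_vendor_init(), e.g.:

	r = vmx_init();
	if (r)
		goto err_vmx_init;

	r = kvm_x86_vendor_init(&vt_init_ops);	/* settles enable_ept */
	if (r)
		goto err_vendor_init;

	/* hypothetical: hoisted out of vmx_init() */
	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
	if (r)
		goto err_vendor_init;

	if (!enable_ept)
		allow_smaller_maxphyaddr = true;

Otherwise, keeping kvm_x86_vendor_init() first, as before this patch,
avoids the problem.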