Re: [PATCH v10 5/9] KVM: x86: Virtualize CR3.LAM_{U48,U57}

From: Sean Christopherson
Date: Wed Aug 16 2023 - 17:45:23 EST


On Wed, Jul 19, 2023, Binbin Wu wrote:
> Add support to allow guests to set the two new CR3 non-address control bits
> that enable the Intel CPU feature Linear Address Masking (LAM) on user
> pointers.

Same feedback as the LAM_SUP patch.
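
For readers who haven't followed the series: the two bits in question are
CR3.LAM_U57 (bit 61, masks user pointer bits 62:57) and CR3.LAM_U48 (bit 62,
masks bits 62:48).  Both sit well above any supported guest MAXPHYADDR, which
is why the legality check below strips them before the GPA check.  A rough,
untested sketch of the idea; the names and the open-coded MAXPHYADDR mask are
illustrative, not KVM's real helpers:

        /* Illustrative definitions; see asm/processor-flags.h for the real ones. */
        #define CR3_LAM_U57     (1ULL << 61)    /* mask user pointer bits 62:57 */
        #define CR3_LAM_U48     (1ULL << 62)    /* mask user pointer bits 62:48 */

        static bool cr3_is_legal(u64 cr3, u64 gpa_mask, bool guest_has_lam)
        {
                /* The LAM control bits are not part of the pgd address. */
                if (guest_has_lam)
                        cr3 &= ~(CR3_LAM_U57 | CR3_LAM_U48);

                /* Anything left above the guest's MAXPHYADDR is illegal. */
                return !(cr3 & ~gpa_mask);
        }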

> ---
>  arch/x86/kvm/cpuid.h   | 3 +++
>  arch/x86/kvm/mmu.h     | 8 ++++++++
>  arch/x86/kvm/mmu/mmu.c | 2 +-
>  arch/x86/kvm/vmx/vmx.c | 3 ++-
>  4 files changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index 8b26d946f3e3..274f41d2250b 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -285,6 +285,9 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
>
>  static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>  {
> +        if (guest_can_use(vcpu, X86_FEATURE_LAM))
> +                cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
> +
>          return kvm_vcpu_is_legal_gpa(vcpu, cr3);
>  }
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 92d5a1924fc1..e92395e6b876 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -144,6 +144,14 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
>          return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
>  }
>
> +static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
> +{
> +        if (!guest_can_use(vcpu, X86_FEATURE_LAM))
> +                return 0;
> +
> +        return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
> +}
> +
>  static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
>  {
>          u64 root_hpa = vcpu->arch.mmu->root.hpa;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index ec169f5c7dce..0285536346c1 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3819,7 +3819,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
>          hpa_t root;
>
>          root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
> -        root_gfn = root_pgd >> PAGE_SHIFT;
> +        root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;

And as mentioned previously, this should be in the patch that adds __PT_BASE_ADDR_MASK.
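
To make the "why" here concrete: with LAM enabled the guest CR3 can now carry
bit 61/62, and without the masking those bits would land in root_gfn and trip
mmu_check_root().  Quick sketch, assuming __PT_BASE_ADDR_MASK is the usual
bits 51:12 page-frame mask:

        u64 cr3     = BIT_ULL(62) | 0x1000;     /* LAM_U48 + root table at gfn 1 */
        u64 old_gfn = cr3 >> PAGE_SHIFT;        /* 0x4000000000001, bogus gfn */
        u64 new_gfn = (cr3 & GENMASK_ULL(51, 12)) >> PAGE_SHIFT;  /* 1, as intended */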

>          if (mmu_check_root(vcpu, root_gfn))
>                  return 1;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index a0d6ea87a2d0..bcee5dc3dd0b 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -3358,7 +3358,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>                  update_guest_cr3 = false;
>                  vmx_ept_load_pdptrs(vcpu);
>          } else {
> -                guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
> +                guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
> +                            kvm_get_active_cr3_lam_bits(vcpu);
>          }
>
>          if (update_guest_cr3)
> --
> 2.25.1
>