Re: [RFC PATCH 4/5] KVM: TDX: Implement moving private pages between 2 TDs

From: Isaku Yamahata
Date: Fri Jun 02 2023 - 03:00:29 EST


On Fri, Apr 07, 2023 at 08:19:20PM +0000,
Sagi Shahar <sagis@xxxxxxxxxx> wrote:

> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 327dee4f6170e..685528fdc0ad6 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -296,6 +296,23 @@ static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
>         trace_kvm_mmu_get_page(sp, true);
> }
>
> +static struct kvm_mmu_page *
> +kvm_tdp_mmu_get_vcpu_root_no_alloc(struct kvm_vcpu *vcpu, union kvm_mmu_page_role role)
> +{
> +        struct kvm *kvm = vcpu->kvm;
> +        struct kvm_mmu_page *root;
> +
> +        lockdep_assert_held_read(&kvm->mmu_lock);

Because the caller kvm_tdp_mmu_get_vcpu_root() holds the write lock,
this should be lockdep_assert_held(&kvm->mmu_lock).
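
I.e. something like this (untested, just a sketch of the relaxed
assertion on top of the lookup already in the patch):

static struct kvm_mmu_page *
kvm_tdp_mmu_get_vcpu_root_no_alloc(struct kvm_vcpu *vcpu,
                                   union kvm_mmu_page_role role)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        /*
         * Callers may hold mmu_lock for read or for write
         * (kvm_tdp_mmu_get_vcpu_root() takes it for write), so only
         * assert that the lock is held in either mode.
         */
        lockdep_assert_held(&kvm->mmu_lock);

        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
                if (root->role.word == role.word &&
                    kvm_tdp_mmu_get_root(root))
                        return root;
        }

        return NULL;
}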

Thanks,

> +
> +        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
> +                if (root->role.word == role.word &&
> +                    kvm_tdp_mmu_get_root(root))
> +                        return root;
> +        }
> +
> +        return NULL;
> +}
> +
> static struct kvm_mmu_page *kvm_tdp_mmu_get_vcpu_root(struct kvm_vcpu *vcpu,
>                                                       bool private)
> {
> @@ -311,11 +328,9 @@ static struct kvm_mmu_page *kvm_tdp_mmu_get_vcpu_root(struct kvm_vcpu *vcpu,
>          */
>         if (private)
>                 kvm_mmu_page_role_set_private(&role);
> -        for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
> -                if (root->role.word == role.word &&
> -                    kvm_tdp_mmu_get_root(root))
> -                        goto out;
> -        }
> +        root = kvm_tdp_mmu_get_vcpu_root_no_alloc(vcpu, role);
> +        if (!!root)
> +                goto out;
>
>         root = tdp_mmu_alloc_sp(vcpu, role);
>         tdp_mmu_init_sp(root, NULL, 0);
> @@ -330,6 +345,58 @@ static struct kvm_mmu_page *kvm_tdp_mmu_get_vcpu_root(struct kvm_vcpu *vcpu,
>         return root;
> }
>
> +hpa_t kvm_tdp_mmu_move_private_pages_from(struct kvm_vcpu *vcpu,
> +                                          struct kvm_vcpu *src_vcpu)
> +{
> +        union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
> +        struct kvm *kvm = vcpu->kvm;
> +        struct kvm *src_kvm = src_vcpu->kvm;
> +        struct kvm_mmu_page *private_root = NULL;
> +        struct kvm_mmu_page *root;
> +        s64 num_private_pages, old;
> +
> +        lockdep_assert_held_write(&vcpu->kvm->mmu_lock);
> +        lockdep_assert_held_write(&src_vcpu->kvm->mmu_lock);
> +
> +        /* Find the private root of the source. */
> +        kvm_mmu_page_role_set_private(&role);
> +        for_each_tdp_mmu_root(src_kvm, root, kvm_mmu_role_as_id(role)) {
> +                if (root->role.word == role.word) {
> +                        private_root = root;
> +                        break;
> +                }
> +        }
> +        if (!private_root)
> +                return INVALID_PAGE;
> +
> +        /* Remove the private root from the src kvm and add it to dst kvm. */
> +        list_del_rcu(&private_root->link);
> +        list_add_rcu(&private_root->link, &kvm->arch.tdp_mmu_roots);
> +
> +        num_private_pages = atomic64_read(&src_kvm->arch.tdp_private_mmu_pages);
> +        old = atomic64_cmpxchg(&kvm->arch.tdp_private_mmu_pages, 0,
> +                               num_private_pages);
> +        /* The destination VM should have no private pages at this point. */
> +        WARN_ON(old);
> +        atomic64_set(&src_kvm->arch.tdp_private_mmu_pages, 0);
> +
> +        return __pa(private_root->spt);
> +}
> +
> +hpa_t kvm_tdp_mmu_get_vcpu_root_hpa_no_alloc(struct kvm_vcpu *vcpu, bool private)
> +{
> +        struct kvm_mmu_page *root;
> +        union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
> +
> +        if (private)
> +                kvm_mmu_page_role_set_private(&role);
> +        root = kvm_tdp_mmu_get_vcpu_root_no_alloc(vcpu, role);
> +        if (!root)
> +                return INVALID_PAGE;
> +
> +        return __pa(root->spt);
> +}
> +
> hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu, bool private)
> {
>         return __pa(kvm_tdp_mmu_get_vcpu_root(vcpu, private)->spt);

--
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>