Re: [PATCH v5 2/7] KVM: x86/mmu: Rename NX huge pages fields/functions for consistency

From: Yan Zhao
Date: Sun Oct 09 2022 - 22:27:38 EST


On Fri, Sep 30, 2022 at 11:48:49PM +0000, Sean Christopherson wrote:
<...>
> -static void kvm_recover_nx_lpages(struct kvm *kvm)
> +static void kvm_recover_nx_huge_pages(struct kvm *kvm)
>  {
>  	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
>  	int rcu_idx;
> @@ -6833,23 +6834,25 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>  	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
>  	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
>  	for ( ; to_zap; --to_zap) {
> -		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
> +		if (list_empty(&kvm->arch.possible_nx_huge_pages))
>  			break;
> 
>  		/*
>  		 * We use a separate list instead of just using active_mmu_pages
> -		 * because the number of lpage_disallowed pages is expected to
> -		 * be relatively small compared to the total.
> +		 * because the number of shadow pages that can be replaced with
> +		 * an NX huge page is expected to be relatively small compared
> +		 * to the total number of shadow pages. And because the TDP MMU
> +		 * doesn't use active_mmu_pages.
>  		 */
> -		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
> +		sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
>  				      struct kvm_mmu_page,
> -				      lpage_disallowed_link);
> -		WARN_ON_ONCE(!sp->lpage_disallowed);
> +				      possible_nx_huge_page_link);
> +		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
>  		if (is_tdp_mmu_page(sp)) {
>  			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
>  		} else {
>  			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
> -			WARN_ON_ONCE(sp->lpage_disallowed);
> +			WARN_ON_ONCE(sp->nx_huge_page_disallowed);
Can this WARN_ON_ONCE(sp->nx_huge_page_disallowed) also be applied to the
TDP MMU case, given that mmu_lock is now held for write here?
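
Something like below? This is just an untested sketch of the idea, and it
assumes kvm_tdp_mmu_zap_sp() has already cleared nx_huge_page_disallowed
by the time it returns, given that mmu_lock is held for write:

	if (is_tdp_mmu_page(sp)) {
		flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		/* Zapped under write lock, so the flag should be cleared. */
		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
	} else {
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
	}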

Thanks
Yan
>  		}
> 
>  		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
> @@ -6870,7 +6873,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>  	srcu_read_unlock(&kvm->srcu, rcu_idx);
>  }
>