Re: [PATCH 2/3] KVM: x86: Use standard mmu_notifier invalidate hooks for APIC access page

From: Alistair Popple
Date: Mon Jun 05 2023 - 22:21:09 EST



Again, I'm no KVM expert, but I read through this and it all looked
correct and made sense, so feel free to add:

Reviewed-by: Alistair Popple <apopple@xxxxxxxxxx>

Sean Christopherson <seanjc@xxxxxxxxxx> writes:

> Now that KVM honors past and in-progress mmu_notifier invalidations when
> reloading the APIC-access page, use KVM's "standard" invalidation hooks
> to trigger a reload and delete the one-off usage of invalidate_range().
>
> Aside from eliminating one-off code in KVM, dropping KVM's use of
> invalidate_range() will allow common mmu_notifier to redefine the API to
> be more strictly focused on invalidating secondary TLBs that share the
> primary MMU's page tables.
>
> Suggested-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
> Cc: Alistair Popple <apopple@xxxxxxxxxx>
> Cc: Robin Murphy <robin.murphy@xxxxxxx>
> Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
> ---
> arch/x86/kvm/mmu/mmu.c | 3 +++
> arch/x86/kvm/x86.c | 14 --------------
> include/linux/kvm_host.h | 3 ---
> virt/kvm/kvm_main.c | 18 ------------------
> 4 files changed, 3 insertions(+), 35 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index c8961f45e3b1..01a11ce68e57 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1600,6 +1600,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
> if (tdp_mmu_enabled)
> flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
>
> + if (range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
> + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
> +
> return flush;
> }
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index ceb7c5e9cf9e..141a62e59d2b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10431,20 +10431,6 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
> vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
> }
>
> -void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
> - unsigned long start, unsigned long end)
> -{
> - unsigned long apic_address;
> -
> - /*
> - * The physical address of apic access page is stored in the VMCS.
> - * Update it when it becomes invalid.
> - */
> - apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
> - if (start <= apic_address && apic_address < end)
> - kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
> -}
> -
> void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
> {
> static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 0e571e973bc2..cb66f4100be7 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -2237,9 +2237,6 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
> }
> #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
>
> -void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
> - unsigned long start, unsigned long end);
> -
> void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
>
> #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index cb5c13eee193..844ff6b0b21d 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -154,11 +154,6 @@ static unsigned long long kvm_active_vms;
>
> static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
>
> -__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
> - unsigned long start, unsigned long end)
> -{
> -}
> -
> __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
> {
> }
> @@ -521,18 +516,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
> return container_of(mn, struct kvm, mmu_notifier);
> }
>
> -static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
> - struct mm_struct *mm,
> - unsigned long start, unsigned long end)
> -{
> - struct kvm *kvm = mmu_notifier_to_kvm(mn);
> - int idx;
> -
> - idx = srcu_read_lock(&kvm->srcu);
> - kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
> - srcu_read_unlock(&kvm->srcu, idx);
> -}
> -
> typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
>
> typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
> @@ -892,7 +875,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
> }
>
> static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
> - .invalidate_range = kvm_mmu_notifier_invalidate_range,
> .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
> .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
> .clear_flush_young = kvm_mmu_notifier_clear_flush_young,