Re: [PATCH 2/2] KVM: x86: MMU: Move handle_mmio_page_fault() call to kvm_mmu_page_fault()

From: Paolo Bonzini
Date: Mon Feb 22 2016 - 07:24:14 EST

On 22/02/2016 09:23, Takuya Yoshikawa wrote:
> Rather than placing a handle_mmio_page_fault() call in each
> vcpu->arch.mmu.page_fault() handler, moving it up to
> kvm_mmu_page_fault() makes the code better:
>
> - avoids code duplication
> - removes an extra error_code check done for kvm_arch_async_page_ready(),
>   the other caller of vcpu->arch.mmu.page_fault()
> - avoids returning both RET_MMIO_PF_* values and raw integer values
> from vcpu->arch.mmu.page_fault()
>
> Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@xxxxxxxxxxxxx>
> ---
> arch/x86/kvm/mmu.c | 39 ++++++++++++++++-----------------------
> arch/x86/kvm/paging_tmpl.h | 19 ++++++-------------
> 2 files changed, 22 insertions(+), 36 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index a28b734..2ce3892 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3370,13 +3370,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
>
> pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
>
> - if (unlikely(error_code & PFERR_RSVD_MASK)) {
> - r = handle_mmio_page_fault(vcpu, gva, true);
> -
> - if (likely(r != RET_MMIO_PF_INVALID))
> - return r;
> - }
> -
> r = mmu_topup_memory_caches(vcpu);
> if (r)
> return r;
> @@ -3460,13 +3453,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
>
> MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
>
> - if (unlikely(error_code & PFERR_RSVD_MASK)) {
> - r = handle_mmio_page_fault(vcpu, gpa, true);
> -
> - if (likely(r != RET_MMIO_PF_INVALID))
> - return r;
> - }
> -
> r = mmu_topup_memory_caches(vcpu);
> if (r)
> return r;
> @@ -4361,18 +4347,27 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
> enum emulation_result er;
> bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
>
> + if (unlikely(error_code & PFERR_RSVD_MASK)) {
> + r = handle_mmio_page_fault(vcpu, cr2, direct);
> + if (r == RET_MMIO_PF_EMULATE) {
> + emulation_type = 0;
> + goto emulate;
> + }
> + if (r == RET_MMIO_PF_RETRY)
> + return 1;
> + if (r < 0)
> + return r;

It's a bit weird how RET_MMIO_PF_RETRY is zero, but unifying all the
return values of page fault routines is best left for another day.
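
For reference, a minimal sketch of the convention in question, assuming the
RET_MMIO_PF_* enum as mmu.c defines it in this period (the values are not
part of the quoted patch, so treat them as illustrative):

	/*
	 * handle_mmio_page_fault() speaks RET_MMIO_PF_*, while
	 * kvm_mmu_page_fault() returns 1 to let the guest retry and
	 * <= 0 on error, hence the explicit translation in the hunk
	 * above.  RETRY happening to be 0 is what makes the mix of
	 * conventions easy to get wrong.
	 */
	enum {
		RET_MMIO_PF_EMULATE = 1,	/* emulate the faulting access */
		RET_MMIO_PF_INVALID = 2,	/* let the real #PF path update the mmio spte */
		RET_MMIO_PF_RETRY   = 0,	/* let the CPU fault again on the address */
		RET_MMIO_PF_BUG     = -1	/* a bug was detected */
	};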

Applied to queue, thanks.

Paolo

> + }
> +
> r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
> if (r < 0)
> - goto out;
> -
> - if (!r) {
> - r = 1;
> - goto out;
> - }
> + return r;
> + if (!r)
> + return 1;
>
> if (mmio_info_in_cache(vcpu, cr2, direct))
> emulation_type = 0;
> -
> +emulate:
> er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
>
> switch (er) {
> @@ -4386,8 +4381,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
> default:
> BUG();
> }
> -out:
> - return r;
> }
> EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 6c9fed9..05827ff 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -702,24 +702,17 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>
> pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
>
> - if (unlikely(error_code & PFERR_RSVD_MASK)) {
> - r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
> - if (likely(r != RET_MMIO_PF_INVALID))
> - return r;
> -
> - /*
> - * page fault with PFEC.RSVD = 1 is caused by shadow
> - * page fault, should not be used to walk guest page
> - * table.
> - */
> - error_code &= ~PFERR_RSVD_MASK;
> - };
> -
> r = mmu_topup_memory_caches(vcpu);
> if (r)
> return r;
>
> /*
> + * If PFEC.RSVD is set, this is a shadow page fault.
> + * The bit needs to be cleared before walking guest page tables.
> + */
> + error_code &= ~PFERR_RSVD_MASK;
> +
> + /*
> * Look up the guest pte for the faulting address.
> */
> r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
>