Re: [PATCH v3 02/11] KVM: MMU: introduce kvm_mmu_gfn_{allow,disallow}_lpage

From: Paolo Bonzini
Date: Fri Feb 19 2016 - 06:09:18 EST




On 14/02/2016 12:31, Xiao Guangrong wrote:
> Abstract the common operations from account_shadowed() and
> unaccount_shadowed(), then introduce kvm_mmu_gfn_disallow_lpage()
> and kvm_mmu_gfn_allow_lpage()
>
> These two functions will be used by page tracking in a later patch
>
> Signed-off-by: Xiao Guangrong <guangrong.xiao@xxxxxxxxxxxxxxx>
> ---
> arch/x86/kvm/mmu.c | 38 +++++++++++++++++++++++++-------------
> arch/x86/kvm/mmu.h | 3 +++
> 2 files changed, 28 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index de9e992..e1bb66c 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -776,21 +776,39 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
> return &slot->arch.lpage_info[level - 2][idx];
> }
>
> +static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
> + gfn_t gfn, int count)
> +{
> + struct kvm_lpage_info *linfo;
> + int i;
> +
> + for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
> + linfo = lpage_info_slot(gfn, slot, i);
> + linfo->disallow_lpage += count;
> + WARN_ON(linfo->disallow_lpage < 0);
> + }
> +}
> +
> +void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
> +{
> + update_gfn_disallow_lpage_count(slot, gfn, 1);
> +}
> +
> +void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
> +{
> + update_gfn_disallow_lpage_count(slot, gfn, -1);
> +}
> +
> static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
> {
> struct kvm_memslots *slots;
> struct kvm_memory_slot *slot;
> - struct kvm_lpage_info *linfo;
> gfn_t gfn;
> - int i;
>
> gfn = sp->gfn;
> slots = kvm_memslots_for_spte_role(kvm, sp->role);
> slot = __gfn_to_memslot(slots, gfn);
> - for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
> - linfo = lpage_info_slot(gfn, slot, i);
> - linfo->disallow_lpage += 1;
> - }
> + kvm_mmu_gfn_disallow_lpage(slot, gfn);
> kvm->arch.indirect_shadow_pages++;
> }
>
> @@ -798,18 +816,12 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
> {
> struct kvm_memslots *slots;
> struct kvm_memory_slot *slot;
> - struct kvm_lpage_info *linfo;
> gfn_t gfn;
> - int i;
>
> gfn = sp->gfn;
> slots = kvm_memslots_for_spte_role(kvm, sp->role);
> slot = __gfn_to_memslot(slots, gfn);
> - for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
> - linfo = lpage_info_slot(gfn, slot, i);
> - linfo->disallow_lpage -= 1;
> - WARN_ON(linfo->disallow_lpage < 0);
> - }
> + kvm_mmu_gfn_allow_lpage(slot, gfn);
> kvm->arch.indirect_shadow_pages--;
> }
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 55ffb7b..de92bed 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -174,4 +174,7 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>
> void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
> void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
> +
> +void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
> +void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
> #endif
>

Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>