Re: [PATCH 7/7] KVM: MMU: don't walk every parent pages while mark unsync

From: Marcelo Tosatti
Date: Mon Jun 14 2010 - 18:20:30 EST


On Fri, Jun 11, 2010 at 09:35:15PM +0800, Xiao Guangrong wrote:
> While we mark the parent's unsync_child_bitmap, if the parent is already
> unsynced, there is no need to walk its parents; this reduces some
> unnecessary workload
>
> Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx>
> ---
> arch/x86/kvm/mmu.c | 61 ++++++++++++++-------------------------------------
> 1 files changed, 17 insertions(+), 44 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index eb20682..a92863f 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -175,7 +175,7 @@ struct kvm_shadow_walk_iterator {
> shadow_walk_okay(&(_walker)); \
> shadow_walk_next(&(_walker)))
>
> -typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
> +typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
>
> static struct kmem_cache *pte_chain_cache;
> static struct kmem_cache *rmap_desc_cache;
> @@ -1024,7 +1024,6 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
> BUG();
> }
>
> -
> static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
> {
> struct kvm_pte_chain *pte_chain;
> @@ -1034,63 +1033,37 @@ static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
>
> if (!sp->multimapped && sp->parent_pte) {
> parent_sp = page_header(__pa(sp->parent_pte));
> - fn(parent_sp);
> - mmu_parent_walk(parent_sp, fn);
> + fn(parent_sp, sp->parent_pte);
> return;
> }
> +
> hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
> for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
> - if (!pte_chain->parent_ptes[i])
> + u64 *spte = pte_chain->parent_ptes[i];
> +
> + if (!spte)
> break;
> - parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
> - fn(parent_sp);
> - mmu_parent_walk(parent_sp, fn);
> + parent_sp = page_header(__pa(spte));
> + fn(parent_sp, spte);
> }
> }
>
> -static void kvm_mmu_update_unsync_bitmap(u64 *spte)
> +static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
> +static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
> {
> - unsigned int index;
> - struct kvm_mmu_page *sp = page_header(__pa(spte));
> -
> - index = spte - sp->spt;
> - if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
> - sp->unsync_children++;
> - WARN_ON(!sp->unsync_children);
> + mmu_parent_walk(sp, mark_unsync);
> }
>
> -static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
> +static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
> {
> - struct kvm_pte_chain *pte_chain;
> - struct hlist_node *node;
> - int i;
> + unsigned int index;
>
> - if (!sp->parent_pte)
> + index = spte - sp->spt;
> + if (__test_and_set_bit(index, sp->unsync_child_bitmap))
> return;
> -
> - if (!sp->multimapped) {
> - kvm_mmu_update_unsync_bitmap(sp->parent_pte);
> + if (sp->unsync_children++)
> return;

This looks wrong. If the sp has an unrelated child already marked as
unsync (which increased sp->unsync_children), you stop the walk?
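
To make the concern concrete, here is a minimal user-space sketch (my own
simplified stand-ins, not the real kvm_mmu_page/mark_unsync code): a first
child bumps the parent's counter and the walk reaches the grandparent, but a
second, unrelated child then hits the early return and the grandparents are
never revisited for it.

/*
 * Minimal user-space sketch of the scenario above. The structure and
 * names are simplified stand-ins, not the KVM implementation.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CHILDREN 8

struct fake_sp {
	const char *name;
	struct fake_sp *parent;        /* single parent, for simplicity   */
	unsigned int parent_index;     /* our slot in the parent's bitmap */
	bool unsync_child_bitmap[NR_CHILDREN];
	unsigned int unsync_children;
};

/* Mirrors the patched mark_unsync(): stop once the bit or the counter
 * was already set, instead of always recursing toward the root. */
static void mark_unsync(struct fake_sp *sp, unsigned int index)
{
	if (sp->unsync_child_bitmap[index]) {
		printf("%s: bit %u already set, stop\n", sp->name, index);
		return;
	}
	sp->unsync_child_bitmap[index] = true;
	if (sp->unsync_children++) {
		/* The case in question: an unrelated child already bumped
		 * the counter, so the walk stops here and the grandparents
		 * are not visited for this new child. */
		printf("%s: counter was already %u, stop\n",
		       sp->name, sp->unsync_children - 1);
		return;
	}
	if (sp->parent)
		mark_unsync(sp->parent, sp->parent_index);
}

int main(void)
{
	struct fake_sp grandparent = { .name = "grandparent" };
	struct fake_sp parent = { .name = "parent",
				  .parent = &grandparent, .parent_index = 0 };

	mark_unsync(&parent, 1);  /* child A: walk reaches grandparent */
	mark_unsync(&parent, 2);  /* child B: early return at parent   */
	return 0;
}

Whether that early return is safe comes down to whether a nonzero
unsync_children already guarantees that every ancestor has the
corresponding bit set in its unsync_child_bitmap.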

Applied 1-6, thanks.
