[patch 21/71] KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock

From: Greg KH
Date: Fri Sep 04 2009 - 20:27:26 EST



2.6.30-stable review patch. If anyone has any objections, please let us know.

------------------
From: Marcelo Tosatti <mtosatti@xxxxxxxxxx>

(cherry picked from commit 7c8a83b75a38a807d37f5a4398eca2a42c8cf513)

kvm_handle_hva, called by MMU notifiers, manipulates mmu data only with
the protection of mmu_lock.

Update kvm_mmu_change_mmu_pages callers to take mmu_lock, thus protecting
against concurrent kvm_handle_hva.
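
For illustration only, a minimal sketch (not the kernel code itself) of the
locking rule this change enforces: any caller that resizes the shadow-page
pool or write-protects a slot must do so under kvm->mmu_lock, since
kvm_handle_hva walks the same MMU data holding only that lock. The helper
name below is hypothetical; the real call sites are in the diff.

	/* Hypothetical caller, assuming kernel context with kvm.h included. */
	static void example_set_mmu_pages(struct kvm *kvm, unsigned int nr_pages)
	{
		/* Serialize with kvm_handle_hva(), which takes only mmu_lock. */
		spin_lock(&kvm->mmu_lock);
		kvm_mmu_change_mmu_pages(kvm, nr_pages);
		spin_unlock(&kvm->mmu_lock);
	}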

Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxx>
---
arch/x86/kvm/mmu.c | 2 --
arch/x86/kvm/x86.c | 6 ++++++
2 files changed, 6 insertions(+), 2 deletions(-)

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2729,7 +2729,6 @@ void kvm_mmu_slot_remove_write_access(st
{
struct kvm_mmu_page *sp;

- spin_lock(&kvm->mmu_lock);
list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
int i;
u64 *pt;
@@ -2744,7 +2743,6 @@ void kvm_mmu_slot_remove_write_access(st
pt[i] &= ~PT_WRITABLE_MASK;
}
kvm_flush_remote_tlbs(kvm);
- spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_all(struct kvm *kvm)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1608,10 +1608,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
return -EINVAL;

down_write(&kvm->slots_lock);
+ spin_lock(&kvm->mmu_lock);

kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

+ spin_unlock(&kvm->mmu_lock);
up_write(&kvm->slots_lock);
return 0;
}
@@ -1787,7 +1789,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kv

/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
+ spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
memslot = &kvm->memslots[log->slot];
n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4419,12 +4423,14 @@ int kvm_arch_set_memory_region(struct kv
}
}

+ spin_lock(&kvm->mmu_lock);
if (!kvm->arch.n_requested_mmu_pages) {
unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
}

kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+ spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);

return 0;

