[PATCH] KVM: do not prepare new memslot for KVM_MR_DELETE

From: Yan Zhao
Date: Wed Nov 09 2022 - 20:45:26 EST


kvm_prepare_memory_region() is not useful for KVM_MR_DELETE:
every implementation of kvm_arch_prepare_memory_region() does nothing
more than return 0 for KVM_MR_DELETE.
So skip the call to kvm_prepare_memory_region() for KVM_MR_DELETE and
avoid the unnecessary error handling for that case.

Signed-off-by: Yan Zhao <yan.y.zhao@xxxxxxxxx>
---
virt/kvm/kvm_main.c | 52 ++++++++++++++++++++++-----------------------
1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25d7872b29c1..44e7fb1c376b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1612,19 +1612,17 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
* new and KVM isn't using a ring buffer, allocate and initialize a
* new bitmap.
*/
- if (change != KVM_MR_DELETE) {
- if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
- new->dirty_bitmap = NULL;
- else if (old && old->dirty_bitmap)
- new->dirty_bitmap = old->dirty_bitmap;
- else if (!kvm->dirty_ring_size) {
- r = kvm_alloc_dirty_bitmap(new);
- if (r)
- return r;
+ if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ new->dirty_bitmap = NULL;
+ else if (old && old->dirty_bitmap)
+ new->dirty_bitmap = old->dirty_bitmap;
+ else if (!kvm->dirty_ring_size) {
+ r = kvm_alloc_dirty_bitmap(new);
+ if (r)
+ return r;

- if (kvm_dirty_log_manual_protect_and_init_set(kvm))
- bitmap_set(new->dirty_bitmap, 0, new->npages);
- }
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ bitmap_set(new->dirty_bitmap, 0, new->npages);
}

r = kvm_arch_prepare_memory_region(kvm, old, new, change);
@@ -1849,21 +1847,23 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_invalidate_memslot(kvm, old, invalid_slot);
}

- r = kvm_prepare_memory_region(kvm, old, new, change);
- if (r) {
- /*
- * For DELETE/MOVE, revert the above INVALID change. No
- * modifications required since the original slot was preserved
- * in the inactive slots. Changing the active memslots also
- * release slots_arch_lock.
- */
- if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- kvm_activate_memslot(kvm, invalid_slot, old);
- kfree(invalid_slot);
- } else {
- mutex_unlock(&kvm->slots_arch_lock);
+ if (change != KVM_MR_DELETE) {
+ r = kvm_prepare_memory_region(kvm, old, new, change);
+ if (r) {
+			/*
+			 * For MOVE, revert the above INVALID change. No
+			 * modifications required since the original slot was
+			 * preserved in the inactive slots. Changing the active
+			 * memslots also releases slots_arch_lock.
+			 */
+ if (change == KVM_MR_MOVE) {
+ kvm_activate_memslot(kvm, invalid_slot, old);
+ kfree(invalid_slot);
+ } else {
+ mutex_unlock(&kvm->slots_arch_lock);
+ }
+ return r;
}
- return r;
}

/*
--
2.17.1