[PATCH 09/27] drm/i915/gvt: Protect gfn hash table with dedicated mutex

From: Sean Christopherson
Date: Thu Dec 22 2022 - 19:59:04 EST


Add and use a new mutex, gfn_lock, to protect accesses to the hash table
used to track which gfns are write-protected when shadowing the guest's
GTT. This fixes a bug where kvmgt_page_track_write(), which doesn't hold
kvm->mmu_lock, could race with intel_gvt_page_track_remove() and trigger
a use-after-free.
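
For reference, the new locking boils down to the sketch below (illustrative
only and heavily simplified from the diff that follows; the example_*()
wrappers are made up, the other identifiers are the real kvmgt.c ones):

  /* Illustrative sketch, not part of the patch; assumes kvmgt.c context. */

  /* Write-notification path: runs without kvm->mmu_lock held. */
  static void example_track_write(struct intel_vgpu *info, gfn_t gfn)
  {
          mutex_lock(&info->gfn_lock);    /* new: serializes against removal */
          if (kvmgt_gfn_is_write_protected(info, gfn)) {
                  /* the tracked entry is guaranteed to stay live here */
          }
          mutex_unlock(&info->gfn_lock);
  }

  /* Removal path: frees the hash table entry for the gfn. */
  static void example_track_remove(struct intel_vgpu *info, gfn_t gfn)
  {
          mutex_lock(&info->gfn_lock);
          kvmgt_protect_table_del(info, gfn);
          mutex_unlock(&info->gfn_lock);
  }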

Fixing kvmgt_page_track_write() by taking kvm->mmu_lock is not an option
as mmu_lock is a r/w spinlock, and intel_vgpu_page_track_handler() might
sleep when acquiring vgpu->cache_lock deep down the callstack:

  intel_vgpu_page_track_handler()
  |
  |-> page_track->handler / ppgtt_write_protection_handler()
      |
      |-> ppgtt_handle_guest_write_page_table_bytes()
          |
          |-> ppgtt_handle_guest_write_page_table()
              |
              |-> ppgtt_handle_guest_entry_removal()
                  |
                  |-> ppgtt_invalidate_pte()
                      |
                      |-> intel_gvt_dma_unmap_guest_page()
                          |
                          |-> mutex_lock(&vgpu->cache_lock);
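
To make the constraint concrete, here is an illustrative-only contrast of the
two locking schemes (not part of the diff; the example_*() helpers are made
up, the locks and structs are the real ones):

  /* Not viable: kvm->mmu_lock is an rwlock_t, i.e. a spinning lock, so
   * taking a sleeping lock such as vgpu->cache_lock underneath it is
   * illegal ("scheduling while atomic"). */
  static void example_mmu_lock_scheme(struct kvm *kvm, struct intel_vgpu *vgpu)
  {
          write_lock(&kvm->mmu_lock);
          mutex_lock(&vgpu->cache_lock);          /* may sleep => bug */
          mutex_unlock(&vgpu->cache_lock);
          write_unlock(&kvm->mmu_lock);
  }

  /* Viable: gfn_lock is a mutex and may be held across other sleeping
   * locks, so it can cover the whole page-track handler. */
  static void example_gfn_lock_scheme(struct intel_vgpu *vgpu)
  {
          mutex_lock(&vgpu->gfn_lock);
          mutex_lock(&vgpu->cache_lock);
          mutex_unlock(&vgpu->cache_lock);
          mutex_unlock(&vgpu->gfn_lock);
  }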

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 drivers/gpu/drm/i915/gvt/gvt.h   |  1 +
 drivers/gpu/drm/i915/gvt/kvmgt.c | 65 ++++++++++++++++++++------------
 drivers/gpu/drm/i915/gvt/vgpu.c  |  1 +
 3 files changed, 43 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index dbf8d7470b2c..fbfd7eafec14 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -176,6 +176,7 @@ struct intel_vgpu {
struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
+ struct mutex gfn_lock;
int id;
bool active;
bool attached;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index ca9926061cd8..a4747e153dad 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -366,6 +366,8 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;

+ lockdep_assert_held(&info->gfn_lock);
+
hash_for_each_possible(info->ptable, p, hnode, gfn) {
if (gfn == p->gfn) {
res = p;
@@ -388,6 +390,8 @@ static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;

+ lockdep_assert_held(&info->gfn_lock);
+
if (kvmgt_gfn_is_write_protected(info, gfn))
return;

@@ -1563,60 +1567,68 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
- int idx;
+ int idx, ret = 0;

if (!info->attached)
return -ESRCH;

+ mutex_lock(&info->gfn_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gfn))
+ goto out;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}

write_lock(&kvm->mmu_lock);
-
- if (kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
-
kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ write_unlock(&kvm->mmu_lock);
+
+ srcu_read_unlock(&kvm->srcu, idx);
+
kvmgt_protect_table_add(info, gfn);
-
out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
- return 0;
+ mutex_unlock(&info->gfn_lock);
+ return ret;
}

int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
struct kvm *kvm = info->vfio_device.kvm;
struct kvm_memory_slot *slot;
- int idx;
+ int idx, ret = 0;

if (!info->attached)
return 0;

- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
+ mutex_lock(&info->gfn_lock);

if (!kvmgt_gfn_is_write_protected(info, gfn))
goto out;

+ idx = srcu_read_lock(&kvm->srcu);
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ write_lock(&kvm->mmu_lock);
kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
+ write_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+
kvmgt_protect_table_del(info, gfn);

out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
- return 0;
+ mutex_unlock(&info->gfn_lock);
+ return ret;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1627,11 +1639,13 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
container_of(node, struct intel_vgpu, track_node);

mutex_lock(&info->vgpu_lock);
+ mutex_lock(&info->gfn_lock);

if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);

+ mutex_unlock(&info->gfn_lock);
mutex_unlock(&info->vgpu_lock);
}

@@ -1644,16 +1658,19 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);

- write_lock(&kvm->mmu_lock);
+ mutex_lock(&info->gfn_lock);
for (i = 0; i < slot->npages; i++) {
gfn = slot->base_gfn + i;
if (kvmgt_gfn_is_write_protected(info, gfn)) {
+ write_lock(&kvm->mmu_lock);
kvm_slot_page_track_remove_page(kvm, slot, gfn,
KVM_PAGE_TRACK_WRITE);
+ write_unlock(&kvm->mmu_lock);
+
kvmgt_protect_table_del(info, gfn);
}
}
- write_unlock(&kvm->mmu_lock);
+ mutex_unlock(&info->gfn_lock);
}

void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 56c71474008a..f2479781b770 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -277,6 +277,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->id = IDLE_VGPU_IDR;
vgpu->gvt = gvt;
mutex_init(&vgpu->vgpu_lock);
+ mutex_init(&vgpu->gfn_lock);

for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
--
2.39.0.314.g84b9a713c41-goog