[PATCH 05/15] drm/msm: Split out idr_lock

From: Rob Clark
Date: Sat Jun 25 2022 - 18:55:18 EST


From: Rob Clark <robdclark@xxxxxxxxxxxx>

Otherwise, if we hit reclaim while pinning objects in the submit path
(where we hold queue->lock), we will block retire_worker trying to free
a submit, since __msm_gem_submit_destroy() also needs queue->lock to
remove the submit's fence id from fence_idr. Give the idr its own lock
to break that dependency.
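
A rough sketch of the dependency this breaks (illustrative only, not an
actual lockdep report; the exact reclaim path depends on the shrinker,
and call chains are abbreviated):

   submit ioctl                       retire_worker
   ------------                       -------------
   mutex_lock(&queue->lock)
   pin buffer objects
     -> direct reclaim
        -> waits on active submits
           retiring                   msm_gem_submit_put()
                                        __msm_gem_submit_destroy()
                                          mutex_lock(&queue->lock)  <-- blocked

With fence_idr under its own idr_lock, the retire path no longer needs
queue->lock and can free the submit even while a submitter holds
queue->lock across reclaim.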

Signed-off-by: Rob Clark <robdclark@xxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/msm_drv.c         |  4 ++--
 drivers/gpu/drm/msm/msm_gem_submit.c  | 10 ++++++++--
 drivers/gpu/drm/msm/msm_gpu.h         |  4 +++-
 drivers/gpu/drm/msm/msm_submitqueue.c |  1 +
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index acc940d32ab4..ace91ead2caf 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -838,13 +838,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
 	 * retired, so if the fence is not found it means there is nothing
 	 * to wait for
 	 */
-	ret = mutex_lock_interruptible(&queue->lock);
+	ret = mutex_lock_interruptible(&queue->idr_lock);
 	if (ret)
 		return ret;
 	fence = idr_find(&queue->fence_idr, fence_id);
 	if (fence)
 		fence = dma_fence_get_rcu(fence);
-	mutex_unlock(&queue->lock);
+	mutex_unlock(&queue->idr_lock);
 
 	if (!fence)
 		return 0;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c7819781879c..16c662808522 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -72,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
 	unsigned i;
 
 	if (submit->fence_id) {
-		mutex_lock(&submit->queue->lock);
+		mutex_lock(&submit->queue->idr_lock);
 		idr_remove(&submit->queue->fence_idr, submit->fence_id);
-		mutex_unlock(&submit->queue->lock);
+		mutex_unlock(&submit->queue->idr_lock);
 	}
 
 	dma_fence_put(submit->user_fence);
@@ -881,6 +881,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,

 	submit->nr_cmds = i;
 
+	mutex_lock(&queue->idr_lock);
+
 	/*
 	 * If using userspace provided seqno fence, validate that the id
 	 * is available before arming sched job. Since access to fence_idr
@@ -889,6 +891,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	 */
 	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
 			idr_find(&queue->fence_idr, args->fence)) {
+		mutex_unlock(&queue->idr_lock);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -921,6 +924,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 				submit->user_fence, 1,
 				INT_MAX, GFP_KERNEL);
 	}
+
+	mutex_unlock(&queue->idr_lock);
+
 	if (submit->fence_id < 0) {
 		ret = submit->fence_id;
 		submit->fence_id = 0;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4911943ba53b..4ca56d96344a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -457,7 +457,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  * @node:      node in the context's list of submitqueues
  * @fence_idr: maps fence-id to dma_fence for userspace visible fence
  *             seqno, protected by submitqueue lock
- * @lock:      submitqueue lock
+ * @idr_lock:  for serializing access to fence_idr
+ * @lock:      submitqueue lock for serializing submits on a queue
  * @ref:       reference count
  * @entity:    the submit job-queue
  */
@@ -470,6 +471,7 @@ struct msm_gpu_submitqueue {
 	struct msm_file_private *ctx;
 	struct list_head node;
 	struct idr fence_idr;
+	struct mutex idr_lock;
 	struct mutex lock;
 	struct kref ref;
 	struct drm_sched_entity *entity;
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index f486a3cd4e55..c6929e205b51 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 	*id = queue->id;
 
 	idr_init(&queue->fence_idr);
+	mutex_init(&queue->idr_lock);
 	mutex_init(&queue->lock);
 
 	list_add_tail(&queue->node, &ctx->submitqueues);
--
2.36.1