[PATCH 5.8 064/118] drm/i915/gem: Reduce context termination list iteration guard to RCU

From: Greg Kroah-Hartman
Date: Mon Sep 21 2020 - 12:46:34 EST


From: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>

[ Upstream commit c2314b8bd4c009793b6f9d57bc8363af034e02ca ]

As we now protect the timeline list using RCU, we can drop the
timeline->mutex guarding the list iteration during context close, as
we are only searching for an inflight request. Any new request will see
that the context is banned and will not be submitted. In doing so, pull
the checks for a concurrent submission of the request (notably
i915_request_completed()) under the engine spinlock, to fully serialise
with __i915_request_submit(). That is, in the case of preempt-to-busy,
where the request may be completed during __i915_request_submit(), we
need to be careful to sample the request status after serialising so
that we don't miss the request the engine is actually submitting.
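
[Not part of the patch: a minimal sketch of the general conversion the
commit applies, with made-up struct/function names (struct item,
any_pending_locked(), any_pending_rcu()) rather than the i915 ones.
The list mutex around the walk is replaced by an RCU read-side critical
section, and the decisive per-element test is re-done under the lock
the writers actually hold, since element state may change while the
RCU walk is in progress.]

  #include <linux/list.h>
  #include <linux/mutex.h>
  #include <linux/rcupdate.h>
  #include <linux/spinlock.h>

  struct item {
  	struct list_head link;	/* inserted with list_add_rcu() */
  	spinlock_t lock;	/* taken by writers when changing ->done */
  	bool done;
  };

  /* Before: the whole walk holds the list mutex. */
  static bool any_pending_locked(struct list_head *head,
  			       struct mutex *list_mutex)
  {
  	struct item *it;
  	bool pending = false;

  	mutex_lock(list_mutex);
  	list_for_each_entry(it, head, link) {
  		if (!it->done) {
  			pending = true;
  			break;
  		}
  	}
  	mutex_unlock(list_mutex);
  	return pending;
  }

  /* After: lock-free walk under RCU; the test is redone under it->lock. */
  static bool any_pending_rcu(struct list_head *head)
  {
  	struct item *it;
  	bool pending = false;

  	rcu_read_lock();
  	list_for_each_entry_rcu(it, head, link) {
  		spin_lock_irq(&it->lock);
  		if (!it->done)
  			pending = true;
  		spin_unlock_irq(&it->lock);
  		if (pending)
  			break;
  	}
  	rcu_read_unlock();
  	return pending;
  }

[In the actual patch the re-check done under the engine spinlock is
i915_request_completed(), which closes the preempt-to-busy window
described above.]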

Fixes: 4a3174152147 ("drm/i915/gem: Refine occupancy test in kill_context()")
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Link: https://patchwork.freedesktop.org/patch/msgid/20200806105954.7766-1-chris@xxxxxxxxxxxxxxxxxx
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@xxxxxxxxx>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
(cherry picked from commit 736e785f9b28cd9ef2d16a80960a04fd00e64b22)
Signed-off-by: Jani Nikula <jani.nikula@xxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 32 ++++++++++++---------
1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 30c229fcb4046..6dce7d8b463eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -440,29 +440,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
 	return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 {
 	struct intel_engine_cs *engine, *locked;
+	bool ret = false;
 
 	/*
 	 * Serialise with __i915_request_submit() so that it sees
 	 * is-banned?, or we know the request is already inflight.
+	 *
+	 * Note that rq->engine is unstable, and so we double
+	 * check that we have acquired the lock on the final engine.
 	 */
 	locked = READ_ONCE(rq->engine);
 	spin_lock_irq(&locked->active.lock);
 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 		spin_unlock(&locked->active.lock);
-		spin_lock(&engine->active.lock);
 		locked = engine;
+		spin_lock(&locked->active.lock);
 	}
 
-	engine = NULL;
-	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
-		engine = rq->engine;
+	if (!i915_request_completed(rq)) {
+		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+			*active = locked;
+		ret = true;
+	}
 
 	spin_unlock_irq(&locked->active.lock);
 
-	return engine;
+	return ret;
 }
 
 static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -473,17 +480,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	if (!ce->timeline)
 		return NULL;
 
-	mutex_lock(&ce->timeline->mutex);
-	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
-		if (i915_request_completed(rq))
-			break;
+	rcu_read_lock();
+	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+		if (i915_request_is_active(rq) && i915_request_completed(rq))
+			continue;
 
 		/* Check with the backend if the request is inflight */
-		engine = __active_engine(rq);
-		if (engine)
+		if (__active_engine(rq, &engine))
 			break;
 	}
-	mutex_unlock(&ce->timeline->mutex);
+	rcu_read_unlock();
 
 	return engine;
 }
--
2.25.1