[PATCH 5.13 247/351] io_uring: inline __tctx_task_work()

From: Greg Kroah-Hartman
Date: Mon Jul 19 2021 - 13:43:09 EST


From: Pavel Begunkov <asml.silence@xxxxxxxxx>

[ Upstream commit 3f18407dc6f2db0968daaa36c39a772c2c9f8ea7 ]

Inline __tctx_task_work() into tctx_task_work() in preparation for
further optimisations.
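
For readers following along, a minimal userspace sketch of the
splice-and-drain pattern the inlined loop implements (not kernel
code: work_node, work_list, drain_loop and ctx_id are illustrative
stand-ins for io_uring's types, not its real API):

#include <pthread.h>
#include <stdio.h>

struct work_node {
	struct work_node *next;
	int ctx_id;			/* stand-in for req->ctx */
};

struct work_list {
	struct work_node *first;
	pthread_mutex_t lock;		/* stand-in for tctx->task_lock */
};

static void drain_loop(struct work_list *wl)
{
	/* Lockless emptiness check, cf. wq_list_empty(&tctx->task_list). */
	while (wl->first) {
		int cur_ctx = -1;	/* "no current ctx", cf. ctx == NULL */
		struct work_node *node;

		/* Splice the whole list out under the lock, then walk it
		 * without the lock, as the patch does with tctx->task_list. */
		pthread_mutex_lock(&wl->lock);
		node = wl->first;
		wl->first = NULL;
		pthread_mutex_unlock(&wl->lock);

		while (node) {
			struct work_node *next = node->next;

			/* Batch per-context work: flush only when the
			 * context changes, mirroring ctx_flush_and_put()
			 * followed by percpu_ref_get() on the new ctx. */
			if (node->ctx_id != cur_ctx) {
				if (cur_ctx != -1)
					printf("flush ctx %d\n", cur_ctx);
				cur_ctx = node->ctx_id;
			}
			printf("run work for ctx %d\n", node->ctx_id);
			node = next;
		}
		if (cur_ctx != -1)	/* final flush, cf. ctx_flush_and_put(ctx) */
			printf("flush ctx %d\n", cur_ctx);
	}
}

int main(void)
{
	struct work_node n2 = { NULL, 7 }, n1 = { &n2, 7 }, n0 = { &n1, 3 };
	struct work_list wl = { &n0, PTHREAD_MUTEX_INITIALIZER };

	drain_loop(&wl);
	return 0;
}

The point of the splice is that the lock is held only for the O(1)
list handoff; the work handlers and the per-context ref flushing all
run with the lock dropped.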

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Link: https://lore.kernel.org/r/f9c05c4bc9763af7bd8e25ebc3c5f7b6f69148f8.1623949695.git.asml.silence@xxxxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
fs/io_uring.c | 67 ++++++++++++++++++++++++---------------------------
1 file changed, 31 insertions(+), 36 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ab1dcf69217f..262e6748c88a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1883,48 +1883,43 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 	percpu_ref_put(&ctx->refs);
 }
 
-static bool __tctx_task_work(struct io_uring_task *tctx)
-{
-	struct io_ring_ctx *ctx = NULL;
-	struct io_wq_work_list list;
-	struct io_wq_work_node *node;
-
-	if (wq_list_empty(&tctx->task_list))
-		return false;
-
-	spin_lock_irq(&tctx->task_lock);
-	list = tctx->task_list;
-	INIT_WQ_LIST(&tctx->task_list);
-	spin_unlock_irq(&tctx->task_lock);
-
-	node = list.first;
-	while (node) {
-		struct io_wq_work_node *next = node->next;
-		struct io_kiocb *req;
-
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		if (req->ctx != ctx) {
-			ctx_flush_and_put(ctx);
-			ctx = req->ctx;
-			percpu_ref_get(&ctx->refs);
-		}
-
-		req->task_work.func(&req->task_work);
-		node = next;
-	}
-
-	ctx_flush_and_put(ctx);
-	return list.first != NULL;
-}
-
 static void tctx_task_work(struct callback_head *cb)
 {
-	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
+	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
+						  task_work);
 
 	clear_bit(0, &tctx->task_state);
 
-	while (__tctx_task_work(tctx))
+	while (!wq_list_empty(&tctx->task_list)) {
+		struct io_ring_ctx *ctx = NULL;
+		struct io_wq_work_list list;
+		struct io_wq_work_node *node;
+
+		spin_lock_irq(&tctx->task_lock);
+		list = tctx->task_list;
+		INIT_WQ_LIST(&tctx->task_list);
+		spin_unlock_irq(&tctx->task_lock);
+
+		node = list.first;
+		while (node) {
+			struct io_wq_work_node *next = node->next;
+			struct io_kiocb *req = container_of(node, struct io_kiocb,
+							    io_task_work.node);
+
+			if (req->ctx != ctx) {
+				ctx_flush_and_put(ctx);
+				ctx = req->ctx;
+				percpu_ref_get(&ctx->refs);
+			}
+			req->task_work.func(&req->task_work);
+			node = next;
+		}
+
+		ctx_flush_and_put(ctx);
+		if (!list.first)
+			break;
 		cond_resched();
+	}
 }
 
 static int io_req_task_work_add(struct io_kiocb *req)
--
2.30.2