[PATCH] block: loop: avoid too many pending per-work I/Os

From: Ming Lei
Date: Sun Apr 26 2015 - 05:53:56 EST


If there are too many pending per-work I/Os, too many
high priority worker threads can be generated, so
system performance can be affected.

This patch limits the max pending per-work I/O count to 16,
and degrades to single queue mode when the max number
is reached.

This patch fixes the Fedora 22 live boot performance
regression seen when booting from squashfs over dm
on top of loop.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxxxxx>
---
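
Not part of the patch, for illustration only: a minimal userspace sketch
of the gating idea. It mimics how loop_queue_rq() reads an atomic pending
counter and falls back to the single serialized queue once the cap is
reached, while the completion path drops the counter again. The helper
names, the pthread plumbing and MAX_PENDING are hypothetical stand-ins
for the kernel workqueue machinery.

/*
 * Userspace sketch (not kernel code): a shared atomic counter bounds
 * how many requests may run on their own worker; past the cap,
 * requests fall back to one serialized path.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_PENDING 16			/* mirrors the 16 used in the patch */

static atomic_int pending;		/* pending per-work requests */

static void *own_worker(void *arg)
{
	int id = (int)(long)arg;

	printf("request %d handled concurrently\n", id);
	atomic_fetch_sub(&pending, 1);	/* completion path, like loop_handle_cmd() */
	return NULL;
}

static void queue_request(int id)
{
	pthread_t t;

	/*
	 * Heuristic check-then-act, as in loop_queue_rq(): the read and
	 * the increment are not one atomic step, which is acceptable
	 * because the cap only needs to be approximate.
	 */
	if (atomic_load(&pending) >= MAX_PENDING) {
		/* degraded, serialized path (the "write_work" analogue) */
		printf("request %d handled on the single queue\n", id);
		return;
	}
	atomic_fetch_add(&pending, 1);
	pthread_create(&t, NULL, own_worker, (void *)(long)id);
	pthread_detach(t);
}

int main(void)
{
	for (int i = 0; i < 64; i++)
		queue_request(i);
	return 0;
}

Build with "cc -pthread"; past the 16th in-flight request the output
switches to the serialized path, which is the behaviour the patch
introduces for the loop driver.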
drivers/block/loop.c | 21 ++++++++++++++++++---
drivers/block/loop.h | 2 ++
2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c6b3726..55bd04f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1448,13 +1448,24 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct loop_device *lo = cmd->rq->q->queuedata;
+ bool single_queue = !!(cmd->rq->cmd_flags & REQ_WRITE);
+
+ /*
+ * Degrade to single queue mode if the pending per-work
+ * I/O count reaches 16, otherwise too many high priority
+ * worker threads may affect system performance, as reported
+ * in Fedora live booting from squashfs over loop.
+ */
+ if (atomic_read(&lo->pending_per_work_io) >= 16)
+ single_queue = true;

blk_mq_start_request(bd->rq);

- if (cmd->rq->cmd_flags & REQ_WRITE) {
- struct loop_device *lo = cmd->rq->q->queuedata;
+ if (single_queue) {
bool need_sched = true;

+ cmd->per_work_io = false;
spin_lock_irq(&lo->lo_lock);
if (lo->write_started)
need_sched = false;
@@ -1466,6 +1477,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (need_sched)
queue_work(loop_wq, &lo->write_work);
} else {
+ cmd->per_work_io = true;
+ atomic_inc(&lo->pending_per_work_io);
queue_work(loop_wq, &cmd->read_work);
}

@@ -1490,6 +1503,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
if (ret)
cmd->rq->errors = -EIO;
blk_mq_complete_request(cmd->rq);
+ if (cmd->per_work_io)
+ atomic_dec(&lo->pending_per_work_io);
}

static void loop_queue_write_work(struct work_struct *work)
@@ -1831,7 +1846,7 @@ static int __init loop_init(void)
}

loop_wq = alloc_workqueue("kloopd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!loop_wq) {
err = -ENOMEM;
goto misc_out;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index ffb6dd6..06d8f1a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -57,6 +57,7 @@ struct loop_device {
struct list_head write_cmd_head;
struct work_struct write_work;
bool write_started;
+ atomic_t pending_per_work_io;
int lo_state;
struct mutex lo_ctl_mutex;

@@ -68,6 +69,7 @@ struct loop_device {
struct loop_cmd {
struct work_struct read_work;
struct request *rq;
+ bool per_work_io;
struct list_head list;
};

--
1.9.1


Thanks,
Ming Lei
