[PATCH RFC 3/7] blk-mq: add support to track active queues from blk_mq_tags

From: Yu Kuai
Date: Sun Jun 18 2023 - 04:09:34 EST


From: Yu Kuai <yukuai3@xxxxxxxxxx>

In order to refactor how tags are shared, it's necessary to track some
information for each disk/hctx, so that more tags can be assigned to the
one under higher pressure. Hence link each active queue (or each active
hctx, if the tag set is not shared) into a new list in blk_mq_tags,
protected by tags->lock.

Prepare to refactor tag sharing.

Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
 block/blk-mq-tag.c     | 13 +++++++++++++
 include/linux/blk-mq.h |  2 ++
 include/linux/blkdev.h |  5 +++++
 3 files changed, 20 insertions(+)
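
To illustrate how the list is meant to be consumed (a sketch only, not
part of this patch; the helper name is made up), a later patch can walk
tags->ctl.head under tags->lock and recover the owning queue or hctx
with container_of():

static void blk_mq_for_each_sharer(struct blk_mq_tags *tags, bool shared)
{
	struct tag_sharing *tag_sharing;

	/* the busy/idle paths modify the list under tags->lock */
	lockdep_assert_held(&tags->lock);

	list_for_each_entry(tag_sharing, &tags->ctl.head, node) {
		if (shared) {
			/* shared tag set: node is embedded in the queue */
			struct request_queue *q = container_of(tag_sharing,
					struct request_queue, tag_sharing);

			pr_debug("sharer queue %p\n", q);
		} else {
			/* otherwise: node is embedded in the hctx */
			struct blk_mq_hw_ctx *hctx = container_of(tag_sharing,
					struct blk_mq_hw_ctx, tag_sharing);

			pr_debug("sharer hctx %p\n", hctx);
		}
	}
}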

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 1c2bde917195..8c527e68d4e4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -64,6 +64,7 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
+	struct tag_sharing *tag_sharing;
 
 	/*
 	 * calling test_bit() prior to test_and_set_bit() is intentional,
@@ -75,13 +76,18 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
 		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return;
+
+		tag_sharing = &q->tag_sharing;
 	} else {
 		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
 		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
+
+		tag_sharing = &hctx->tag_sharing;
 	}
 
 	spin_lock_irq(&tags->lock);
+	list_add(&tag_sharing->node, &tags->ctl.head);
 	WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues + 1);
 	spin_unlock_irq(&tags->lock);
 }
@@ -111,6 +117,7 @@ static void __blk_mq_driver_tag_idle(struct blk_mq_hw_ctx *hctx)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
+	struct tag_sharing *tag_sharing;
 
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
@@ -118,12 +125,17 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
 					&q->queue_flags))
 			return;
+
+		tag_sharing = &q->tag_sharing;
 	} else {
 		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
+
+		tag_sharing = &hctx->tag_sharing;
 	}
 
 	spin_lock_irq(&tags->lock);
+	list_del_init(&tag_sharing->node);
 	__blk_mq_driver_tag_idle(hctx);
 	WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
 	WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
@@ -619,6 +631,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
 	spin_lock_init(&tags->lock);
+	INIT_LIST_HEAD(&tags->ctl.head);
 
 	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
 				total_tags, reserved_tags, node,
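
As an aside (hypothetical, not something this patch introduces): with the
list in place, share_queues could later be recomputed from the tracked
entries instead of being mirrored from active_queues on the idle path:

/* Hypothetical helper: recount sharers and publish the result. */
static void blk_mq_update_share_queues(struct blk_mq_tags *tags)
{
	struct tag_sharing *tag_sharing;
	unsigned int count = 0;

	lockdep_assert_held(&tags->lock);

	list_for_each_entry(tag_sharing, &tags->ctl.head, node)
		count++;

	WRITE_ONCE(tags->ctl.share_queues, count);
}
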
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index bc3ac22edb07..639d618e6ca8 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -390,6 +390,7 @@ struct blk_mq_hw_ctx {
 	 * assigned when a request is dispatched from a hardware queue.
 	 */
 	struct blk_mq_tags *tags;
+	struct tag_sharing tag_sharing;
 	/**
 	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
 	 * scheduler associated with a request queue, a tag is assigned when
@@ -737,6 +738,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 struct tag_sharing_ctl {
 	unsigned int active_queues;
 	unsigned int share_queues;
+	struct list_head head;
 };
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0994707f6a68..62f8fcc20c30 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -375,6 +375,10 @@ struct blk_independent_access_ranges {
 	struct blk_independent_access_range ia_range[];
 };
 
+struct tag_sharing {
+	struct list_head node;
+};
+
 struct request_queue {
 	struct request *last_merge;
 	struct elevator_queue *elevator;
@@ -513,6 +517,7 @@ struct request_queue {
 
 	struct blk_mq_tag_set *tag_set;
 	struct list_head tag_set_list;
+	struct tag_sharing tag_sharing;
 
 	struct dentry *debugfs_dir;
 	struct dentry *sched_debugfs_dir;
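
A final illustration (this assumes the embedded node is initialized with
INIT_LIST_HEAD() when the queue/hctx is allocated, which this patch does
not do by itself): since the idle path uses list_del_init() rather than
list_del(), an unlinked node points back at itself, so list_empty() on
the node can tell whether an owner is currently registered:

/* Hypothetical: is this queue/hctx currently an active sharer? */
static bool tag_sharing_is_active(struct tag_sharing *tag_sharing)
{
	return !list_empty(&tag_sharing->node);
}
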
--
2.39.2