hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8RGGC
CVE: NA
-----------------------------------------------
The fair sharing algorithm has a negative performance impact on storage devices that need the full queue depth to reach peak performance, e.g. UFS devices. This is because it takes a long time after a request queue becomes inactive before tags are reassigned to the active request queue(s). Since fair tag sharing is not needed when the request processing latency is similar for all request queues, introduce the BLK_MQ_F_DISABLE_FAIR_TAG_SHARING flag and a helper, blk_mq_update_fair_tag_sharing(), for configuring fair tag sharing.
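For example (not part of this patch), a driver whose request queues all have comparable latency could disable fair tag sharing for an existing tag set roughly as follows; example_disable_fair_tag_sharing() is a hypothetical wrapper used only for illustration:

	/*
	 * Illustrative sketch only: set the flag in the tag set and propagate
	 * it to every hardware queue. blk_mq_update_fair_tag_sharing()
	 * expects tag_list_lock to be held by the caller.
	 */
	static void example_disable_fair_tag_sharing(struct blk_mq_tag_set *set)
	{
		mutex_lock(&set->tag_list_lock);
		set->flags |= BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
		blk_mq_update_fair_tag_sharing(set);
		mutex_unlock(&set->tag_list_lock);
	}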
Originally-from: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/all/20231130193139.880955-2-bvanassche@acm.org/
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-debugfs.c |  1 +
 block/blk-mq.c         | 38 +++++++++++++++++++++++++++++++++++++-
 block/blk-mq.h         |  3 ++-
 include/linux/blk-mq.h |  2 ++
 4 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index c3b5930106b2..aa7a1357c3e1 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -198,6 +198,7 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(NO_SCHED),
 	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
+	HCTX_FLAG_NAME(DISABLE_FAIR_TAG_SHARING),
 };
 #undef HCTX_FLAG_NAME
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6ab7f360ff2a..dd7c9e3eca1b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3706,7 +3706,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
 	hctx->queue = q;
-	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
+	hctx->flags = set->flags & ~(BLK_MQ_F_TAG_QUEUE_SHARED |
+				     BLK_MQ_F_DISABLE_FAIR_TAG_SHARING);
 
 	INIT_LIST_HEAD(&hctx->hctx_list);
 
@@ -3935,6 +3936,37 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	}
 }
 
+static void queue_update_fair_tag_sharing(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+	bool disabled = q->tag_set->flags & BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+
+	lockdep_assert_held(&q->tag_set->tag_list_lock);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (disabled)
+			hctx->flags |= BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+		else
+			hctx->flags &= ~BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+	}
+
+}
+
+void blk_mq_update_fair_tag_sharing(struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+
+	lockdep_assert_held(&set->tag_list_lock);
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_freeze_queue(q);
+		queue_update_fair_tag_sharing(q);
+		blk_mq_unfreeze_queue(q);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_mq_update_fair_tag_sharing);
+
 /*
  * Caller needs to ensure that we're either frozen/quiesced, or that
  * the queue isn't live yet.
  */
@@ -3989,6 +4021,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 {
 	mutex_lock(&set->tag_list_lock);
 
+	queue_update_fair_tag_sharing(q);
 	/*
 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
 	 */
@@ -4767,6 +4800,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_map_swqueue(q);
 	}
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		queue_update_fair_tag_sharing(q);
+
 reregister:
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_sysfs_register_hctxs(q);

diff --git a/block/blk-mq.h b/block/blk-mq.h
index 1743857e0b01..8b9aac701035 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -393,7 +393,8 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 {
 	unsigned int depth, users;
 
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
+	    (hctx->flags & BLK_MQ_F_DISABLE_FAIR_TAG_SHARING))
 		return true;
 
 	/*

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 958ed7e89b30..3d74f3e5b995 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -656,6 +656,7 @@ enum {
 	 */
 	BLK_MQ_F_STACKING	= 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
+	BLK_MQ_F_DISABLE_FAIR_TAG_SHARING = 1 << 4,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
 	/* Do not allow an I/O scheduler to be configured. */
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
@@ -998,6 +999,7 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 bool blk_rq_is_poll(struct request *rq);
+void blk_mq_update_fair_tag_sharing(struct blk_mq_tag_set *set);
 
 struct req_iterator {
 	struct bvec_iter iter;