hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8RGGC
CVE: NA
-----------------------------------------------
The fair sharing algorithm has a negative performance impact on storage devices for which the full queue depth is required to reach peak performance, e.g. UFS devices. This is because it takes a long time after a request queue becomes inactive until its tags are reassigned to the active request queue(s). Since fair tag sharing is not needed if the request processing latency is similar for all request queues, introduce a function for configuring fair tag sharing.
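As an illustration (not part of this patch; the exact call sites are left to the driver), a block driver could opt out of fair tag sharing either before its request queues are attached to the tag set, or at runtime via the new helper:

	/* Before request queues are attached to the tag set: */
	set->disable_fair_tag_sharing = true;

	/* Or at runtime, once request queues are already attached: */
	mutex_lock(&set->tag_list_lock);
	set->disable_fair_tag_sharing = true;
	blk_mq_update_fair_tag_sharing(set);
	mutex_unlock(&set->tag_list_lock);

blk_mq_update_fair_tag_sharing() asserts that tag_list_lock is held and freezes each queue in the set while the per-hctx BLK_MQ_F_DISABLE_FAIR_TAG_SHARING flag is updated.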
Originally-from: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/all/20231130193139.880955-2-bvanassche@acm.org/
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-debugfs.c |  1 +
 block/blk-mq.c         | 33 +++++++++++++++++++++++++++++++++
 block/blk-mq.h         |  3 ++-
 include/linux/blk-mq.h |  3 +++
 4 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index c3b5930106b2..aa7a1357c3e1 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -198,6 +198,7 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(NO_SCHED),
 	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
+	HCTX_FLAG_NAME(DISABLE_FAIR_TAG_SHARING),
 };
 #undef HCTX_FLAG_NAME
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6ab7f360ff2a..97ecabc0d107 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3935,6 +3935,35 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	}
 }
+static void queue_update_fair_tag_sharing(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i;
+	bool disabled = q->tag_set->disable_fair_tag_sharing;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (disabled)
+			hctx->flags |= BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+		else
+			hctx->flags &= ~BLK_MQ_F_DISABLE_FAIR_TAG_SHARING;
+	}
+
+}
+
+void blk_mq_update_fair_tag_sharing(struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+
+	lockdep_assert_held(&set->tag_list_lock);
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		blk_mq_freeze_queue(q);
+		queue_update_fair_tag_sharing(q);
+		blk_mq_unfreeze_queue(q);
+	}
+}
+EXPORT_SYMBOL_GPL(blk_mq_update_fair_tag_sharing);
+
 /*
  * Caller needs to ensure that we're either frozen/quiesced, or that
  * the queue isn't live yet.
  */
@@ -3989,6 +4018,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 {
 	mutex_lock(&set->tag_list_lock);
+	queue_update_fair_tag_sharing(q);
 
 	/*
 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
 	 */
@@ -4767,6 +4797,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_map_swqueue(q);
 	}
 
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
+		queue_update_fair_tag_sharing(q);
+
 reregister:
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_sysfs_register_hctxs(q);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 1743857e0b01..8b9aac701035 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -393,7 +393,8 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 {
 	unsigned int depth, users;
 
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
+	    (hctx->flags & BLK_MQ_F_DISABLE_FAIR_TAG_SHARING))
 		return true;
 
 	/*
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 958ed7e89b30..65e95a2c4718 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -506,6 +506,7 @@ struct blk_mq_tag_set {
 	int			numa_node;
 	unsigned int		timeout;
 	unsigned int		flags;
+	bool			disable_fair_tag_sharing;
 	void			*driver_data;
 
 	struct blk_mq_tags	**tags;
 
@@ -656,6 +657,7 @@ enum {
 	 */
 	BLK_MQ_F_STACKING	= 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
+	BLK_MQ_F_DISABLE_FAIR_TAG_SHARING = 1 << 4,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
 	/* Do not allow an I/O scheduler to be configured. */
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
@@ -998,6 +1000,7 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 bool blk_rq_is_poll(struct request *rq);
+void blk_mq_update_fair_tag_sharing(struct blk_mq_tag_set *set);
 
 struct req_iterator {
 	struct bvec_iter iter;