From: John Garry <john.garry@huawei.com>
mainline inclusion
from mainline-v5.16-rc1
commit 645db34e50501aac141713fb47a315e5202ff890
category: performance
bugzilla: 186917, https://gitee.com/openeuler/kernel/issues/I5N1S5
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Refactor blk_mq_free_map_and_requests() such that it can be used at many sites at which the tag map and rqs are freed.
Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the alloc equivalent.
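As the diff below shows, the renamed helper takes the tags pointer explicitly and no longer clears set->tags[hctx_idx] itself, so callers that own the pointer now clear it after the call. A minimal sketch of the resulting call-site pattern, distilled from the hunks below (set and i stand in for whichever tag set and hctx index a given caller uses):

	/* Before: each site freed the requests and the tag map in two steps
	 * (or relied on blk_mq_free_map_and_requests() to do so and to
	 * clear the set->tags[] slot).
	 */
	blk_mq_free_rqs(set, set->tags[i], i);
	blk_mq_free_rq_map(set->tags[i], set->flags);
	set->tags[i] = NULL;

	/* After: one helper frees both; the caller clears its own pointer. */
	blk_mq_free_map_and_rqs(set, set->tags[i], i);
	set->tags[i] = NULL;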
Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/1633429419-228500-12-git-send-email-john.garry@hua...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflict: commit a846a8e6c9a5 ("blk-mq: don't free tags if the tag_set is
used by other device in queue initialztion") is already backported, so the
blk_mq_free_map_and_rqs() call is made in __blk_mq_update_nr_hw_queues()
instead of blk_mq_realloc_hw_ctxs().
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 block/blk-mq-tag.c |  3 +--
 block/blk-mq.c     | 38 +++++++++++++++++++++++---------------
 block/blk-mq.h     |  4 +++-
 3 files changed, 27 insertions(+), 18 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c986251cf10c..7b0c09ac1de9 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -674,8 +674,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                 if (!new)
                         return -ENOMEM;
 
-                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
-                blk_mq_free_rq_map(*tagsptr, set->flags);
+                blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
                 *tagsptr = new;
         } else {
                 /*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2d388a7c76a8..d7fe8c5c5369 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3016,15 +3016,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
         return set->tags[hctx_idx];
 }
 
-static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
-                                         unsigned int hctx_idx)
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+                             struct blk_mq_tags *tags,
+                             unsigned int hctx_idx)
 {
         unsigned int flags = set->flags;
 
-        if (set->tags && set->tags[hctx_idx]) {
-                blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
-                blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-                set->tags[hctx_idx] = NULL;
+        if (tags) {
+                blk_mq_free_rqs(set, tags, hctx_idx);
+                blk_mq_free_rq_map(tags, flags);
         }
 }
@@ -3105,8 +3105,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                          * fallback in case of a new remap fails
                          * allocation
                          */
-                        if (i && set->tags[i])
-                                blk_mq_free_map_and_requests(set, i);
+                        if (i && set->tags[i]) {
+                                blk_mq_free_map_and_rqs(set, set->tags[i], i);
+                                set->tags[i] = NULL;
+                        }
 
                         hctx->tags = NULL;
                         continue;
@@ -3533,8 +3535,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
         return 0;
 
 out_unwind:
-        while (--i >= 0)
-                blk_mq_free_map_and_requests(set, i);
+        while (--i >= 0) {
+                blk_mq_free_map_and_rqs(set, set->tags[i], i);
+                set->tags[i] = NULL;
+        }
 
         return -ENOMEM;
 }
@@ -3724,8 +3728,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         return 0;
 
 out_free_mq_rq_maps:
-        for (i = 0; i < set->nr_hw_queues; i++)
-                blk_mq_free_map_and_requests(set, i);
+        for (i = 0; i < set->nr_hw_queues; i++) {
+                blk_mq_free_map_and_rqs(set, set->tags[i], i);
+                set->tags[i] = NULL;
+        }
 out_free_mq_map:
         for (i = 0; i < set->nr_maps; i++) {
                 kfree(set->map[i].mq_map);
@@ -3741,8 +3747,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
         int i, j;
 
-        for (i = 0; i < set->nr_hw_queues; i++)
-                blk_mq_free_map_and_requests(set, i);
+        for (i = 0; i < set->nr_hw_queues; i++) {
+                blk_mq_free_map_and_rqs(set, set->tags[i], i);
+                set->tags[i] = NULL;
+        }
 
         if (blk_mq_is_sbitmap_shared(set->flags))
                 blk_mq_exit_shared_sbitmap(set);
@@ -3932,7 +3940,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
                         nr_hw_queues, prev_nr_hw_queues);
                 for (; i < set->nr_hw_queues; i++)
-                        blk_mq_free_map_and_requests(set, i);
+                        blk_mq_free_map_and_rqs(set, set->tags[i], i);
 
                 set->nr_hw_queues = prev_nr_hw_queues;
                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f4021afa443a..15ce74c266a6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                              unsigned int hctx_idx, unsigned int depth);
-
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+                             struct blk_mq_tags *tags,
+                             unsigned int hctx_idx);
 /*
  * Internal helpers for request insertion into sw queues
  */