From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 186143, https://gitee.com/openeuler/kernel/issues/I4WC06
CVE: NA
-----------------------------------------------
If precise_iostat is enabled, inflight is recorded and cleared with atomic operations. For dm multipath, however, inflight is recorded by dm_requeue_original_request(), and if an error occurs, multipath_release_clone() won't clear it, so the inflight count leaks. As a result, %util in iostat is stuck at 100.
Fix the problem by introducing __blk_insert_cloned_request() with a 'precise' flag: when the clone is issued directly and the issue fails, blk_account_io_done() is called to balance the start accounting, so inflight no longer leaks.
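To make the invariant concrete, here is a minimal user-space sketch (illustrative only, not kernel code; account_io_start(), account_io_done() and issue_directly() are hypothetical stand-ins for the real accounting and dispatch paths):

#include <stdbool.h>
#include <stdio.h>

static int inflight;	/* models the precise_iostat inflight counter */

static void account_io_start(void) { inflight++; }
static void account_io_done(void)  { inflight--; }

/* Models blk_mq_request_issue_directly(): nonzero means failure. */
static int issue_directly(bool fail) { return fail ? -1 : 0; }

/*
 * Models __blk_insert_cloned_request(): with 'precise' set, a failed
 * direct issue must balance the earlier start accounting, otherwise
 * the inflight counter leaks.
 */
static int insert_cloned_request(bool fail, bool precise)
{
	int ret;

	account_io_start();		/* start accounted at dispatch */
	ret = issue_directly(fail);
	if (ret && precise)
		account_io_done();	/* the fix: balance on error */
	return ret;
}

int main(void)
{
	insert_cloned_request(true, false);
	printf("old behaviour, inflight after failed issue: %d\n", inflight);

	inflight = 0;
	insert_cloned_request(true, true);
	printf("patched behaviour, inflight after failed issue: %d\n", inflight);
	return 0;
}

Every start must be paired with exactly one done; the patch adds the missing done on the direct-issue error path.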
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-core.c       | 22 ++++++++++++++++++++--
 block/blk.h            |  9 +++++++++
 drivers/md/dm-rq.c     |  2 +-
 include/linux/blkdev.h |  2 ++
 4 files changed, 32 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index df733e8caa6a1..a5d80ab911707 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2669,8 +2669,10 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
  * @q: the queue to submit the request
  * @rq: the request being queued
+ * @precise: true if io account with start and done will be balanced
  */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t __blk_insert_cloned_request(struct request_queue *q,
+					 struct request *rq, bool precise)
 {
 	unsigned long flags;
 	int where = ELEVATOR_INSERT_BACK;
@@ -2693,7 +2695,16 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		 * bypass a potential scheduler on the bottom device for
 		 * insert.
 		 */
-		return blk_mq_request_issue_directly(rq);
+		ret = blk_mq_request_issue_directly(rq);
+		if (ret && precise) {
+			u64 now = 0;
+
+			if (blk_mq_need_time_stamp(rq))
+				now = ktime_get_ns();
+
+			blk_account_io_done(rq, now);
+		}
+		return ret;
 	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -2718,6 +2729,13 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 
 	return BLK_STS_OK;
 }
+EXPORT_SYMBOL_GPL(__blk_insert_cloned_request);
+
+blk_status_t blk_insert_cloned_request(struct request_queue *q,
+				       struct request *rq)
+{
+	return __blk_insert_cloned_request(q, rq, false);
+}
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
diff --git a/block/blk.h b/block/blk.h
index e496e26630f71..dde2141a32dde 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -405,6 +405,15 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 	return current->io_context;
 }
 
+/*
+ * Only need start/end time stamping if we have stats enabled, or using
+ * an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
+}
+
 /*
  * Internal throttling interface
  */
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 80683f2b723d5..3bd805f7ce85b 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -410,7 +410,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ
 		clone->rq_flags |= RQF_IO_STAT;
 
 	clone->start_time_ns = ktime_get_ns();
-	r = blk_insert_cloned_request(clone->q, clone);
+	r = __blk_insert_cloned_request(clone->q, clone, true);
 	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
 		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a86659e78d987..1deaf36eb2371 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1012,6 +1012,8 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 					      struct request *rq);
+extern blk_status_t __blk_insert_cloned_request(struct request_queue *q,
+						struct request *rq, bool precise);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
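The blk.h helper above gates the relatively expensive time stamp on whether anyone will consume it. A small user-space model of that gating (all types and names below are illustrative stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RQF_IO_STAT (1u << 0)	/* models the kernel's RQF_IO_STAT flag */

struct queue   { void *elevator; };
struct request { unsigned int rq_flags; struct queue *q; };

/* Models blk_mq_need_time_stamp(): stamp only if stats or a scheduler. */
static bool need_time_stamp(const struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
}

/* Models ktime_get_ns() with a monotonic clock. */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	struct queue q = { .elevator = NULL };
	struct request rq = { .rq_flags = RQF_IO_STAT, .q = &q };
	uint64_t now = 0;

	/* Mirrors the error path in __blk_insert_cloned_request():
	 * 'now' stays 0 when no one needs the stamp. */
	if (need_time_stamp(&rq))
		now = now_ns();
	printf("completion time stamp: %llu\n", (unsigned long long)now);
	return 0;
}

Passing now = 0 to the done accounting when no stamp is needed matches how the patch avoids a ktime_get_ns() call on queues without stats or an elevator.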