From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 173974
CVE: NA

---------------------------
Prepare to support concurrent queue quiesce between drivers and the block
layer by factoring out flag-parameterized helpers. No functional changes.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-mq.c | 58 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 18 deletions(-)
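Note (illustration only, not part of the commit): the flag parameter
matters because drivers and the block layer currently share the single
QUEUE_FLAG_QUIESCED bit, so whichever side unquiesces first also clears
the other side's quiesce. A minimal sketch of that clobbering, where the
demo_* functions are hypothetical:

	#include <linux/blk-mq.h>

	static void demo_recover_device(struct request_queue *q)
	{
		/* hypothetical device recovery, elided */
	}

	/* hypothetical driver error handler */
	static void demo_eh_work(struct request_queue *q)
	{
		blk_mq_quiesce_queue(q);	/* sets QUEUE_FLAG_QUIESCED */
		/*
		 * If the block layer now runs its own quiesce/unquiesce
		 * pair (e.g. around an elevator switch), its unquiesce
		 * clears the same bit and dispatch restarts while the
		 * driver still assumes the queue is quiesced.
		 */
		demo_recover_device(q);
		blk_mq_unquiesce_queue(q);
	}

Giving each owner its own flag bit, which the helpers below make
possible, avoids this clobbering.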
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ef62a83314a5d..f9b4b73a2f38d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -211,32 +211,29 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+static void __blk_mq_quiesce_queue_nowait(struct request_queue *q,
+					  unsigned int flag)
+{
+	blk_queue_flag_set(flag, q);
+}
+
 /*
  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
  * mpt3sas driver such that this function can be removed.
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	__blk_mq_quiesce_queue_nowait(q, QUEUE_FLAG_QUIESCED);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
-/**
- * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
- * @q: request queue.
- *
- * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Once this function is returned, we make
- * sure no dispatch can happen until the queue is unquiesced via
- * blk_mq_unquiesce_queue().
- */
-void blk_mq_quiesce_queue(struct request_queue *q)
+static void __blk_mq_quiesce_queue(struct request_queue *q, unsigned int flag)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_quiesce_queue_nowait(q);
+	__blk_mq_quiesce_queue_nowait(q, flag);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -247,15 +244,30 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	if (rcu)
 		synchronize_rcu();
 }
+
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent that the struct request end_io()
+ * callback function is invoked. Once this function is returned, we make
+ * sure no dispatch can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+	__blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
-bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
+static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q,
+					       unsigned int flag)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_quiesce_queue_nowait(q);
+	__blk_mq_quiesce_queue_nowait(q, flag);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -265,8 +277,21 @@ bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
 	}
 	return rcu;
 }
+
+bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
+{
+	return __blk_mq_quiesce_queue_without_rcu(q, QUEUE_FLAG_QUIESCED);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_without_rcu);
 
+static void __blk_mq_unquiesce_queue(struct request_queue *q, unsigned int flag)
+{
+	blk_queue_flag_clear(flag, q);
+
+	/* dispatch requests which are inserted during quiescing */
+	blk_mq_run_hw_queues(q, true);
+}
+
 /*
  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
  * @q: request queue.
@@ -276,10 +301,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_without_rcu);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-
-	/* dispatch requests which are inserted during quiescing */
-	blk_mq_run_hw_queues(q, true);
+	__blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
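Illustration only, not part of this patch: a follow-up can now give
driver-initiated quiesce its own flag bit on top of these helpers. The
flag name, bit value and wrappers below are all hypothetical, and since
the helpers are static they would live in block/blk-mq.c next to them:

	/* hypothetical flag bit reserved for driver-side quiesce */
	#define QUEUE_FLAG_DRIVER_QUIESCED	30

	void blk_mq_quiesce_queue_by_driver(struct request_queue *q)
	{
		__blk_mq_quiesce_queue(q, QUEUE_FLAG_DRIVER_QUIESCED);
	}
	EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_by_driver);

	void blk_mq_unquiesce_queue_by_driver(struct request_queue *q)
	{
		__blk_mq_unquiesce_queue(q, QUEUE_FLAG_DRIVER_QUIESCED);
	}
	EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue_by_driver);

With separate bits, a block-layer unquiesce clears only
QUEUE_FLAG_QUIESCED and can no longer restart dispatch underneath a
driver that quiesced via its own bit; the dispatch path would then also
need to check the new bit before dispatching.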