From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 173974
CVE: NA

---------------------------
Add a new queue flag QUEUE_FLAG_QUIESCED_INTERNAL, together with the helpers
blk_mq_quiesce_queue_internal() and blk_mq_unquiesce_queue_internal(), so that
the block layer can quiesce a queue without interfering with the driver-owned
QUEUE_FLAG_QUIESCED. The queue will be quiesced if either the old or the new
flag is set, and will be unquiesced only when both flags are cleared.
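For illustration, the intended pairing of the new helpers might look as
follows. This is a minimal sketch, not part of this patch:
example_update_queue() is a hypothetical caller, and the comments assume
__blk_mq_quiesce_queue() behaves like blk_mq_quiesce_queue() apart from the
flag it operates on.

	/* Hypothetical block-layer caller (illustration only). */
	static void example_update_queue(struct request_queue *q)
	{
		/*
		 * Sets QUEUE_FLAG_QUIESCED_INTERNAL; the driver-owned
		 * QUEUE_FLAG_QUIESCED is left untouched.
		 */
		blk_mq_quiesce_queue_internal(q);

		/* ... modify queue state while dispatch is held off ... */

		/*
		 * Clears only the internal flag; the queue remains
		 * quiesced if the driver still holds QUEUE_FLAG_QUIESCED.
		 */
		blk_mq_unquiesce_queue_internal(q);
	}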
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-mq-sched.c   |  3 ++-
 block/blk-mq.c         | 14 +++++++++++++-
 block/blk-mq.h         |  2 ++
 include/linux/blkdev.h |  9 +++++++--
 4 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 3521eca1b2984..b7be8e74fab8c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -195,7 +195,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	LIST_HEAD(rq_list);
 
 	/* RCU or SRCU read lock is needed before checking quiesced flag */
-	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) ||
+		     blk_queue_quiesced_internal(q)))
 		return;
 
 	hctx->run++;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f9b4b73a2f38d..8bf3da9b7178e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -260,6 +260,11 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
+void blk_mq_quiesce_queue_internal(struct request_queue *q)
+{
+	__blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
+}
+
 static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q,
 					       unsigned int flag)
 {
@@ -305,6 +310,11 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 
+void blk_mq_unquiesce_queue_internal(struct request_queue *q)
+{
+	__blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
+}
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1491,6 +1501,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 */
 	hctx_lock(hctx, &srcu_idx);
 	need_run = !blk_queue_quiesced(hctx->queue) &&
+		!blk_queue_quiesced_internal(hctx->queue) &&
 		blk_mq_hctx_has_pending(hctx);
 	hctx_unlock(hctx, srcu_idx);
 
@@ -1844,7 +1855,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
 	 * and avoid driver to try to dispatch again.
 	 */
-	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) ||
+	    blk_queue_quiesced_internal(q)) {
 		run_queue = false;
 		bypass_insert = false;
 		goto insert;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index bbb0c1d8849b4..88b590c245476 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -46,6 +46,8 @@ bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
 void blk_mq_put_rq_ref(struct request *rq);
+void blk_mq_quiesce_queue_internal(struct request_queue *q);
+void blk_mq_unquiesce_queue_internal(struct request_queue *q);
 
 /*
  * Internal helpers for allocating/freeing the request map
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50f3b1eaa021f..109add33b3318 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -725,8 +725,11 @@ struct request_queue {
 #define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED	26	/* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
+/* queue has been quiesced, used in driver */
+#define QUEUE_FLAG_QUIESCED	28
 #define QUEUE_FLAG_FORECE_QUIESCE 29	/* force quiesce when cleanup queue */
+/* queue has been quiesced, used in block layer */
+#define QUEUE_FLAG_QUIESCED_INTERNAL 30
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
@@ -763,7 +766,9 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 
 #define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
-#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_quiesced_internal(q)	\
+	test_bit(QUEUE_FLAG_QUIESCED_INTERNAL, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
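
For reference, the dispatch-side checks added above all open-code the same
predicate: the queue counts as quiesced when either the driver flag or the
internal flag is set, and is unquiesced only when both are clear. A minimal
sketch of that combined test (example_queue_quiesced_any() is a hypothetical
helper, not added by this patch):

	/*
	 * Hypothetical combined predicate (illustration only): mirrors
	 * the checks in blk_mq_sched_dispatch_requests(),
	 * blk_mq_run_hw_queue() and __blk_mq_try_issue_directly().
	 */
	static inline bool example_queue_quiesced_any(struct request_queue *q)
	{
		return blk_queue_quiesced(q) || blk_queue_quiesced_internal(q);
	}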