From: Bart Van Assche <bvanassche@acm.org>
mainline inclusion
from mainline-v5.3-rc1
commit c05f42206f4de12b6807270fc669b45472f1bdb7
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGRKP
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
No code that occurs between blk_mq_get_ctx() and blk_mq_put_ctx() depends on preemption being disabled for its correctness. Since removing the CPU preemption calls does not measurably affect performance, simplify the blk-mq code by removing the blk_mq_put_ctx() function and also by not disabling preemption in blk_mq_get_ctx().
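To make the hunks below easier to follow, the net effect on the blk_mq_get_ctx() helper in block/blk-mq.h is roughly the sketch below (excerpted from this patch, with the rest of the header omitted). The old helper pinned the caller to a CPU via get_cpu() until the matching blk_mq_put_ctx() called put_cpu(); the new helper only samples the current CPU, so callers no longer need a put call.

    /* before: disables preemption until blk_mq_put_ctx() calls put_cpu() */
    static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
    {
            return __blk_mq_get_ctx(q, get_cpu());
    }

    static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
    {
            put_cpu();
    }

    /* after: preemption stays enabled, no put call required */
    static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
    {
            return __blk_mq_get_ctx(q, raw_smp_processor_id());
    }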
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
        block/blk-mq-sched.c
        block/blk-mq-tag.c
        block/blk-mq.c
        block/blk-mq.h
        block/kyber-iosched.c
[Context conflicts]
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-sched.c  |  5 +----
 block/blk-mq-tag.c    |  8 --------
 block/blk-mq.c        | 16 +++-------------
 block/blk-mq.h        |  7 +------
 block/kyber-iosched.c |  1 -
 5 files changed, 5 insertions(+), 32 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0fb33abac3f6..52b119cc6616 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -389,10 +389,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
         bool ret = false;
 
-        if (e && e->type->ops.mq.bio_merge) {
-                blk_mq_put_ctx(ctx);
+        if (e && e->type->ops.mq.bio_merge)
                 return e->type->ops.mq.bio_merge(hctx, bio);
-        }
 
         if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
                         !list_empty_careful(&ctx->rq_list)) {
@@ -402,7 +400,6 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
                 spin_unlock(&ctx->lock);
         }
 
-        blk_mq_put_ctx(ctx);
         return ret;
 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index bee92ab06a5e..65464f0fe0fa 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -113,7 +113,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
         struct sbq_wait_state *ws;
         DEFINE_WAIT(wait);
         unsigned int tag_offset;
-        bool drop_ctx;
         int tag;
 
         if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -136,7 +135,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 return BLK_MQ_TAG_FAIL;
 
         ws = bt_wait_ptr(bt, data->hctx);
-        drop_ctx = data->ctx == NULL;
         do {
                 struct sbitmap_queue *bt_prev;
 
@@ -162,9 +160,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 if (tag != -1)
                         break;
 
-                if (data->ctx)
-                        blk_mq_put_ctx(data->ctx);
-
                 bt_prev = bt;
                 io_schedule();
 
@@ -189,9 +184,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 ws = bt_wait_ptr(bt, data->hctx);
         } while (1);
 
-        if (drop_ctx && data->ctx)
-                blk_mq_put_ctx(data->ctx);
-
         finish_wait(&ws->wait, &wait);
 
 found_tag:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8f23109c797c..b4cd46e75a1e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -406,13 +406,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
         struct elevator_queue *e = q->elevator;
         struct request *rq;
         unsigned int tag;
-        bool put_ctx_on_error = false;
+        bool clear_ctx_on_error = false;
 
         blk_queue_enter_live(q);
         data->q = q;
         if (likely(!data->ctx)) {
                 data->ctx = blk_mq_get_ctx(q);
-                put_ctx_on_error = true;
+                clear_ctx_on_error = true;
         }
         if (likely(!data->hctx))
                 data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
@@ -436,10 +436,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
         tag = blk_mq_get_tag(data);
         if (tag == BLK_MQ_TAG_FAIL) {
-                if (put_ctx_on_error) {
-                        blk_mq_put_ctx(data->ctx);
+                if (clear_ctx_on_error)
                         data->ctx = NULL;
-                }
                 blk_queue_exit(q);
                 return NULL;
         }
@@ -476,8 +474,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
         if (!rq)
                 return ERR_PTR(-EWOULDBLOCK);
 
-        blk_mq_put_ctx(alloc_data.ctx);
-
         rq->__data_len = 0;
         rq->__sector = (sector_t) -1;
         rq->bio = rq->biotail = NULL;
@@ -2032,7 +2028,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
         plug = current->plug;
         if (unlikely(is_flush_fua)) {
-                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
 
                 /* bypass scheduler for flush rq */
@@ -2041,7 +2036,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         } else if (plug && q->nr_hw_queues == 1) {
                 struct request *last = NULL;
 
-                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
 
                 /*
@@ -2081,8 +2075,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 list_del_init(&same_queue_rq->queuelist);
                 list_add_tail(&rq->queuelist, &plug->mq_list);
 
-                blk_mq_put_ctx(data.ctx);
-
                 if (same_queue_rq) {
                         data.hctx = blk_mq_map_queue(q,
                                         same_queue_rq->mq_ctx->cpu);
@@ -2091,11 +2083,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 }
         } else if ((q->nr_hw_queues > 1 && is_sync) ||
                         (!q->elevator && !data.hctx->dispatch_busy)) {
-                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
         } else {
-                blk_mq_put_ctx(data.ctx);
                 blk_mq_bio_to_request(rq, bio);
                 blk_mq_sched_insert_request(rq, false, true, true);
         }
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 0072728a078b..7d5b04b09b5a 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -145,12 +145,7 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  */
 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
 {
-        return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-        put_cpu();
+        return __blk_mq_get_ctx(q, raw_smp_processor_id());
 }
 
 struct blk_mq_alloc_data {
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index f370d3e3f6e0..6dd2d3ac7528 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -517,7 +517,6 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx_q, struct bio *bio)
         spin_lock(&kcq->lock);
         merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
         spin_unlock(&kcq->lock);
-        blk_mq_put_ctx(ctx);
 
         return merged;
 }