Li Nan (1):
  kyber: fix kabi broken in ->bio_merge()

Omar Sandoval (1):
  kyber: fix out of bounds access when preempted

 block/kyber-iosched.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
From: Omar Sandoval <osandov@fb.com>
mainline inclusion
from mainline-v5.13-rc2
commit efed9a3337e341bd0989161b97453b52567bc59d
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I96DIN
CVE: CVE-2021-46984
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
__blk_mq_sched_bio_merge() gets the ctx and hctx for the current CPU and passes the hctx to ->bio_merge(). kyber_bio_merge() then gets the ctx for the current CPU again and uses that to get the corresponding Kyber context in the passed hctx. However, the thread may be preempted between the two calls to blk_mq_get_ctx(), and the ctx returned the second time may no longer correspond to the passed hctx. This "works" accidentally most of the time, but it can cause us to read garbage if the second ctx came from an hctx with more ctx's than the first one (i.e., if ctx->index_hw[hctx->type] > hctx->nr_ctx).
This manifested as this UBSAN array index out of bounds error reported by Jakub:
UBSAN: array-index-out-of-bounds in ../kernel/locking/qspinlock.c:130:9
index 13106 is out of range for type 'long unsigned int [128]'
Call Trace:
 dump_stack+0xa4/0xe5
 ubsan_epilogue+0x5/0x40
 __ubsan_handle_out_of_bounds.cold.13+0x2a/0x34
 queued_spin_lock_slowpath+0x476/0x480
 do_raw_spin_lock+0x1c2/0x1d0
 kyber_bio_merge+0x112/0x180
 blk_mq_submit_bio+0x1f5/0x1100
 submit_bio_noacct+0x7b0/0x870
 submit_bio+0xc2/0x3a0
 btrfs_map_bio+0x4f0/0x9d0
 btrfs_submit_data_bio+0x24e/0x310
 submit_one_bio+0x7f/0xb0
 submit_extent_page+0xc4/0x440
 __extent_writepage_io+0x2b8/0x5e0
 __extent_writepage+0x28d/0x6e0
 extent_write_cache_pages+0x4d7/0x7a0
 extent_writepages+0xa2/0x110
 do_writepages+0x8f/0x180
 __writeback_single_inode+0x99/0x7f0
 writeback_sb_inodes+0x34e/0x790
 __writeback_inodes_wb+0x9e/0x120
 wb_writeback+0x4d2/0x660
 wb_workfn+0x64d/0xa10
 process_one_work+0x53a/0xa80
 worker_thread+0x69/0x5b0
 kthread+0x20b/0x240
 ret_from_fork+0x1f/0x30
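In rough outline, the unpatched call sequence looks like the following condensed sketch (not a verbatim excerpt; locking and the merge logic are trimmed). The two blk_mq_get_ctx() calls can run on different CPUs if the task is preempted in between:

  /* Condensed sketch of the unpatched flow, not the literal code. */
  bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
  {
  	struct elevator_queue *e = q->elevator;
  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);               /* ctx of CPU A */
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

  	/* The task may be preempted and migrated to CPU B here. */
  	return e->type->ops.mq.bio_merge(hctx, bio);              /* CPU A's hctx */
  }

  static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
  {
  	struct kyber_hctx_data *khd = hctx->sched_data;           /* CPU A's hctx */
  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);     /* ctx of CPU B */

  	/* index_hw is relative to CPU B's hctx and can exceed CPU A's nr_ctx. */
  	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
  	/* ... locks and scans kcq, which may be out of bounds ... */
  }

With enough software queues, CPU B's ctx->index_hw can exceed the number of kcqs allocated for CPU A's hctx, which is the out-of-bounds access seen in the trace above.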
Only Kyber uses the hctx, so fix it by passing the request_queue to ->bio_merge() instead. BFQ and mq-deadline just use that, and Kyber can map the queues itself to avoid the mismatch.
Fixes: a6088845c2bf ("block: kyber: make kyber more friendly with merging")
Reported-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Link: https://lore.kernel.org/r/c7598605401a48d5cfeadebb678abd10af22b83f.162069132...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Li Nan <linan122@huawei.com>
---
 include/linux/elevator.h | 2 +-
 block/bfq-iosched.c      | 3 +--
 block/blk-mq-sched.c     | 8 +++++---
 block/kyber-iosched.c    | 5 +++--
 block/mq-deadline.c      | 3 +--
 5 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 663ce1780c5d..de25a1779b73 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -102,7 +102,7 @@ struct elevator_mq_ops {
 	void (*depth_updated)(struct blk_mq_hw_ctx *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
-	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+	bool (*bio_merge)(struct request_queue *, struct bio *);
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 473d9e31ff87..5dadc91f88f1 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1856,9 +1856,8 @@ static void bfq_remove_request(struct request_queue *q,
 
 }
 
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool bfq_bio_merge(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct request *free = NULL;
 	/*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0fb33abac3f6..9697134cc6af 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -385,15 +385,17 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct blk_mq_ctx *ctx;
+	struct blk_mq_hw_ctx *hctx;
 	bool ret = false;
 
 	if (e && e->type->ops.mq.bio_merge) {
 		blk_mq_put_ctx(ctx);
-		return e->type->ops.mq.bio_merge(hctx, bio);
+		return e->type->ops.mq.bio_merge(q, bio);
 	}
 
+	ctx = blk_mq_get_ctx(q);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
 			!list_empty_careful(&ctx->rq_list)) {
 		/* default per sw-queue merge */
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index d8f3cb1bffa6..a5f9ab2047ee 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -503,10 +503,11 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 	}
 }
 
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool kyber_bio_merge(struct request_queue *q, struct bio *bio)
 {
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct kyber_hctx_data *khd = hctx->sched_data;
-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
 	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 7ad820050675..4703840c2160 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -458,9 +458,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
 	return ELEVATOR_NO_MERGE;
 }
 
-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct request *free = NULL;
 	bool ret;
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I96DIN
CVE: CVE-2021-46984
--------------------------------
Fix kabi as follows:

1. Revert the original patch:
   Revert "kyber: fix out of bounds access when preempted".

2. Fix the issue in a new way (sketched below):
   Mainline fixed this issue by passing the request_queue instead of the hctx
   to ->bio_merge() and looking up the hctx inside kyber_bio_merge(), but that
   changes the kabi. In redhat-8.1.x, keep passing the hctx, use it only to get
   the request_queue, and then look up the ctx and hctx again inside
   kyber_bio_merge(), so the kabi is left unchanged.
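In rough outline, the kabi-preserving entry point then looks like the sketch below (the authoritative change is the block/kyber-iosched.c hunk that follows); the point is that ctx and hctx are now derived together on the current CPU, so the kcq index always stays within the hctx that is actually used:

  /* Sketch only; see the real hunk below. */
  static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx_q, struct bio *bio)
  {
  	/* The prototype still takes an hctx, so elevator_mq_ops stays kabi-stable. */
  	struct request_queue *q = hctx_q->queue;

  	/*
  	 * Re-derive ctx and hctx from the current CPU. Both come from the
  	 * same lookup, so khd->kcqs[ctx->index_hw] is always in range for
  	 * this hctx, even if hctx_q was mapped before a preemption.
  	 */
  	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
  	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
  	struct kyber_hctx_data *khd = hctx->sched_data;
  	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];

  	/* ... merging against kcq->rq_list proceeds as before ... */
  }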
Fixes: efed9a3337e3 ("kyber: fix out of bounds access when preempted")
Signed-off-by: Li Nan <linan122@huawei.com>
---
 include/linux/elevator.h | 2 +-
 block/bfq-iosched.c      | 3 ++-
 block/blk-mq-sched.c     | 8 +++-----
 block/kyber-iosched.c    | 3 ++-
 block/mq-deadline.c      | 3 ++-
 5 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index de25a1779b73..663ce1780c5d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -102,7 +102,7 @@ struct elevator_mq_ops {
 	void (*depth_updated)(struct blk_mq_hw_ctx *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
-	bool (*bio_merge)(struct request_queue *, struct bio *);
+	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 5dadc91f88f1..473d9e31ff87 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1856,8 +1856,9 @@ static void bfq_remove_request(struct request_queue *q,
 
 }
 
-static bool bfq_bio_merge(struct request_queue *q, struct bio *bio)
+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
+	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct request *free = NULL;
 	/*
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 9697134cc6af..0fb33abac3f6 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -385,17 +385,15 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
-	struct blk_mq_ctx *ctx;
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	bool ret = false;
 
 	if (e && e->type->ops.mq.bio_merge) {
 		blk_mq_put_ctx(ctx);
-		return e->type->ops.mq.bio_merge(q, bio);
+		return e->type->ops.mq.bio_merge(hctx, bio);
 	}
 
-	ctx = blk_mq_get_ctx(q);
-	hctx = blk_mq_map_queue(q, ctx->cpu);
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
 			!list_empty_careful(&ctx->rq_list)) {
 		/* default per sw-queue merge */
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index a5f9ab2047ee..833e9eaae640 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -503,8 +503,9 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 	}
 }
 
-static bool kyber_bio_merge(struct request_queue *q, struct bio *bio)
+static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx_q, struct bio *bio)
 {
+	struct request_queue *q = hctx_q->queue;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct kyber_hctx_data *khd = hctx->sched_data;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 4703840c2160..7ad820050675 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -458,8 +458,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
 	return ELEVATOR_NO_MERGE;
 }
 
-static bool dd_bio_merge(struct request_queue *q, struct bio *bio)
+static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
+	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct request *free = NULL;
 	bool ret;
Feedback: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/4999
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/S...