hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I96DIN
CVE: CVE-2021-46984
--------------------------------
Fix kabi as follows: 1. Revert the original patch: Revert "kyber: fix out of bounds access when preempted"
2. Use a new way to fix the issue: Mainline fixed this issue by passing a request_queue instead of an hctx to ->bio_merge() and getting the hctx inside kyber_bio_merge(), but this changed kabi. In redhat-8.1.x, keep passing the hctx, use the hctx to get the request_queue, and get the hctx again inside kyber_bio_merge(), so that kabi will not be changed.
Fixes: efed9a3337e3 ("kyber: fix out of bounds access when preempted") Signed-off-by: Li Nan linan122@huawei.com --- include/linux/elevator.h | 2 +- block/bfq-iosched.c | 3 ++- block/blk-mq-sched.c | 8 +++----- block/kyber-iosched.c | 3 ++- block/mq-deadline.c | 3 ++- 5 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index de25a1779b73..663ce1780c5d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -102,7 +102,7 @@ struct elevator_mq_ops { void (*depth_updated)(struct blk_mq_hw_ctx *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct request_queue *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); int (*request_merge)(struct request_queue *q, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 5dadc91f88f1..473d9e31ff87 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1856,8 +1856,9 @@ static void bfq_remove_request(struct request_queue *q,
}
-static bool bfq_bio_merge(struct request_queue *q, struct bio *bio) +static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) { + struct request_queue *q = hctx->queue; struct bfq_data *bfqd = q->elevator->elevator_data; struct request *free = NULL; /* diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 9697134cc6af..0fb33abac3f6 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -385,17 +385,15 @@ static bool blk_mq_attempt_merge(struct request_queue *q, bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) { struct elevator_queue *e = q->elevator; - struct blk_mq_ctx *ctx; - struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); bool ret = false;
if (e && e->type->ops.mq.bio_merge) { blk_mq_put_ctx(ctx); - return e->type->ops.mq.bio_merge(q, bio); + return e->type->ops.mq.bio_merge(hctx, bio); }
- ctx = blk_mq_get_ctx(q); - hctx = blk_mq_map_queue(q, ctx->cpu); if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && !list_empty_careful(&ctx->rq_list)) { /* default per sw-queue merge */ diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index a5f9ab2047ee..833e9eaae640 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -503,8 +503,9 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) } }
-static bool kyber_bio_merge(struct request_queue *q, struct bio *bio) +static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx_q, struct bio *bio) { + struct request_queue *q = hctx_q->queue; struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu); struct kyber_hctx_data *khd = hctx->sched_data; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 4703840c2160..7ad820050675 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -458,8 +458,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq, return ELEVATOR_NO_MERGE; }
-static bool dd_bio_merge(struct request_queue *q, struct bio *bio) +static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) { + struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; struct request *free = NULL; bool ret;