hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGRKP
CVE: NA
--------------------------------
Add a new struct request_wrapper to fix the KABI breakage caused by adding new fields to struct request:
Before:       |request|cmd|
KABI broken:  |request with new fields|cmd|
After:        |request_wrapper with new fields|request|cmd|
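For illustration only (not part of this patch), a minimal userspace sketch of the same layout trick, using hypothetical stand-in types rather than the real kernel structs: the wrapper is allocated immediately in front of the request, the request pointer handed out is (wrapper + 1), and the hidden fields are reached by walking backwards from the request pointer.

  #include <stdio.h>
  #include <stdlib.h>

  struct request { int tag; };                     /* stand-in, not the kernel struct */
  struct request_wrapper { unsigned long long bi_alloc_time_ns; };

  /* Walk backwards from a request pointer to the wrapper in front of it. */
  static inline struct request_wrapper *request_to_wrapper(void *rq)
  {
          return (struct request_wrapper *)((char *)rq - sizeof(struct request_wrapper));
  }

  int main(void)
  {
          size_t cmd_size = 64;
          /* One allocation laid out as |wrapper|request|cmd| */
          struct request_wrapper *wrapper =
                  calloc(1, sizeof(*wrapper) + sizeof(struct request) + cmd_size);
          struct request *rq = (struct request *)(wrapper + 1);

          request_to_wrapper(rq)->bi_alloc_time_ns = 42;
          printf("%llu\n", request_to_wrapper(rq)->bi_alloc_time_ns);
          free(wrapper);                           /* free the wrapper, not rq */
          return 0;
  }

The point of the layout is that the offsets of every field inside struct request, and of the cmd payload behind it, stay exactly as they were before the new field was added.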
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-flush.c      | 10 ++++++----
 block/blk-mq.c         |  5 +++--
 block/blk-mq.h         | 11 +++++++++++
 block/blk.h            | 25 +++++++++++++++++++------
 include/linux/blkdev.h |  3 ---
 5 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 5dda142819b2..a09c11678184 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -602,7 +602,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags)
 {
 	struct blk_flush_queue *fq;
-	int rq_sz = sizeof(struct request);
+	struct request_wrapper *wrapper;
+	int rq_sz = sizeof(struct request) + sizeof(struct request_wrapper);
 
 	fq = kzalloc_node(sizeof(*fq), flags, node);
 	if (!fq)
@@ -612,10 +613,11 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	spin_lock_init(&fq->mq_flush_lock);
 
 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
-	if (!fq->flush_rq)
+	wrapper = kzalloc_node(rq_sz, flags, node);
+	if (!wrapper)
 		goto fail_rq;
 
+	fq->flush_rq = (struct request *)(wrapper + 1);
 	INIT_LIST_HEAD(&fq->flush_queue[0]);
 	INIT_LIST_HEAD(&fq->flush_queue[1]);
 	INIT_LIST_HEAD(&fq->flush_data_in_flight);
@@ -634,6 +636,6 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
 	if (!fq)
 		return;
 
-	kfree(fq->flush_rq);
+	kfree(request_to_wrapper(fq->flush_rq));
 	kfree(fq);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f96f4bb8be92..fdd440c3f31b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2241,7 +2241,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	 * rq_size is the size of the request plus driver payload, rounded
 	 * to the cacheline size
 	 */
-	rq_size = round_up(sizeof(struct request) + set->cmd_size,
+	rq_size = round_up(sizeof(struct request) +
+			   sizeof(struct request_wrapper) + set->cmd_size,
 			   cache_line_size());
 	left = rq_size * depth;
 
@@ -2282,7 +2283,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		to_do = min(entries_per_page, depth - i);
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
-			struct request *rq = p;
+			struct request *rq = p + sizeof(struct request_wrapper);
 
 			tags->static_rqs[i] = rq;
 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c6ec9aa12fb2..380362e37504 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,6 +36,17 @@ struct blk_mq_ctx {
 	struct kobject		kobj;
 } ____cacheline_aligned_in_smp;
 
+struct request_wrapper {
+#ifdef CONFIG_BLK_BIO_ALLOC_TIME
+	u64 bi_alloc_time_ns;
+#endif
+} ____cacheline_aligned_in_smp;
+
+static inline struct request_wrapper *request_to_wrapper(void *rq)
+{
+	return rq - sizeof(struct request_wrapper);
+}
+
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
diff --git a/block/blk.h b/block/blk.h
index e1278f790ac7..b7af0eff95b7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -152,8 +152,12 @@ static inline u64 blk_time_get_ns(void);
 static inline void blk_rq_init_bi_alloc_time(struct request *rq,
 					     struct request *first_rq)
 {
-	rq->bi_alloc_time_ns = first_rq ? first_rq->bi_alloc_time_ns :
-					  blk_time_get_ns();
+	if (!rq->q->mq_ops)
+		return;
+
+	request_to_wrapper(rq)->bi_alloc_time_ns =
+		first_rq ? request_to_wrapper(first_rq)->bi_alloc_time_ns :
+			   blk_time_get_ns();
 }
 
 /*
@@ -167,17 +171,26 @@ static inline void blk_rq_update_bi_alloc_time(struct request *rq,
 					       struct bio *bio,
 					       struct request *merged_rq)
 {
+	struct request_wrapper *rq_wrapper;
+	struct request_wrapper *merged_rq_wrapper;
+
+	if (!rq->q->mq_ops)
+		return;
+
+	rq_wrapper = request_to_wrapper(rq);
 	if (bio) {
-		if (rq->bi_alloc_time_ns > bio->bi_alloc_time_ns)
-			rq->bi_alloc_time_ns = bio->bi_alloc_time_ns;
+		if (rq_wrapper->bi_alloc_time_ns > bio->bi_alloc_time_ns)
+			rq_wrapper->bi_alloc_time_ns = bio->bi_alloc_time_ns;
 		return;
 	}
 
 	if (WARN_ON_ONCE(!merged_rq))
 		return;
 
-	if (rq->bi_alloc_time_ns > merged_rq->bi_alloc_time_ns)
-		rq->bi_alloc_time_ns = merged_rq->bi_alloc_time_ns;
+	merged_rq_wrapper = request_to_wrapper(merged_rq);
+	if (rq_wrapper->bi_alloc_time_ns > merged_rq_wrapper->bi_alloc_time_ns)
+		rq_wrapper->bi_alloc_time_ns =
+			merged_rq_wrapper->bi_alloc_time_ns;
 }
 #else /* CONFIG_BLK_BIO_ALLOC_TIME */
 static inline void blk_rq_init_bi_alloc_time(struct request *rq,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c487c32b1bf4..241f59eb5b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -225,9 +225,6 @@ struct request {
 	u64 start_time_ns;
 	/* Time that I/O was submitted to the device. */
 	u64 io_start_time_ns;
-#ifdef CONFIG_BLK_BIO_ALLOC_TIME
-	u64 bi_alloc_time_ns;
-#endif
 
 #ifdef CONFIG_BLK_WBT
 	unsigned short wbt_flags;