-----Original Message-----
From: linan (AK) linan122@huawei.com
Sent: 17 November 2022 21:42
To: Zhengzengkai zhengzengkai@huawei.com; Xiexiuqi xiexiuqi@huawei.com; pmail_patchwork patchwork@huawei.com
Cc: linan (AK) linan122@huawei.com; zhangyi (F) yi.zhang@huawei.com; houtao (A) houtao1@huawei.com; yukuai (C) yukuai3@huawei.com; chenxiaosong (A) chenxiaosong2@huawei.com; chengzhihao chengzhihao1@huawei.com; libaokun (C) libaokun1@huawei.com; luomeng luomeng12@huawei.com; yanaijie yanaijie@huawei.com; yangerkun yangerkun@huawei.com; yebin (H) yebin10@huawei.com; yuyufen yuyufen@huawei.com; zhangxiaoxu (A) zhangxiaoxu5@huawei.com; Zhengbin (OSKernel) zhengbin13@huawei.com; koulihong koulihong@huawei.com; qiulaibin qiulaibin@huawei.com
Subject: [PATCH openEuler-22.03-LTS 2/2] block: Fix kabi broken in blk-merge.h and blk-cgroup.h
hulk inclusion
category: bugfix
bugzilla: 187443, https://gitee.com/openeuler/kernel/issues/I5Z7O2
CVE: NA
--------------------------------
Including additional header files and adding a new function in blk-cgroup.h breaks kABI, so move the changes to blk-mq.h instead. bio_issue_as_root_blkg() is needed by blk_cgroup_mergeable(), so move it as well. It is also used by iocost, so include blk-mq.h in blk-iocost.c.
Signed-off-by: Li Nan linan122@huawei.com
Reviewed-by: Jason Yan yanaijie@huawei.com
Signed-off-by: Zheng Zengkai zhengzengkai@huawei.com
---
 block/blk-iocost.c         |  1 +
 block/blk-merge.c          |  1 -
 block/blk-mq.h             | 34 ++++++++++++++++++++++++++++++++++
 include/linux/blk-cgroup.h | 33 ---------------------------------
 4 files changed, 35 insertions(+), 34 deletions(-)
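Note for reviewers (not part of the commit message): the two helpers being moved are self-contained, so the merge gate they implement can be sanity-checked in isolation. Below is a minimal userspace sketch of that logic; the toy_bio struct, TOY_REQ_* flags, and toy_* function names are illustrative stand-ins, not the kernel types. Two bios are treated as mergeable only if they point at the same blkg and agree on whether they are issued as the root blkg.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types and flags; these are not real kernel identifiers. */
#define TOY_REQ_META	(1u << 0)
#define TOY_REQ_SWAP	(1u << 1)

struct toy_bio {
	unsigned int bi_opf;	/* operation flags */
	void *bi_blkg;		/* owning blkg, treated as an opaque pointer here */
};

/* Mirrors bio_issue_as_root_blkg(): META and SWAP IO is issued as the root blkg. */
static bool toy_issue_as_root(const struct toy_bio *bio)
{
	return (bio->bi_opf & (TOY_REQ_META | TOY_REQ_SWAP)) != 0;
}

/* Mirrors blk_cgroup_mergeable(): same blkg and same issue_as_root classification. */
static bool toy_mergeable(const struct toy_bio *rq_bio, const struct toy_bio *bio)
{
	return rq_bio->bi_blkg == bio->bi_blkg &&
	       toy_issue_as_root(rq_bio) == toy_issue_as_root(bio);
}

int main(void)
{
	int blkg_a;	/* stands in for a blkg object; only its address matters */
	struct toy_bio data = { .bi_opf = 0, .bi_blkg = &blkg_a };
	struct toy_bio meta = { .bi_opf = TOY_REQ_META, .bi_blkg = &blkg_a };

	/* Same blkg but different issue_as_root class: merge refused (prints 0). */
	printf("data + meta mergeable: %d\n", toy_mergeable(&data, &meta));
	/* Same blkg and same class: merge allowed (prints 1). */
	printf("data + data mergeable: %d\n", toy_mergeable(&data, &data));
	return 0;
}

With CONFIG_BLK_CGROUP disabled, the real helper simply returns true, i.e. cgroup state never blocks a merge.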
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 08e4ba856e3b..462dbb766ed1 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -184,6 +184,7 @@
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
 #include "blk-wbt.h"
+#include "blk-mq.h"
 
 #ifdef CONFIG_TRACEPOINTS
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f3fca8bb326d..1ed1b5146db2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -7,7 +7,6 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
-#include <linux/blk-cgroup.h>
 
 #include <trace/events/block.h>
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5572277cf9a3..1c86f7d56e72 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -338,5 +338,39 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return __blk_mq_active_requests(hctx) < depth;
 }
 
+/**
+ * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
+ * @return: true if this bio needs to be submitted with the root blkg context.
+ *
+ * In order to avoid priority inversions we sometimes need to issue a bio as if
+ * it were attached to the root blkg, and then backcharge to the actual owning
+ * blkg. The idea is we do bio_blkcg() to look up the actual context for the
+ * bio and attach the appropriate blkg to the bio. Then we call this helper and
+ * if it is true run with the root blkg for that queue and then do any
+ * backcharging to the originating cgroup once the io is complete.
+ */
+static inline bool bio_issue_as_root_blkg(struct bio *bio)
+{
+	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
+}
+
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * blk_cgroup_mergeable - Determine whether to allow or disallow merges
+ * @rq: request to merge into
+ * @bio: bio to merge
+ *
+ * @bio and @rq should belong to the same cgroup and their issue_as_root should
+ * match. The latter is necessary as we don't want to throttle e.g. a metadata
+ * update because it happens to be next to a regular IO.
+ */
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
+{
+	return rq->bio->bi_blkg == bio->bi_blkg &&
+		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
+}
+#else /* CONFIG_BLK_CGROUP */
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
+#endif /* CONFIG_BLK_CGROUP */
 
 #endif
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index dac9804907df..994ff06de40f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -25,7 +25,6 @@
 #include <linux/atomic.h>
 #include <linux/kthread.h>
 #include <linux/fs.h>
-#include <linux/blk-mq.h>
 
 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
@@ -297,22 +296,6 @@ static inline bool blk_cgroup_congested(void)
 	return ret;
 }
 
-/**
- * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
- * @return: true if this bio needs to be submitted with the root blkg context.
- *
- * In order to avoid priority inversions we sometimes need to issue a bio as if
- * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
- * backcharging to the originating cgroup once the io is complete.
- */
-static inline bool bio_issue_as_root_blkg(struct bio *bio)
-{
-	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
-}
-
 /**
  * blkcg_parent - get the parent of a blkcg
  * @blkcg: blkcg of interest
@@ -611,21 +594,6 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 	atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
 }
 
-/**
- * blk_cgroup_mergeable - Determine whether to allow or disallow merges
- * @rq: request to merge into
- * @bio: bio to merge
- *
- * @bio and @rq should belong to the same cgroup and their issue_as_root should
- * match. The latter is necessary as we don't want to throttle e.g. a metadata
- * update because it happens to be next to a regular IO.
- */
-static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
-{
-	return rq->bio->bi_blkg == bio->bi_blkg &&
-		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
-}
-
 void blk_cgroup_bio_start(struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
@@ -681,7 +649,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) { }
 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
 static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline void blk_cgroup_bio_start(struct bio *bio) { }
-static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
 
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
-- 
2.31.1