hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9VTE3
CVE: NA
--------------------------------
Fix KABI breakage in struct queue_limits. The atomic write limits embedded in struct queue_limits changed its layout; move them into a separately allocated struct queue_atomic_write_limits and keep only a pointer in struct queue_limits, placed in the KABI_RESERVE(1) slot and hidden from genksyms, so the structure's KABI stays intact.
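The KABI trick used is the usual reserved-slot replacement: genksyms, which
computes the exported symbol CRCs, runs with __GENKSYMS__ defined and so
still sees KABI_RESERVE(1), while the compiler sees a same-sized pointer to
the new side structure. A minimal sketch of the pattern, with hypothetical
struct and field names and a stand-in KABI_RESERVE definition (the kernel
provides the real macro):

  /* Sketch only: illustrates the __GENKSYMS__ / reserved-slot swap. */
  #ifndef KABI_RESERVE
  #define KABI_RESERVE(n)	unsigned long kabi_reserved##n;
  #endif

  struct example_extra {			/* new, separately allocated limits */
  	unsigned int unit_min;
  	unsigned int unit_max;
  };

  struct example_limits {
  	unsigned int max_sectors;		/* existing, KABI-frozen field */

  #ifndef __GENKSYMS__
  	struct example_extra *extra;		/* what the compiler sees */
  #else
  	KABI_RESERVE(1)				/* what genksyms still sees */
  #endif
  };

Because the pointer occupies the reserved slot, the size and field offsets of
struct queue_limits stay the same for existing binary modules.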
Signed-off-by: Long Li <leo.lilong@huawei.com>
---
 block/blk-core.c         | 16 ++++++++++++++-
 block/blk-merge.c        |  2 +-
 block/blk-settings.c     | 29 +++++++++++++++++++++------
 block/blk-sysfs.c        |  1 +
 block/blk.h              |  1 +
 drivers/md/dm-table.c    |  1 +
 drivers/nvme/host/core.c | 12 ++++++++----
 include/linux/blkdev.h   | 42 +++++++++++++++++++++++-----------------
 8 files changed, 74 insertions(+), 30 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a84128c11aa4..2956df3aca5b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -81,6 +81,7 @@ __setup("precise_iostat=", precise_iostat_setup);
  * For queue allocation
  */
 struct kmem_cache *blk_requestq_cachep;
+struct kmem_cache *queue_atomic_write_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -532,6 +533,7 @@ static void blk_timeout_work(struct work_struct *work)
 struct request_queue *blk_alloc_queue(int node_id)
 {
 	struct request_queue *q;
+	struct queue_atomic_write_limits *aw_limits;
 	int ret;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
@@ -539,11 +541,18 @@ struct request_queue *blk_alloc_queue(int node_id)
 	if (!q)
 		return NULL;
 
+	aw_limits = kmem_cache_alloc_node(queue_atomic_write_cachep,
+					  GFP_KERNEL | __GFP_ZERO, node_id);
+	if (!aw_limits)
+		goto fail_q;
+
+	q->limits.aw_limits = aw_limits;
+
 	q->last_merge = NULL;
 
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
 	if (q->id < 0)
-		goto fail_q;
+		goto fail_aw;
 
 	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (ret)
@@ -594,6 +603,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 
 	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
+	blk_set_default_atomic_write_limits(&q->limits);
 	q->nr_requests = BLKDEV_MAX_RQ;
 
 	return q;
@@ -608,6 +618,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 	bioset_exit(&q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
+fail_aw:
+	kmem_cache_free(queue_atomic_write_cachep, aw_limits);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
 	return NULL;
@@ -1923,6 +1935,8 @@ int __init blk_dev_init(void)
 
 	blk_requestq_cachep = kmem_cache_create("request_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+	queue_atomic_write_cachep = kmem_cache_create("queue_atomic_write",
+			sizeof(struct queue_atomic_write_limits), 0, SLAB_PANIC, NULL);
 
 	blk_debugfs_root = debugfs_create_dir("block", NULL);
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7ca680a6c037..3b2004308e93 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	 * it may less than the bio size, which we cannot tolerate.
 	 */
 	if (bio->bi_opf & REQ_ATOMIC)
-		max_sectors = q->limits.atomic_write_max_sectors;
+		max_sectors = q->limits.aw_limits->atomic_write_max_sectors;
 	else
 		max_sectors = sectors;
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 0c1326e8c5d6..4961d4ab3bfa 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -63,6 +63,20 @@ void blk_set_default_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
+void blk_set_default_atomic_write_limits(struct queue_limits *lim)
+{
+	if (lim->aw_limits) {
+		lim->aw_limits->atomic_write_hw_max = 0;
+		lim->aw_limits->atomic_write_max_sectors = 0;
+		lim->aw_limits->atomic_write_hw_boundary = 0;
+		lim->aw_limits->atomic_write_hw_unit_min = 0;
+		lim->aw_limits->atomic_write_unit_min = 0;
+		lim->aw_limits->atomic_write_hw_unit_max = 0;
+		lim->aw_limits->atomic_write_unit_max = 0;
+	}
+}
+EXPORT_SYMBOL(blk_set_default_atomic_write_limits);
+
 /**
  * blk_set_stacking_limits - set default limits for stacking devices
  * @lim: the queue_limits structure to reset
@@ -153,13 +167,16 @@ static void blk_atomic_writes_update_limits(struct queue_limits *limits)
 
 	unit_limit = rounddown_pow_of_two(unit_limit);
 
-	limits->atomic_write_max_sectors =
-		min(limits->atomic_write_hw_max >> SECTOR_SHIFT,
+	if (!limits->aw_limits)
+		return;
+
+	limits->aw_limits->atomic_write_max_sectors =
+		min(limits->aw_limits->atomic_write_hw_max >> SECTOR_SHIFT,
 		    limits->max_hw_sectors);
-	limits->atomic_write_unit_min =
-		min(limits->atomic_write_hw_unit_min, unit_limit);
-	limits->atomic_write_unit_max =
-		min(limits->atomic_write_hw_unit_max, unit_limit);
+	limits->aw_limits->atomic_write_unit_min =
+		min(limits->aw_limits->atomic_write_hw_unit_min, unit_limit);
+	limits->aw_limits->atomic_write_unit_max =
+		min(limits->aw_limits->atomic_write_hw_unit_max, unit_limit);
 }
 
 /**
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6916f1ed4590..8e35d63b30c2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -764,6 +764,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 					       rcu_head);
 
 	percpu_ref_exit(&q->q_usage_counter);
+	kmem_cache_free(queue_atomic_write_cachep, q->limits.aw_limits);
 	kmem_cache_free(blk_requestq_cachep, q);
 }
 
diff --git a/block/blk.h b/block/blk.h
index 4bbcc971d4f7..1bdd07040d42 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -29,6 +29,7 @@ struct blk_flush_queue {
 };
 
 extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *queue_atomic_write_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index eb95b5ce7b8f..d8f54d2a74d9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1816,6 +1816,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
+	limits->aw_limits = q->limits.aw_limits;
 	q->limits = *limits;
 
 	if (dm_table_supports_nowait(t))
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4fa0c55787d2..76626f5a737f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2052,10 +2052,14 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
 		if (le16_to_cpu(id->nabspf))
 			boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
 	}
-	lim->atomic_write_hw_max = atomic_bs;
-	lim->atomic_write_hw_boundary = boundary;
-	lim->atomic_write_hw_unit_min = bs;
-	lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+
+	if (!lim->aw_limits)
+		return;
+
+	lim->aw_limits->atomic_write_hw_max = atomic_bs;
+	lim->aw_limits->atomic_write_hw_boundary = boundary;
+	lim->aw_limits->atomic_write_hw_unit_min = bs;
+	lim->aw_limits->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
 }
 
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8d4ba7b501ec..3f33b94cc37c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -323,6 +323,17 @@ enum blk_zoned_model {
 	BLK_ZONED_HM,		/* Host-managed zoned block device */
 };
 
+struct queue_atomic_write_limits {
+	/* atomic write limits */
+	unsigned int atomic_write_hw_max;
+	unsigned int atomic_write_max_sectors;
+	unsigned int atomic_write_hw_boundary;
+	unsigned int atomic_write_hw_unit_min;
+	unsigned int atomic_write_unit_min;
+	unsigned int atomic_write_hw_unit_max;
+	unsigned int atomic_write_unit_max;
+};
+
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
@@ -346,16 +357,6 @@ struct queue_limits {
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
-	/* atomic write limits */
-	unsigned int atomic_write_hw_max;
-	unsigned int atomic_write_max_sectors;
-	unsigned int atomic_write_hw_boundary;
-	unsigned int atomic_write_hw_unit_min;
-	unsigned int atomic_write_unit_min;
-	unsigned int atomic_write_hw_unit_max;
-	unsigned int atomic_write_unit_max;
-
-
 	unsigned short		max_segments;
 	unsigned short		max_integrity_segments;
 	unsigned short		max_discard_segments;
@@ -365,7 +366,11 @@ struct queue_limits {
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
 
+#ifndef __GENKSYMS__
+	struct queue_atomic_write_limits *aw_limits;
+#else
 	KABI_RESERVE(1)
+#endif
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -1115,7 +1120,7 @@ static inline unsigned int blk_queue_get_max_sectors_wrapper(struct request *rq)
 	int op = req_op(rq);
 
 	if (rq->cmd_flags & REQ_ATOMIC)
-		return q->limits.atomic_write_max_sectors;
+		return q->limits.aw_limits->atomic_write_max_sectors;
 
 	return blk_queue_get_max_sectors(q, op);
 }
@@ -1214,6 +1219,7 @@ extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_default_atomic_write_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
@@ -1672,25 +1678,25 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
 static inline unsigned int
 queue_atomic_write_unit_max_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_unit_max;
+	return q->limits.aw_limits->atomic_write_unit_max;
 }
 
 static inline unsigned int
 queue_atomic_write_unit_min_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_unit_min;
+	return q->limits.aw_limits->atomic_write_unit_min;
 }
 
 static inline unsigned int
 queue_atomic_write_boundary_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_hw_boundary;
+	return q->limits.aw_limits->atomic_write_hw_boundary;
 }
 
 static inline unsigned int
 queue_atomic_write_max_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
+	return q->limits.aw_limits->atomic_write_max_sectors << SECTOR_SHIFT;
 }
 
 static inline int queue_dma_alignment(const struct request_queue *q)
@@ -2151,14 +2157,14 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
 	struct request_queue *bd_queue = bdev_get_queue(bdev);
 	struct queue_limits *limits = &bd_queue->limits;
 
-	if (!limits->atomic_write_unit_min)
+	if (!limits->aw_limits->atomic_write_unit_min)
 		return false;
 
 	if (bdev_is_partition(bdev)) {
 		sector_t bd_start_sect = bdev->bd_part->start_sect;
 		unsigned int alignment =
-			max(limits->atomic_write_unit_min,
-			    limits->atomic_write_hw_boundary);
+			max(limits->aw_limits->atomic_write_unit_min,
+			    limits->aw_limits->atomic_write_hw_boundary);
 		if (!IS_ALIGNED(bd_start_sect, alignment))
 			return false;
 	}