hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9VTE3
CVE: NA
--------------------------------
Fix KABI breakage in struct queue_limits: move the atomic write limits
out of struct queue_limits into a separately allocated struct
queue_atomic_write_limits that is reached through the KABI_RESERVE(1)
slot (KABI_USE), so the size and layout of struct queue_limits stay
unchanged. The per-queue limits are allocated from a dedicated kmem
cache in blk_alloc_queue() and freed in blk_free_queue_rcu().
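The approach, in a minimal userspace sketch (illustration only, not part
of this patch; the struct names below are invented for the example): the
new fields move into a separately allocated extension struct, and the
old struct's reserved pointer-sized slot is reused to point at it, so
the old struct's size and member offsets stay the same.

/*
 * Illustration only -- userspace sketch, not kernel code.  It shows why
 * replacing a reserved pointer-sized member with a pointer to a separately
 * allocated extension struct keeps the original struct's size and member
 * offsets (the KABI) unchanged.  Names (ext_limits/limits_v1/limits_v2)
 * are made up for the example.
 */
#include <stddef.h>
#include <stdlib.h>

/* New fields live here and are allocated separately. */
struct ext_limits {
	unsigned int atomic_write_unit_min;
	unsigned int atomic_write_unit_max;
};

/* "Old" layout: one reserved slot, analogous to KABI_RESERVE(1). */
struct limits_v1 {
	unsigned int max_sectors;
	unsigned long reserved1;
};

/* "New" layout: the slot now carries a pointer, like KABI_USE(1, ...). */
struct limits_v2 {
	unsigned int max_sectors;
	struct ext_limits *aw_limits;
};

/* Size and offsets do not change, so existing binary users keep working. */
_Static_assert(sizeof(struct limits_v1) == sizeof(struct limits_v2),
	       "struct size must not change");
_Static_assert(offsetof(struct limits_v1, reserved1) ==
	       offsetof(struct limits_v2, aw_limits),
	       "slot offset must not change");

int main(void)
{
	struct limits_v2 lim = { .max_sectors = 255 };

	/* Analogous to the kmem_cache allocation done in blk_alloc_queue(). */
	lim.aw_limits = calloc(1, sizeof(*lim.aw_limits));
	if (!lim.aw_limits)
		return 1;

	lim.aw_limits->atomic_write_unit_max = 4096;

	free(lim.aw_limits);
	return 0;
}

One consequence, visible in the hunks below, is that code touching the
atomic write limits must check limits->aw_limits for NULL before
dereferencing it.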
Signed-off-by: Long Li <leo.lilong@huawei.com>
---
 block/blk-core.c         | 16 +++++++++++++++-
 block/blk-merge.c        |  2 +-
 block/blk-settings.c     | 29 +++++++++++++++++++++++------
 block/blk-sysfs.c        |  1 +
 block/blk.h              |  1 +
 drivers/md/dm-table.c    |  5 +++--
 drivers/md/dm.c          |  4 ++--
 drivers/nvme/host/core.c | 12 ++++++++----
 include/linux/blkdev.h   | 40 +++++++++++++++++++++-------------------
 9 files changed, 75 insertions(+), 35 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fe63998bfde3..e3e2659d0673 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -81,6 +81,7 @@ __setup("precise_iostat=", precise_iostat_setup);
  * For queue allocation
  */
 struct kmem_cache *blk_requestq_cachep;
+struct kmem_cache *queue_atomic_write_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -760,6 +761,7 @@ static void blk_timeout_work(struct work_struct *work)
 struct request_queue *blk_alloc_queue(int node_id)
 {
 	struct request_queue *q;
+	struct queue_atomic_write_limits *aw_limits;
 	int ret;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
@@ -767,10 +769,17 @@ struct request_queue *blk_alloc_queue(int node_id)
 	if (!q)
 		return NULL;
 
+	aw_limits = kmem_cache_alloc_node(queue_atomic_write_cachep,
+					  GFP_KERNEL | __GFP_ZERO, node_id);
+	if (!aw_limits)
+		goto fail_q;
+
+	q->limits.aw_limits = aw_limits;
+
 	q->last_merge = NULL;
 
 	if (blk_alloc_queue_dispatch_async(q))
-		goto fail_q;
+		goto fail_aw;
 
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
 	if (q->id < 0)
@@ -825,6 +834,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
+	blk_set_default_atomic_write_limits(&q->limits);
 	q->nr_requests = BLKDEV_MAX_RQ;
 
 	return q;
 
@@ -841,6 +851,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_dispatch_async:
 	blk_free_queue_dispatch_async(q);
+fail_aw:
+	kmem_cache_free(queue_atomic_write_cachep, aw_limits);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
 	return NULL;
@@ -2159,6 +2171,8 @@ int __init blk_dev_init(void)
 	blk_requestq_cachep = kmem_cache_create("request_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+	queue_atomic_write_cachep = kmem_cache_create("queue_atomic_write",
+			sizeof(struct queue_atomic_write_limits), 0, SLAB_PANIC, NULL);
 
 	blk_debugfs_root = debugfs_create_dir("block", NULL);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7ca680a6c037..3b2004308e93 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	 * it may less than the bio size, which we cannot tolerate.
 	 */
 	if (bio->bi_opf & REQ_ATOMIC)
-		max_sectors = q->limits.atomic_write_max_sectors;
+		max_sectors = q->limits.aw_limits->atomic_write_max_sectors;
 	else
 		max_sectors = sectors;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index de9cba9eb948..d1a1f963c3eb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -63,6 +63,20 @@ void blk_set_default_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
+void blk_set_default_atomic_write_limits(struct queue_limits *lim)
+{
+	if (lim->aw_limits) {
+		lim->aw_limits->atomic_write_hw_max = 0;
+		lim->aw_limits->atomic_write_max_sectors = 0;
+		lim->aw_limits->atomic_write_hw_boundary = 0;
+		lim->aw_limits->atomic_write_hw_unit_min = 0;
+		lim->aw_limits->atomic_write_unit_min = 0;
+		lim->aw_limits->atomic_write_hw_unit_max = 0;
+		lim->aw_limits->atomic_write_unit_max = 0;
+	}
+}
+EXPORT_SYMBOL(blk_set_default_atomic_write_limits);
+
 /**
  * blk_set_stacking_limits - set default limits for stacking devices
  * @lim: the queue_limits structure to reset
@@ -153,13 +167,16 @@ void blk_atomic_writes_update_limits(struct queue_limits *limits)
 
 	unit_limit = rounddown_pow_of_two(unit_limit);
 
-	limits->atomic_write_max_sectors =
-		min(limits->atomic_write_hw_max >> SECTOR_SHIFT,
+	if (!limits->aw_limits)
+		return;
+
+	limits->aw_limits->atomic_write_max_sectors =
+		min(limits->aw_limits->atomic_write_hw_max >> SECTOR_SHIFT,
 		    limits->max_hw_sectors);
-	limits->atomic_write_unit_min =
-		min(limits->atomic_write_hw_unit_min, unit_limit);
-	limits->atomic_write_unit_max =
-		min(limits->atomic_write_hw_unit_max, unit_limit);
+	limits->aw_limits->atomic_write_unit_min =
+		min(limits->aw_limits->atomic_write_hw_unit_min, unit_limit);
+	limits->aw_limits->atomic_write_unit_max =
+		min(limits->aw_limits->atomic_write_hw_unit_max, unit_limit);
 }
 EXPORT_SYMBOL(blk_atomic_writes_update_limits);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 14a527212241..078aace75204 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -822,6 +822,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 					       rcu_head);
 
 	percpu_ref_exit(&q->q_usage_counter);
+	kmem_cache_free(queue_atomic_write_cachep, q->limits.aw_limits);
 	kmem_cache_free(blk_requestq_cachep, q);
 }
diff --git a/block/blk.h b/block/blk.h
index 5e7c00356ddc..c86d27d80ba0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -29,6 +29,7 @@ struct blk_flush_queue {
 };
 
 extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *queue_atomic_write_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index eb95b5ce7b8f..d407fe88daea 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -624,7 +624,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 	unsigned short remaining = 0;
 
 	struct dm_target *ti;
-	struct queue_limits ti_limits;
+	struct queue_limits ti_limits = {0};
 	unsigned i;
 
 	/*
 	 *
@@ -1482,7 +1482,7 @@ int dm_calculate_queue_limits(struct dm_table *table,
 			      struct queue_limits *limits)
 {
 	struct dm_target *ti;
-	struct queue_limits ti_limits;
+	struct queue_limits ti_limits = {0};
 	unsigned i;
 	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
 	unsigned int zone_sectors = 0;
@@ -1816,6 +1816,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
+	limits->aw_limits = q->limits.aw_limits;
 	q->limits = *limits;
 
 	if (dm_table_supports_nowait(t))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e90b3e96fafc..9048cfc0d000 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2129,7 +2129,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
-	struct queue_limits limits;
+	struct queue_limits limits = {0};
 	enum dm_queue_mode type = dm_get_md_type(md);
 
 	switch (type) {
@@ -2382,7 +2382,7 @@ static void dm_queue_flush(struct mapped_device *md)
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
-	struct queue_limits limits;
+	struct queue_limits limits = {0};
 	int r;
 
 	mutex_lock(&md->suspend_lock);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c0e78d8d9ba1..d52ea24deb45 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2052,10 +2052,14 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
 		if (le16_to_cpu(id->nabspf))
 			boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
 	}
-	lim->atomic_write_hw_max = atomic_bs;
-	lim->atomic_write_hw_boundary = boundary;
-	lim->atomic_write_hw_unit_min = bs;
-	lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+
+	if (!lim->aw_limits)
+		return;
+
+	lim->aw_limits->atomic_write_hw_max = atomic_bs;
+	lim->aw_limits->atomic_write_hw_boundary = boundary;
+	lim->aw_limits->atomic_write_hw_unit_min = bs;
+	lim->aw_limits->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
 
 	blk_atomic_writes_update_limits(lim);
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a24e5ebbb3dd..f27a0916a75e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -323,6 +323,17 @@ enum blk_zoned_model {
 	BLK_ZONED_HM,		/* Host-managed zoned block device */
 };
 
+struct queue_atomic_write_limits {
+	/* atomic write limits */
+	unsigned int		atomic_write_hw_max;
+	unsigned int		atomic_write_max_sectors;
+	unsigned int		atomic_write_hw_boundary;
+	unsigned int		atomic_write_hw_unit_min;
+	unsigned int		atomic_write_unit_min;
+	unsigned int		atomic_write_hw_unit_max;
+	unsigned int		atomic_write_unit_max;
+};
+
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
@@ -346,16 +357,6 @@ struct queue_limits {
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
-	/* atomic write limits */
-	unsigned int		atomic_write_hw_max;
-	unsigned int		atomic_write_max_sectors;
-	unsigned int		atomic_write_hw_boundary;
-	unsigned int		atomic_write_hw_unit_min;
-	unsigned int		atomic_write_unit_min;
-	unsigned int		atomic_write_hw_unit_max;
-	unsigned int		atomic_write_unit_max;
-
-
 	unsigned short		max_segments;
 	unsigned short		max_integrity_segments;
 	unsigned short		max_discard_segments;
@@ -365,7 +366,7 @@ struct queue_limits {
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
 
-	KABI_RESERVE(1)
+	KABI_USE(1, struct queue_atomic_write_limits *aw_limits)
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -1124,7 +1125,7 @@ static inline unsigned int blk_queue_get_max_sectors_wrapper(struct request *rq)
 	int op = req_op(rq);
 
 	if (rq->cmd_flags & REQ_ATOMIC)
-		return q->limits.atomic_write_max_sectors;
+		return q->limits.aw_limits->atomic_write_max_sectors;
 
 	return blk_queue_get_max_sectors(q, op);
 }
@@ -1224,6 +1225,7 @@ extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_default_atomic_write_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
@@ -1682,25 +1684,25 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
 static inline unsigned int
 queue_atomic_write_unit_max_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_unit_max;
+	return q->limits.aw_limits->atomic_write_unit_max;
 }
 
 static inline unsigned int
 queue_atomic_write_unit_min_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_unit_min;
+	return q->limits.aw_limits->atomic_write_unit_min;
 }
 
 static inline unsigned int
 queue_atomic_write_boundary_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_hw_boundary;
+	return q->limits.aw_limits->atomic_write_hw_boundary;
 }
 
 static inline unsigned int
 queue_atomic_write_max_bytes(const struct request_queue *q)
 {
-	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
+	return q->limits.aw_limits->atomic_write_max_sectors << SECTOR_SHIFT;
 }
 
 static inline int queue_dma_alignment(const struct request_queue *q)
@@ -2161,14 +2163,14 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
 	struct request_queue *bd_queue = bdev_get_queue(bdev);
 	struct queue_limits *limits = &bd_queue->limits;
 
-	if (!limits->atomic_write_unit_min)
+	if (!limits->aw_limits->atomic_write_unit_min)
 		return false;
 
 	if (bdev_is_partition(bdev)) {
 		sector_t bd_start_sect = bdev->bd_part->start_sect;
 		unsigned int alignment =
-			max(limits->atomic_write_unit_min,
-			    limits->atomic_write_hw_boundary);
+			max(limits->aw_limits->atomic_write_unit_min,
+			    limits->aw_limits->atomic_write_hw_boundary);
 		if (!IS_ALIGNED(bd_start_sect, alignment))
 			return false;
 	}