Fix CVE-2024-53093.
Christoph Hellwig (2):
  nvme: factor out a nvme_tryget_ns_head helper
  nvme: replace the GENHD_FL_UP check in nvme_mpath_shutdown_disk

Daniel Wagner (1):
  nvme: avoid race in shutdown namespace removal

Hannes Reinecke (2):
  nvme: fix refcounting imbalance when all paths are down
  nvme-multipath: avoid hang on inaccessible namespaces

Keith Busch (1):
  nvme-multipath: defer partition scanning

Zheng Qixing (1):
  nvme: multipath: fix kabi broken by adding struct nvme_ns_head_wrapper
 drivers/nvme/host/core.c      | 36 ++++++++++++++------
 drivers/nvme/host/multipath.c | 64 +++++++++++++++++++++++++++++++++--
 drivers/nvme/host/nvme.h      | 18 +++++-----
 fs/block_dev.c                |  1 +
 4 files changed, 96 insertions(+), 23 deletions(-)
From: Hannes Reinecke <hare@suse.de>
mainline inclusion
from mainline-v5.14-rc3
commit 5396fdac56d87d04e75e5068c0c92d33625f51e7
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
When the last path to a ns_head drops, the current code removes the ns_head from the subsystem list, but will only delete the disk itself once the last reference to the ns_head drops. This causes a refcounting imbalance, e.g. when applications hold a reference to the disk, as they will then never be notified that the disk is in fact dead.

This patch moves the call to del_gendisk() into nvme_mpath_check_last_path(), ensuring that the disk can be properly removed and applications get the appropriate notifications.
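For readability, the resulting tail of nvme_ns_remove() then looks roughly as follows; this is a condensed view assembled from the core.c hunks below, not the complete function:

	/* Detect the last path under the subsystem lock, then tear the
	 * multipath disk down outside of it. */
	mutex_lock(&ns->head->subsys->lock);
	if (list_empty(&ns->head->list)) {
		list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->head->subsys->lock);
	if (last_path)
		nvme_mpath_shutdown_disk(ns->head);
	nvme_put_ns(ns);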
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Conflicts:
	drivers/nvme/host/multipath.c [Context inconsistency.]
	drivers/nvme/host/nvme.h [Context inconsistency.]
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/core.c      | 14 +++++++++++---
 drivers/nvme/host/multipath.c |  9 ++++++++-
 drivers/nvme/host/nvme.h      | 11 ++---------
 3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7d1bfac3f998..cab35f0b5949 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4040,6 +4040,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+	bool last_path = false;
+
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 
@@ -4048,8 +4050,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
-	if (list_empty(&ns->head->list))
-		list_del_init(&ns->head->entry);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	synchronize_rcu(); /* guarantee not available in head->list */
@@ -4067,7 +4067,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_init(&ns->list);
 	up_write(&ns->ctrl->namespaces_rwsem);
 
-	nvme_mpath_check_last_path(ns);
+	/* Synchronize with nvme_init_ns_head() */
+	mutex_lock(&ns->head->subsys->lock);
+	if (list_empty(&ns->head->list)) {
+		list_del_init(&ns->head->entry);
+		last_path = true;
+	}
+	mutex_unlock(&ns->head->subsys->lock);
+	if (last_path)
+		nvme_mpath_shutdown_disk(ns->head);
 	nvme_put_ns(ns);
 }
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 379d6818a063..13e6d8b66790 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -709,12 +709,19 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 #endif
 }
 
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
+	kblockd_schedule_work(&head->requeue_work);
 	if (head->disk->flags & GENHD_FL_UP)
 		del_gendisk(head->disk);
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+	if (!head->disk)
+		return;
 	blk_set_queue_dying(head->disk->queue);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 577f189702ff..5f67a866be1f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -748,14 +748,7 @@ bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
-	struct nvme_ns_head *head = ns->head;
-
-	if (head->disk && list_empty(&head->list))
-		kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
 
 static inline void nvme_trace_bio_complete(struct request *req,
 		blk_status_t status)
@@ -810,7 +803,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 }
 static inline void nvme_trace_bio_complete(struct request *req,
From: Christoph Hellwig <hch@lst.de>
mainline inclusion
from mainline-v5.13-rc1
commit 871ca3ef132650b9b7777c2f2fd15b72c282d792
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Add a helper to avoid opencoding ns_head->ref manipulations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Javier González <javier.gonz@samsung.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/core.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cab35f0b5949..88cf8af5f1a5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -485,6 +485,11 @@ static void nvme_free_ns_head(struct kref *ref)
 	kfree(head);
 }
 
+static bool nvme_tryget_ns_head(struct nvme_ns_head *head)
+{
+	return kref_get_unless_zero(&head->ref);
+}
+
 static void nvme_put_ns_head(struct nvme_ns_head *head)
 {
 	kref_put(&head->ref, nvme_free_ns_head);
@@ -2382,9 +2387,7 @@ static const struct block_device_operations nvme_fops = {
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
 {
-	struct nvme_ns_head *head = bdev->bd_disk->private_data;
-
-	if (!kref_get_unless_zero(&head->ref))
+	if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
 		return -ENXIO;
 	return 0;
 }
@@ -3787,7 +3790,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
+		if (h->ns_id == nsid && nvme_tryget_ns_head(h))
 			return h;
 	}
 
From: Daniel Wagner <dwagner@suse.de>
mainline inclusion
from mainline-v5.15-rc2
commit 9edceaf43050f5ba1dd7d0011bcf68a736a17743
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
When we remove the siblings entry, we update ns->head->list, hence we can't separate the removal and test for being empty. They have to be in the same critical section to avoid a race.
To avoid reintroducing the refcounting imbalance, add a list_empty() check to nvme_find_ns_head().
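Condensed from the hunks below (a readability aid, not the full function), the removal path then keeps the unlink and the last-path test in one critical section:

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list)) {
		/* last path gone: unlink the head and tear it down later */
		list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->ctrl->subsys->lock);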
Fixes: 5396fdac56d8 ("nvme: fix refcounting imbalance when all paths are down")
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Tested-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Conflicts:
	drivers/nvme/host/core.c [Context inconsistency.]
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/core.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 88cf8af5f1a5..32f5ac997fd2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3790,7 +3790,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+		if (h->ns_id != nsid)
+			continue;
+		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
 			return h;
 	}
 
@@ -4053,6 +4055,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
+	if (list_empty(&ns->head->list)) {
+		list_del_init(&ns->head->entry);
+		last_path = true;
+	}
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	synchronize_rcu(); /* guarantee not available in head->list */
@@ -4070,13 +4076,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_init(&ns->list);
 	up_write(&ns->ctrl->namespaces_rwsem);
 
-	/* Synchronize with nvme_init_ns_head() */
-	mutex_lock(&ns->head->subsys->lock);
-	if (list_empty(&ns->head->list)) {
-		list_del_init(&ns->head->entry);
-		last_path = true;
-	}
-	mutex_unlock(&ns->head->subsys->lock);
 	if (last_path)
 		nvme_mpath_shutdown_disk(ns->head);
 	nvme_put_ns(ns);
From: Christoph Hellwig <hch@lst.de>
mainline inclusion
from mainline-v5.15-rc1
commit 916a470da02f909cabb65337f65438b8bc3965b2
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Use the nvme-internal NVME_NSHEAD_DISK_LIVE flag instead of abusing the block layer state.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20210809064028.1198327-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	drivers/nvme/host/multipath.c [Context inconsistency.]
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/multipath.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 13e6d8b66790..7c14931bcf67 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -714,7 +714,7 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 	if (!head->disk)
 		return;
 	kblockd_schedule_work(&head->requeue_work);
-	if (head->disk->flags & GENHD_FL_UP)
+	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
 		del_gendisk(head->disk);
 }
 
From: Hannes Reinecke <hare@kernel.org>
mainline inclusion
from mainline-v6.12-rc1
commit 3b97f5a05cfc55e7729ff3769f63eef64e2178bb
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
During repetitive namespace remapping operations on the target, the namespace might have changed between the time the initial scan was performed and the time the partition scan was invoked by device_add_disk() in nvme_mpath_set_live(). We then end up with a stuck scanning process:
[<0>] folio_wait_bit_common+0x12a/0x310
[<0>] filemap_read_folio+0x97/0xd0
[<0>] do_read_cache_folio+0x108/0x390
[<0>] read_part_sector+0x31/0xa0
[<0>] read_lba+0xc5/0x160
[<0>] efi_partition+0xd9/0x8f0
[<0>] bdev_disk_changed+0x23d/0x6d0
[<0>] blkdev_get_whole+0x78/0xc0
[<0>] bdev_open+0x2c6/0x3b0
[<0>] bdev_file_open_by_dev+0xcb/0x120
[<0>] disk_scan_partitions+0x5d/0x100
[<0>] device_add_disk+0x402/0x420
[<0>] nvme_mpath_set_live+0x4f/0x1f0 [nvme_core]
[<0>] nvme_mpath_add_disk+0x107/0x120 [nvme_core]
[<0>] nvme_alloc_ns+0xac6/0xe60 [nvme_core]
[<0>] nvme_scan_ns+0x2dd/0x3e0 [nvme_core]
[<0>] nvme_scan_work+0x1a3/0x490 [nvme_core]
This happens when we have several paths, some of which are inaccessible, and the active paths are removed first. Then nvme_find_path() will requeue I/O in the ns_head (as paths are present), but the requeue list is never triggered as all remaining paths are inactive.
This patch checks for NVME_NSHEAD_DISK_LIVE in nvme_available_path(), and requeues I/O after NVME_NSHEAD_DISK_LIVE has been cleared once the last path has been removed, to properly terminate pending I/O.
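Condensed from the hunks below, the resulting shutdown path clears the flag first, so the subsequent requeue run fails the I/O instead of parking it:

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		del_gendisk(head->disk);
	/* requeue only after NVME_NSHEAD_DISK_LIVE is cleared, so that
	 * nvme_available_path() lets the requeued I/O fail */
	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}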
Signed-off-by: Hannes Reinecke <hare@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Conflicts:
	drivers/nvme/host/multipath.c [Context inconsistency.]
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/multipath.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 7c14931bcf67..def9270642fa 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -277,6 +277,9 @@ static bool nvme_available_path(struct nvme_ns_head *head)
 {
 	struct nvme_ns *ns;
 
+	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+		return NULL;
+
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
 		switch (ns->ctrl->state) {
 		case NVME_CTRL_LIVE:
@@ -713,9 +716,14 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	kblockd_schedule_work(&head->requeue_work);
-	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
 		del_gendisk(head->disk);
+	/*
+	 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+	 * to allow multipath to fail all I/O.
+	 */
+	synchronize_srcu(&head->srcu);
+	kblockd_schedule_work(&head->requeue_work);
 }
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
From: Keith Busch <kbusch@kernel.org>
mainline inclusion
from mainline-v6.12-rc4
commit 1f021341eef41e77a633186e9be5223de2ce5d48
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
We need to suppress the partition scan from occurring within the controller's scan_work context. If a path error occurs here, the I/O will wait until a path becomes available or all paths are torn down, but that action also occurs within scan_work, so it would deadlock. Defer the partition scan to a different context that does not block scan_work.
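Condensed from the hunks below, the deferral amounts to suppressing the synchronous scan when the multipath node is allocated and kicking a work item once the disk goes live:

	/* at allocation time: no partition scan from scan_work context */
	INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
	set_bit(GENHD_FL_NO_PART_SCAN, &head->disk->state);

	/* in nvme_mpath_set_live(): scan from kblockd instead */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);
		kblockd_schedule_work(&head->partition_scan_work);
	}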
Reported-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Conflicts:
	drivers/nvme/host/multipath.c [Context inconsistency.]
	fs/block_dev.c [Need to export bdget_part.]
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/multipath.c | 51 +++++++++++++++++++++++++++++------
 drivers/nvme/host/nvme.h      |  1 +
 fs/block_dev.c                |  1 +
 3 files changed, 45 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index def9270642fa..eee881f970f7 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -335,6 +335,26 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
 	return ret;
 }
 
+static void nvme_partition_scan_work(struct work_struct *work)
+{
+	struct nvme_ns_head *head =
+		container_of(work, struct nvme_ns_head, partition_scan_work);
+	struct block_device *bdev;
+
+	if (WARN_ON_ONCE(!test_and_clear_bit(GENHD_FL_NO_PART_SCAN,
+					     &head->disk->state)))
+		return;
+
+	bdev = bdget_part(&head->disk->part0);
+	if (!bdev)
+		return;
+
+	mutex_lock(&bdev->bd_mutex);
+	bdev_disk_changed(bdev, false);
+	mutex_unlock(&bdev->bd_mutex);
+	bdput(bdev);
+}
+
 static void nvme_requeue_work(struct work_struct *work)
 {
 	struct nvme_ns_head *head =
@@ -367,6 +387,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	bio_list_init(&head->requeue_list);
 	spin_lock_init(&head->requeue_lock);
 	INIT_WORK(&head->requeue_work, nvme_requeue_work);
+	INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
 
 	/*
 	 * Add a multipath node if the subsystems supports multiple controllers.
@@ -396,6 +417,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	head->disk->private_data = head;
 	head->disk->queue = q;
 	head->disk->flags = GENHD_FL_EXT_DEVT;
+
+	/*
+	 * We need to suppress the partition scan from occuring within the
+	 * controller's scan_work context. If a path error occurs here, the IO
+	 * will wait until a path becomes available or all paths are torn down,
+	 * but that action also occurs within scan_work, so it would deadlock.
+	 * Defer the partion scan to a different context that does not block
+	 * scan_work.
+	 */
+	set_bit(GENHD_FL_NO_PART_SCAN, &head->disk->state);
 	sprintf(head->disk->disk_name, "nvme%dn%d",
 			ctrl->subsys->instance, head->instance);
 	return 0;
@@ -413,9 +444,11 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 	if (!head->disk)
 		return;
 
-	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
 		device_add_disk(&head->subsys->dev, head->disk,
 				nvme_ns_id_attr_groups);
+		kblockd_schedule_work(&head->partition_scan_work);
+	}
 
 	mutex_lock(&head->lock);
 	if (nvme_path_is_optimized(ns)) {
@@ -716,14 +749,15 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+	if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+		/*
+		 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+		 * to allow multipath to fail all I/O.
+		 */
+		synchronize_srcu(&head->srcu);
+		kblockd_schedule_work(&head->requeue_work);
 		del_gendisk(head->disk);
-	/*
-	 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
-	 * to allow multipath to fail all I/O.
-	 */
-	synchronize_srcu(&head->srcu);
-	kblockd_schedule_work(&head->requeue_work);
+	}
 }
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -734,6 +768,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
 	flush_work(&head->requeue_work);
+	flush_work(&head->partition_scan_work);
 	blk_cleanup_queue(head->disk->queue);
 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
 		/*
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5f67a866be1f..960850720e32 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -430,6 +430,7 @@ struct nvme_ns_head {
 	struct bio_list		requeue_list;
 	spinlock_t		requeue_lock;
 	struct work_struct	requeue_work;
+	struct work_struct	partition_scan_work;
 	struct mutex		lock;
 	unsigned long		flags;
 #define NVME_NSHEAD_DISK_LIVE	0
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b9104fc0a395..f2babcde16e3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -976,6 +976,7 @@ struct block_device *bdget_part(struct hd_struct *part)
 {
 	return bdget(part_devt(part));
 }
+EXPORT_SYMBOL_GPL(bdget_part);
 
 long nr_blockdev_pages(void)
 {
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB67YI
CVE: CVE-2024-53093
--------------------
Fix the kABI breakage caused by commit 08d945a8dbe9 ("nvme-multipath: defer partition scanning"): move the newly added partition_scan_work out of struct nvme_ns_head into a new struct nvme_ns_head_wrapper, reached via container_of(), so the layout of struct nvme_ns_head is left unchanged.
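Condensed from the hunks below: the work item moves into a wrapper that embeds struct nvme_ns_head, and every user recovers the wrapper via container_of(), so the exported structure keeps its size and field offsets. The work_struct sits in front of the head because struct nvme_ns_head ends in the flexible current_path[] array:

struct nvme_ns_head_wrapper {
#ifdef CONFIG_NVME_MULTIPATH
	struct work_struct	partition_scan_work;
#endif
	struct nvme_ns_head	head;
};

	/* wherever the work item is needed: */
	struct nvme_ns_head_wrapper *head_wrapper =
		container_of(head, struct nvme_ns_head_wrapper, head);
	flush_work(&head_wrapper->partition_scan_work);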
Fixes: 08d945a8dbe9 ("nvme-multipath: defer partition scanning")
Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
---
 drivers/nvme/host/core.c      | 14 +++++++++-----
 drivers/nvme/host/multipath.c | 18 +++++++++++++-----
 drivers/nvme/host/nvme.h      |  8 +++++++-
 3 files changed, 29 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 32f5ac997fd2..dc32ffb7cf14 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -477,12 +477,14 @@ static void nvme_free_ns_head(struct kref *ref)
 {
 	struct nvme_ns_head *head =
 		container_of(ref, struct nvme_ns_head, ref);
+	struct nvme_ns_head_wrapper *head_wrapper =
+		container_of(head, struct nvme_ns_head_wrapper, head);
 
 	nvme_mpath_remove_disk(head);
 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
 	cleanup_srcu_struct(&head->srcu);
 	nvme_put_subsystem(head->subsys);
-	kfree(head);
+	kfree(head_wrapper);
 }
 
 static bool nvme_tryget_ns_head(struct nvme_ns_head *head)
@@ -3817,17 +3819,19 @@ static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 		unsigned nsid, struct nvme_ns_ids *ids)
 {
+	struct nvme_ns_head_wrapper *head_wrapper;
 	struct nvme_ns_head *head;
-	size_t size = sizeof(*head);
+	size_t size = sizeof(*head_wrapper);
 	int ret = -ENOMEM;
 
 #ifdef CONFIG_NVME_MULTIPATH
 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
 #endif
 
-	head = kzalloc(size, GFP_KERNEL);
-	if (!head)
+	head_wrapper = kzalloc(size, GFP_KERNEL);
+	if (!head_wrapper)
 		goto out;
+	head = &head_wrapper->head;
 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
 	if (ret < 0)
 		goto out_free_head;
@@ -3869,7 +3873,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 out_ida_remove:
 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
 out_free_head:
-	kfree(head);
+	kfree(head_wrapper);
 out:
 	if (ret > 0)
 		ret = blk_status_to_errno(nvme_error_status(ret));
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index eee881f970f7..c79b67989a22 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -337,8 +337,9 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
 
 static void nvme_partition_scan_work(struct work_struct *work)
 {
-	struct nvme_ns_head *head =
-		container_of(work, struct nvme_ns_head, partition_scan_work);
+	struct nvme_ns_head_wrapper *head_wrapper =
+		container_of(work, struct nvme_ns_head_wrapper, partition_scan_work);
+	struct nvme_ns_head *head = &head_wrapper->head;
 	struct block_device *bdev;
 
 	if (WARN_ON_ONCE(!test_and_clear_bit(GENHD_FL_NO_PART_SCAN,
@@ -381,13 +382,15 @@ static void nvme_requeue_work(struct work_struct *work)
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 {
 	struct request_queue *q;
+	struct nvme_ns_head_wrapper *head_wrapper =
+		container_of(head, struct nvme_ns_head_wrapper, head);
 	bool vwc = false;
 
 	mutex_init(&head->lock);
 	bio_list_init(&head->requeue_list);
 	spin_lock_init(&head->requeue_lock);
 	INIT_WORK(&head->requeue_work, nvme_requeue_work);
-	INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
+	INIT_WORK(&head_wrapper->partition_scan_work, nvme_partition_scan_work);
 
 	/*
 	 * Add a multipath node if the subsystems supports multiple controllers.
@@ -440,6 +443,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 static void nvme_mpath_set_live(struct nvme_ns *ns)
 {
 	struct nvme_ns_head *head = ns->head;
+	struct nvme_ns_head_wrapper *head_wrapper =
+		container_of(head, struct nvme_ns_head_wrapper, head);
 
 	if (!head->disk)
 		return;
@@ -447,7 +452,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
 		device_add_disk(&head->subsys->dev, head->disk,
 				nvme_ns_id_attr_groups);
-		kblockd_schedule_work(&head->partition_scan_work);
+		kblockd_schedule_work(&head_wrapper->partition_scan_work);
 	}
 
 	mutex_lock(&head->lock);
@@ -762,13 +767,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
+	struct nvme_ns_head_wrapper *head_wrapper =
+		container_of(head, struct nvme_ns_head_wrapper, head);
+
 	if (!head->disk)
 		return;
 	blk_set_queue_dying(head->disk->queue);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
 	flush_work(&head->requeue_work);
-	flush_work(&head->partition_scan_work);
+	flush_work(&head_wrapper->partition_scan_work);
 	blk_cleanup_queue(head->disk->queue);
 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
 		/*
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 960850720e32..0b3a46b0378a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -430,7 +430,6 @@ struct nvme_ns_head {
 	struct bio_list		requeue_list;
 	spinlock_t		requeue_lock;
 	struct work_struct	requeue_work;
-	struct work_struct	partition_scan_work;
 	struct mutex		lock;
 	unsigned long		flags;
 #define NVME_NSHEAD_DISK_LIVE	0
@@ -438,6 +437,13 @@ struct nvme_ns_head {
 #endif
 };
 
+struct nvme_ns_head_wrapper {
+#ifdef CONFIG_NVME_MULTIPATH
+	struct work_struct	partition_scan_work;
+#endif
+	struct nvme_ns_head	head;
+};
+
 enum nvme_ns_features {
 	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
 	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/14237 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/X...