v1->v2: Apply patch to remove nvme_fc_wq.
Daniel Wagner (1):
  nvme-fc: do not wait in vain when unloading module

James Smart (1):
  nvme-fc: remove err_work work item

 drivers/nvme/host/fc.c | 87 ++++++++----------------------------------
 1 file changed, 16 insertions(+), 71 deletions(-)
From: James Smart <james.smart@broadcom.com>
mainline inclusion
from mainline-v5.8-rc1
commit eb4ee8f125157926cf36a3c275b04825f1bf8cfa
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9HJSI
CVE: CVE-2024-26846
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
err_work was created to handle errors (mainly I/O timeouts) while in CONNECTING state. The flag for err_work_active is also unneeded.
Remove err_work_active and err_work. The actions to abort I/Os are moved inline to nvme_fc_error_recovery().
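In essence, the CONNECTING-state branch of nvme_fc_error_recovery() now terminates the outstanding I/O directly instead of bouncing through a work item (a simplified sketch of the hunk in the diff below):

	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		__nvme_fc_terminate_io(ctrl);
		/* the reconnect work item reschedules the connection */
		return;
	}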
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Conflict: drivers/nvme/host/fc.c
Commit eb4ee8f12515 removed assoc_active from nvme_fc_ctrl and changed the type of flags.

Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
---
 drivers/nvme/host/fc.c | 40 ++++++++++------------------------------
 1 file changed, 10 insertions(+), 30 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 8cc714c4c35c..2578726181a0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -144,7 +144,6 @@ struct nvme_fc_ctrl {
 
 	bool			ioq_live;
 	bool			assoc_active;
-	atomic_t		err_work_active;
 	u64			association_id;
 
 	struct list_head	ctrl_list;	/* rport->ctrl_list */
@@ -153,7 +152,6 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct delayed_work	connect_work;
-	struct work_struct	err_work;
 
 	struct kref		ref;
 	u32			flags;
@@ -2051,11 +2049,11 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 	nvme_fc_ctrl_put(ctrl);
 }
 
+static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl);
+
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-	int active;
-
 	/*
 	 * if an error (io timeout, etc) while (re)connecting,
 	 * it's an error on creating the new association.
@@ -2064,11 +2062,14 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 	 * ios hitting this path before things are cleaned up.
 	 */
 	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		active = atomic_xchg(&ctrl->err_work_active, 1);
-		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
-			atomic_set(&ctrl->err_work_active, 0);
-			WARN_ON(1);
-		}
+		__nvme_fc_terminate_io(ctrl);
+
+		/*
+		 * Rescheduling the connection after recovering
+		 * from the io error is left to the reconnect work
+		 * item, which is what should have stalled waiting on
+		 * the io that had the error that scheduled this work.
+		 */
 		return;
 	}
 
@@ -2848,7 +2849,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side. this will block
@@ -2951,23 +2951,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 			ctrl->cnum);
 }
 
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
-	struct nvme_fc_ctrl *ctrl =
-			container_of(work, struct nvme_fc_ctrl, err_work);
-
-	__nvme_fc_terminate_io(ctrl);
-
-	atomic_set(&ctrl->err_work_active, 0);
-
-	/*
-	 * Rescheduling the connection after recovering
-	 * from the io error is left to the reconnect work
-	 * item, which is what should have stalled waiting on
-	 * the io that had the error that scheduled this work.
-	 */
-}
 
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.name			= "fc",
@@ -3084,7 +3067,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->cnum = idx;
 	ctrl->ioq_live = false;
 	ctrl->assoc_active = false;
-	atomic_set(&ctrl->err_work_active, 0);
 	init_waitqueue_head(&ctrl->ioabort_wait);
 
 	get_device(ctrl->dev);
@@ -3092,7 +3074,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3180,7 +3161,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
-	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	ctrl->ctrl.opts = NULL;
From: Daniel Wagner <dwagner@suse.de>
mainline inclusion
from mainline-v6.8-rc3
commit 70fbfc47a392b98e5f8dba70c6efc6839205c982
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9HJSI
CVE: CVE-2024-26846
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The module exit path has a race between deleting all controllers and freeing 'left over IDs'. To prevent a double free, a synchronization between nvme_delete_ctrl and ida_destroy was added by the initial commit.

There is some logic around trying to prevent hanging forever in wait_for_completion, though it does not handle all cases. E.g. blktests is able to reproduce a situation where the module unload hangs forever.
If we completely rely on the cleanup code executed from the nvme_delete_ctrl path, all IDs will be freed eventually. This makes calling ida_destroy unnecessary. We only have to ensure that all nvme_delete_ctrl code has been executed before we leave nvme_fc_exit_module. This is done by flushing the nvme_delete_wq workqueue.
While at it, remove the unused nvme_fc_wq workqueue too.
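In essence, the exit path now schedules deletion of every remaining controller and then waits for the delete work to finish before tearing anything else down (a simplified excerpt of the resulting nvme_fc_exit_module() in the diff below):

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
		list_for_each_entry(rport, &lport->endp_list, endp_list)
			nvme_fc_delete_controllers(rport);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* every queued nvme_delete_ctrl work item must run before unload */
	flush_workqueue(nvme_delete_wq);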
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Conflict: drivers/nvme/host/fc.c
Commit 97faec531460 ("nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device") defines a static fc_class and replaces class_destroy with class_unregister to release fc_class.
Commit 3dd83f4013f0 ("nvme-fc: replace ida_simple[get|remove] with the simler ida_[alloc|free]") replaces ida_simple_remove with the simpler ida_free.

Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
---
 drivers/nvme/host/fc.c | 47 ++++++------------------------------------
 1 file changed, 6 insertions(+), 41 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 2578726181a0..59696249f2cf 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -207,11 +207,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
 /*
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
@@ -238,8 +233,6 @@ nvme_fc_free_lport(struct kref *ref)
 	/* remove from transport list */
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_del(&lport->port_list);
-	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
-		complete(&nvme_fc_unload_proceed);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
 
 	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -3329,10 +3322,6 @@ static int __init nvme_fc_init_module(void)
 {
 	int ret;
 
-	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
-	if (!nvme_fc_wq)
-		return -ENOMEM;
-
 	/*
 	 * NOTE:
 	 * It is expected that in the future the kernel will combine
@@ -3351,7 +3340,7 @@ static int __init nvme_fc_init_module(void)
 	if (IS_ERR(fc_class)) {
 		pr_err("couldn't register class fc\n");
 		ret = PTR_ERR(fc_class);
-		goto out_destroy_wq;
+		return ret;
 	}
 
 	/*
@@ -3375,8 +3364,6 @@ static int __init nvme_fc_init_module(void)
 	device_destroy(fc_class, MKDEV(0, 0));
 out_destroy_class:
 	class_destroy(fc_class);
-out_destroy_wq:
-	destroy_workqueue(nvme_fc_wq);
 
 	return ret;
 }
@@ -3396,45 +3383,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
 	spin_unlock(&rport->lock);
 }
 
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
 {
 	struct nvme_fc_lport *lport;
 	struct nvme_fc_rport *rport;
-
-	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
-		list_for_each_entry(rport, &lport->endp_list, endp_list) {
-			nvme_fc_delete_controllers(rport);
-		}
-	}
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
 	unsigned long flags;
-	bool need_cleanup = false;
 
 	spin_lock_irqsave(&nvme_fc_lock, flags);
-	nvme_fc_waiting_to_unload = true;
-	if (!list_empty(&nvme_fc_lport_list)) {
-		need_cleanup = true;
-		nvme_fc_cleanup_for_unload();
-	}
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+		list_for_each_entry(rport, &lport->endp_list, endp_list)
+			nvme_fc_delete_controllers(rport);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
-	if (need_cleanup) {
-		pr_info("%s: waiting for ctlr deletes\n", __func__);
-		wait_for_completion(&nvme_fc_unload_proceed);
-		pr_info("%s: ctrl deletes complete\n", __func__);
-	}
+	flush_workqueue(nvme_delete_wq);
 
 	nvmf_unregister_transport(&nvme_fc_transport);
 
-	ida_destroy(&nvme_fc_local_port_cnt);
-	ida_destroy(&nvme_fc_ctrl_cnt);
-
 	device_destroy(fc_class, MKDEV(0, 0));
 	class_destroy(fc_class);
-	destroy_workqueue(nvme_fc_wq);
 }
 
 module_init(nvme_fc_init_module);
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/6601
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/S...