From: Jia He <justin.he@arm.com>
stable inclusion from linux-4.19.202 commit 6a6047bf892cc1ee2abe3a96e59c8fcbecbf9483
--------------------------------
[ Upstream commit 6206b7981a36476f4695d661ae139f7db36a802d ]
Lijian reported a bug_on hit on a ThunderX2 arm64 server with a FastLinQ
QL41000 ethernet controller:
 BUG: scheduling while atomic: kworker/0:4/531/0x00000200
 [qed_probe:488()]hw prepare failed
 kernel BUG at mm/vmalloc.c:2355!
 Internal error: Oops - BUG: 0 [#1] SMP
 CPU: 0 PID: 531 Comm: kworker/0:4 Tainted: G W 5.4.0-77-generic #86-Ubuntu
 pstate: 00400009 (nzcv daif +PAN -UAO)
 Call trace:
  vunmap+0x4c/0x50
  iounmap+0x48/0x58
  qed_free_pci+0x60/0x80 [qed]
  qed_probe+0x35c/0x688 [qed]
  __qede_probe+0x88/0x5c8 [qede]
  qede_probe+0x60/0xe0 [qede]
  local_pci_probe+0x48/0xa0
  work_for_cpu_fn+0x24/0x38
  process_one_work+0x1d0/0x468
  worker_thread+0x238/0x4e0
  kthread+0xf0/0x118
  ret_from_fork+0x10/0x18
In this case, qed_hw_prepare() returns an error due to a hw/fw error, but the
work queue should be running in process context rather than interrupt context,
so the vunmap() in the error path should have been allowed to sleep.
The root cause might be the unpaired spin_{un}lock_bh() in
_qed_mcp_cmd_and_union(), which leaves bottom halves disabled incorrectly.
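As an aside, here is a minimal, self-contained sketch of the locking discipline
this patch moves to (demo_lock and demo_wait() are placeholder names, not taken
from the qed driver): every path that leaves the retry loop drops the
BH-disabling lock, and the critical section that follows re-takes it explicitly.

/* Illustrative sketch, not from qed: demo_lock and demo_wait() are
 * placeholder names.  Every exit path out of the retry loop pairs the
 * spin_lock_bh() with a spin_unlock_bh(), so bottom halves are never
 * left disabled when the function returns.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);

static int demo_wait(bool (*done)(void), int max_retries)
{
	int cnt = 0;

	do {
		spin_lock_bh(&demo_lock);

		if (done()) {
			/* Drop the lock before leaving the loop; breaking
			 * out with demo_lock held would keep BHs disabled
			 * in the caller and trigger "scheduling while
			 * atomic" on the next sleeping call.
			 */
			spin_unlock_bh(&demo_lock);
			return 0;
		}

		spin_unlock_bh(&demo_lock);
	} while (++cnt < max_retries);

	return -EAGAIN;
}

The patch applies the same rule to both wait loops in _qed_mcp_cmd_and_union()
and re-takes cmd_lock before sending the mailbox command and before
qed_mcp_cmd_del_elem().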
Reported-by: Lijian Zhang <Lijian.Zhang@arm.com>
Signed-off-by: Jia He <justin.he@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 938ace333af10..0d62db3241bed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -498,14 +498,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-		if (!qed_mcp_has_pending_cmd(p_hwfn))
+		if (!qed_mcp_has_pending_cmd(p_hwfn)) {
+			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 			break;
+		}
 
 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
-		if (!rc)
+		if (!rc) {
+			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 			break;
-		else if (rc != -EAGAIN)
+		} else if (rc != -EAGAIN) {
 			goto err;
+		}
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -522,6 +526,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EAGAIN;
 	}
 
+	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
 	/* Send the mailbox command */
 	qed_mcp_reread_offsets(p_hwfn, p_ptt);
 	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
@@ -548,14 +554,18 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
-		if (p_cmd_elem->b_is_completed)
+		if (p_cmd_elem->b_is_completed) {
+			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 			break;
+		}
 
 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
-		if (!rc)
+		if (!rc) {
+			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 			break;
-		else if (rc != -EAGAIN)
+		} else if (rc != -EAGAIN) {
 			goto err;
+		}
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 	} while (++cnt < max_retries);
@@ -576,6 +586,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EAGAIN;
 	}
 
+	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 