From: JiangShui Yang yangjiangshui@h-partners.com
Longfang Liu (4): vfio/migration: added map length page alignment vfio/migration: bugfix some driver code vfio/migration: add eq and aeq interruption restore vfio/migration: bugfix cache write-back issue
Weili Qian (12): crypto: hisilicon/qm - reset device before enabling it crypto: hisilicon/qm - modify interrupt processing resource application crypto: hisilicon/qm - disable same error report before resetting crypto: hisilicon/hpre - mask cluster timeout error crypto: hisilicon/qm - obtain the mailbox configuration at one time crypto: hisilicon/qm - fix the pf2vf timeout when global reset crypto: hisilicon/qm - mask error bit before flr crypto: hisilicon/qm - check device status before sending mailbox crypto: hisilicon/qm - hardware error does not reset during binding/unbinding crypto: hisilicon/sec2: fix memory use-after-free issue vfio/migration: remove unused local variable hisi_acc_vfio_pci: obtain the mailbox configuration at one time
drivers/crypto/hisilicon/hpre/hpre_main.c | 85 ++- drivers/crypto/hisilicon/qm.c | 679 +++++++++++------- drivers/crypto/hisilicon/sec2/sec.h | 2 - drivers/crypto/hisilicon/sec2/sec_crypto.c | 49 +- drivers/crypto/hisilicon/sec2/sec_main.c | 49 +- drivers/crypto/hisilicon/zip/zip_main.c | 51 +- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 140 ++-- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 3 + include/linux/hisi_acc_qm.h | 18 +- 9 files changed, 646 insertions(+), 430 deletions(-)
driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
When the device is re-enabled, stale data that was not cleared may still exist on the device. Therefore, reset the device to its initial state before enabling it. If an abnormal interrupt event is reported while the device is being enabled, the device is not reset.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/hpre/hpre_main.c | 24 +++-- drivers/crypto/hisilicon/qm.c | 115 +++++++++++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 18 ++-- drivers/crypto/hisilicon/zip/zip_main.c | 18 +++- 4 files changed, 119 insertions(+), 56 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e970dd629234..e11d0b189896 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -357,6 +357,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, };
+static const struct hisi_qm_err_ini hpre_err_ini; + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; @@ -1161,6 +1163,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + qm->err_ini = &hpre_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } @@ -1224,7 +1227,7 @@ static void hpre_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug;
- if (qm->fun_type == QM_HW_VF || !debug->last_words) + if (!debug->last_words) return;
kfree(debug->last_words); @@ -1339,6 +1342,17 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .err_info_init = hpre_err_info_init, };
+static void hpre_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); + hpre_close_sva_prefetch(qm); +} + static int hpre_pf_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; @@ -1350,8 +1364,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
- qm->err_ini = &hpre_err_ini; - qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) @@ -1445,7 +1457,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init: - hpre_show_last_regs_uninit(qm); + hpre_probe_uninit(qm); hisi_qm_dev_err_uninit(qm);
err_with_qm_init: @@ -1469,9 +1481,7 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) { - hpre_cnt_regs_clear(qm); - qm->debug.curr_qm_qp_num = 0; - hpre_show_last_regs_uninit(qm); + hpre_probe_uninit(qm); hisi_qm_dev_err_uninit(qm); }
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d9caff12cd4d..bbc94cd6e265 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -451,6 +451,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { };
static void qm_irqs_unregister(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm);
static u32 qm_get_hw_error_status(struct hisi_qm *qm) { @@ -4186,6 +4187,23 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return 0; }
+static int qm_master_ooo_check(struct hisi_qm *qm) +{ + u32 val; + int ret; + + /* Check the ooo register of the device before resetting the device. */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, + (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); + + return ret; +} + static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) { u32 nfe_enb = 0; @@ -4208,11 +4226,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) } }
-static int qm_soft_reset(struct hisi_qm *qm) +static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
/* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); @@ -4234,29 +4251,23 @@ static int qm_soft_reset(struct hisi_qm *qm) }
qm_dev_ecc_mbit_handle(qm); - - /* OOO register set and check */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - /* If bus lock, reset chip */ - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false); - if (ret) { + if (ret) pci_err(pdev, "Fails to disable pf MSE bit.\n"); - return ret; - } + + return ret; +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { @@ -4275,12 +4286,23 @@ static int qm_soft_reset(struct hisi_qm *qm) pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } - } else { - pci_err(pdev, "No reset method!\n"); - return -EINVAL; + + return 0; }
- return 0; + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + int ret; + + ret = qm_soft_reset_prepare(qm); + if (ret) + return ret; + + return qm_reset_device(qm); }
static int qm_vf_reset_done(struct hisi_qm *qm) @@ -5233,6 +5255,35 @@ static int qm_get_pci_res(struct hisi_qm *qm) return ret; }
+static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + /* Device does not support reset, return */ + if (!qm->err_ini->err_info_init) + return 0; + qm->err_ini->err_info_init(qm); + + if (!handle) + return 0; + + /* No reset method, return */ + if (!acpi_has_method(handle, qm->err_info.acpi_rst)) + return 0; + + ret = qm_master_ooo_check(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5262,8 +5313,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; }
+ ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + return 0;
+err_free_vectors: + pci_free_irq_vectors(pdev); err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: @@ -5564,7 +5621,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
ret = qm->ops->set_msi(qm, false); if (ret) { @@ -5572,18 +5628,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) return ret; }
- /* shutdown OOO register */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
ret = qm_set_pf_mse(qm, false); if (ret) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index d7047200ee7a..85ca2050f13d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -954,7 +954,7 @@ static void sec_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug;
- if (qm->fun_type == QM_HW_VF || !debug->last_words) + if (!debug->last_words) return;
kfree(debug->last_words); @@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret;
- qm->err_ini = &sec_err_ini; - qm->err_ini->err_info_init(qm); - ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + qm->err_ini = &sec_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm) { + if (qm->fun_type == QM_HW_VF) + return; + + sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); + sec_close_sva_prefetch(qm); hisi_qm_dev_err_uninit(qm); }
@@ -1254,7 +1258,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: - sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1276,11 +1279,6 @@ static void sec_remove(struct pci_dev *pdev) sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) - sec_debug_regs_clear(qm); - sec_show_last_regs_uninit(qm); - sec_probe_uninit(qm);
sec_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6239dc47d8a0..26b4c848ff12 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -986,7 +986,7 @@ static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug;
- if (qm->fun_type == QM_HW_VF || !debug->last_words) + if (!debug->last_words) return;
kfree(debug->last_words); @@ -1150,8 +1150,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - qm->err_ini = &hisi_zip_err_ini; - qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) @@ -1212,6 +1210,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + qm->err_ini = &hisi_zip_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1278,6 +1277,15 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) return 0; }
+static void hisi_zip_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hisi_zip_show_last_regs_uninit(qm); + hisi_zip_close_sva_prefetch(qm); +} + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -1342,7 +1350,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_stop(qm, QM_NORMAL);
err_dev_err_uninit: - hisi_zip_show_last_regs_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_qm_dev_err_uninit(qm);
err_qm_uninit: @@ -1365,7 +1373,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - hisi_zip_show_last_regs_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_qm_dev_err_uninit(qm); hisi_zip_qm_uninit(qm); }
driver inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
The resources required by interrupt processing, such as the workqueue, are allocated in hisi_qm_memory_init(). These resources are allocated regardless of whether the corresponding interrupts are enabled, so the allocated resources may never be used. To avoid wasting resources, move the resource allocation into qm_irqs_register(): when an interrupt type is not supported, its resources are not allocated. In addition, keeping interrupt registration and interrupt resource allocation in the same interface makes the code easier to maintain.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/hpre/hpre_main.c | 11 +- drivers/crypto/hisilicon/qm.c | 187 +++++++++++++--------- drivers/crypto/hisilicon/sec2/sec_main.c | 2 - drivers/crypto/hisilicon/zip/zip_main.c | 3 - include/linux/hisi_acc_qm.h | 2 - 5 files changed, 108 insertions(+), 97 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e11d0b189896..6bd3d8277d47 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1363,8 +1363,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) return ret;
hpre_open_sva_prefetch(qm); - - hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) pci_err(qm->pdev, "Failed to init last word regs!\n"); @@ -1458,8 +1456,6 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_with_err_init: hpre_probe_uninit(qm); - hisi_qm_dev_err_uninit(qm); - err_with_qm_init: hisi_qm_uninit(qm);
@@ -1479,12 +1475,7 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) { - hpre_probe_uninit(qm); - hisi_qm_dev_err_uninit(qm); - } - + hpre_probe_uninit(qm); hisi_qm_uninit(qm); }
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index bbc94cd6e265..939fb633e481 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2790,10 +2790,8 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) for (i = num - 1; i >= 0; i--) { qdma = &qm->qp_array[i].qdma; dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - kfree(qm->poll_data[i].qp_finish_id); }
- kfree(qm->poll_data); kfree(qm->qp_array); }
@@ -2803,18 +2801,12 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, struct device *dev = &qm->pdev->dev; size_t off = qm->sqe_size * sq_depth; struct hisi_qp *qp; - int ret = -ENOMEM; - - qm->poll_data[id].qp_finish_id = kcalloc(qm->eq_depth, sizeof(u16), - GFP_KERNEL); - if (!qm->poll_data[id].qp_finish_id) - return -ENOMEM;
qp = &qm->qp_array[id]; qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); if (!qp->qdma.va) - goto err_free_qp_finish_id; + return -ENOMEM;
qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; @@ -2827,10 +2819,6 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, qp->qp_id = id;
return 0; - -err_free_qp_finish_id: - kfree(qm->poll_data[id].qp_finish_id); - return ret; }
static inline bool is_iommu_used(struct device *dev) @@ -2923,11 +2911,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) writel(state, qm->io_base + QM_VF_STATE); }
-static void hisi_qm_unint_work(struct hisi_qm *qm) -{ - destroy_workqueue(qm->wq); -} - static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) { struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; @@ -2962,9 +2945,6 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm) */ void hisi_qm_uninit(struct hisi_qm *qm) { - qm_cmd_uninit(qm); - hisi_qm_unint_work(qm); - down_write(&qm->qps_lock); hisi_qm_memory_uninit(qm); hisi_qm_set_state(qm, QM_NOT_READY); @@ -3317,12 +3297,12 @@ static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) }
/** - * hisi_qm_dev_err_init() - Initialize device error configuration. + * qm_dev_err_init() - Initialize device error configuration. * @qm: The qm for which we want to do error initialization. * * Initialize QM and device error related configuration. */ -void hisi_qm_dev_err_init(struct hisi_qm *qm) +static void qm_dev_err_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; @@ -3335,15 +3315,14 @@ void hisi_qm_dev_err_init(struct hisi_qm *qm) } qm->err_ini->hw_err_enable(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
/** - * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. + * qm_dev_err_uninit() - Uninitialize device error configuration. * @qm: The qm for which we want to do error uninitialization. * * Uninitialize QM and device error related configuration. */ -void hisi_qm_dev_err_uninit(struct hisi_qm *qm) +static void qm_dev_err_uninit(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; @@ -3356,7 +3335,6 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm) } qm->err_ini->hw_err_disable(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
/** * hisi_qm_free_qps() - free multiple queue pairs. @@ -4451,7 +4429,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) }
qm_restart_prepare(qm); - hisi_qm_dev_err_init(qm); + qm_dev_err_init(qm); if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm);
@@ -4557,7 +4535,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) u32 delay = 0; int ret;
- hisi_qm_dev_err_uninit(pf_qm); + qm_dev_err_uninit(pf_qm);
/* * Check whether there is an ECC mbit error, If it occurs, need to @@ -4628,7 +4606,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev) } }
- hisi_qm_dev_err_init(pf_qm); + qm_dev_err_init(pf_qm);
ret = qm_restart(qm); if (ret) { @@ -4944,10 +4922,15 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return;
+ if (!qm->err_ini->err_info_init) + return; + val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return;
+ qm_dev_err_uninit(qm); + irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } @@ -4961,16 +4944,27 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return 0;
+ if (!qm->err_ini->err_info_init) { + dev_info(&qm->pdev->dev, "device doesnot support error init!\n"); + return 0; + } + val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return 0;
+ INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); + if (ret) { + dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret); + return ret; + }
- return ret; + qm_dev_err_init(qm); + + return 0; }
static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) @@ -4982,6 +4976,8 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return;
+ qm_cmd_uninit(qm); + irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } @@ -4996,14 +4992,22 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0;
+ INIT_WORK(&qm->cmd_process, qm_cmd_process); + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); + if (ret) { + dev_err(&pdev->dev, + "failed to request function communication irq, ret = %d!\n", ret); + return ret; + }
- return ret; + qm_cmd_init(qm); + + return 0; }
+/* Disable aeq interrupt by hisi_qm_stop(). */ static void qm_unregister_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5017,6 +5021,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm) free_irq(pci_irq_vector(pdev, irq_vector), qm); }
+/* Enable aeq interrupt by hisi_qm_start(). */ static int qm_register_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5031,11 +5036,62 @@ static int qm_register_aeq_irq(struct hisi_qm *qm) ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); if (ret) - dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + dev_err(&pdev->dev, "failed to request aeq irq, ret = %d!\n", ret);
return ret; }
+static void qm_uninit_eq_work(struct hisi_qm *qm) +{ + int i; + + destroy_workqueue(qm->wq); + for (i = qm->qp_num - 1; i >= 0; i--) + kfree(qm->poll_data[i].qp_finish_id); + + kfree(qm->poll_data); +} + +static int qm_init_eq_work(struct hisi_qm *qm) +{ + int ret = -ENOMEM; + u16 eq_depth; + int i; + + qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); + if (!qm->poll_data) + return ret; + + qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); + eq_depth = qm->eq_depth >> 1; + for (i = 0; i < qm->qp_num; i++) { + qm->poll_data[i].qp_finish_id = kcalloc(eq_depth, sizeof(u16), GFP_KERNEL); + if (!qm->poll_data[i].qp_finish_id) + goto free_qp_finish_id; + + INIT_WORK(&qm->poll_data[i].work, qm_work_process); + qm->poll_data[i].qm = qm; + } + + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + dev_err(&qm->pdev->dev, "failed to alloc workqueue!\n"); + goto free_qp_finish_id; + } + + return 0; + +free_qp_finish_id: + for (i = i - 1; i >= 0; i--) + kfree(qm->poll_data[i].qp_finish_id); + + kfree(qm->poll_data); + return ret; +} + +/* Disable eq interrupt by hisi_qm_stop(). */ static void qm_unregister_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5047,8 +5103,11 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); + + qm_uninit_eq_work(qm); }
+/* Enable eq interrupt by hisi_qm_start(). */ static int qm_register_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5059,10 +5118,18 @@ static int qm_register_eq_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0;
+ ret = qm_init_eq_work(qm); + if (ret) { + dev_err(&pdev->dev, "failed to init eq work!\n"); + return ret; + } + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + if (ret) { + dev_err(&pdev->dev, "failed to request eq irq, ret = %d!\n", ret); + qm_uninit_eq_work(qm); + }
return ret; } @@ -5328,30 +5395,6 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) return ret; }
-static int hisi_qm_init_work(struct hisi_qm *qm) -{ - int i; - - for (i = 0; i < qm->qp_num; i++) - INIT_WORK(&qm->poll_data[i].work, qm_work_process); - - if (qm->fun_type == QM_HW_PF) - INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); - - if (qm->ver > QM_HW_V2) - INIT_WORK(&qm->cmd_process, qm_cmd_process); - - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "failed to alloc workqueue!\n"); - return -ENOMEM; - } - - return 0; -} - static int hisi_qp_alloc_memory(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -5363,19 +5406,12 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) if (!qm->qp_array) return -ENOMEM;
- qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); - if (!qm->poll_data) { - kfree(qm->qp_array); - return -ENOMEM; - } - qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
/* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { - qm->poll_data[i].qm = qm; ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); if (ret) goto err_init_qp_mem; @@ -5443,7 +5479,6 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) } while (0)
idr_init(&qm->qp_idr); - qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + @@ -5526,16 +5561,8 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_alloc_uacce;
- ret = hisi_qm_init_work(qm); - if (ret) - goto err_free_qm_memory; - - qm_cmd_init(qm); - return 0;
-err_free_qm_memory: - hisi_qm_memory_uninit(qm); err_alloc_uacce: qm_remove_uacce(qm); err_irq_register: @@ -5663,7 +5690,7 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm) }
qm_cmd_init(qm); - hisi_qm_dev_err_init(qm); + qm_dev_err_init(qm); /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 85ca2050f13d..6c09857e2b82 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1070,7 +1070,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) return ret;
sec_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); if (ret) @@ -1190,7 +1189,6 @@ static void sec_probe_uninit(struct hisi_qm *qm) sec_debug_regs_clear(qm); sec_show_last_regs_uninit(qm); sec_close_sva_prefetch(qm); - hisi_qm_dev_err_uninit(qm); }
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 26b4c848ff12..21cf1401cbc3 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1160,7 +1160,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return ret;
hisi_zip_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm);
ret = hisi_zip_show_last_regs_init(qm); @@ -1351,7 +1350,6 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_dev_err_uninit: hisi_zip_probe_uninit(qm); - hisi_qm_dev_err_uninit(qm);
err_qm_uninit: hisi_zip_qm_uninit(qm); @@ -1374,7 +1372,6 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); hisi_zip_probe_uninit(qm); - hisi_qm_dev_err_uninit(qm); hisi_zip_qm_uninit(qm); }
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index f6383689974f..d7527371c915 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -542,8 +542,6 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm); int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); -void hisi_qm_dev_err_init(struct hisi_qm *qm); -void hisi_qm_dev_err_uninit(struct hisi_qm *qm); int hisi_qm_regs_debugfs_init(struct hisi_qm *qm, struct dfx_diff_registers *dregs, u32 reg_len); void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
If an error occurs that requires a device reset, disable reporting of that error until the device reset is complete, and re-enable error reporting after the reset completes. This prevents the same error from being reported repeatedly.
Fixes: eaebf4c3b103 ("crypto: hisilicon - Unify hardware error init/uninit into QM") Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com (cherry picked from commit 6cf358afff7be27d3cd89b72737cbcc5427ff71b) --- drivers/crypto/hisilicon/hpre/hpre_main.c | 25 ++++++-- drivers/crypto/hisilicon/qm.c | 76 ++++++++++++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 24 +++++-- drivers/crypto/hisilicon/zip/zip_main.c | 24 +++++-- include/linux/hisi_acc_qm.h | 4 ++ 5 files changed, 119 insertions(+), 34 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 6bd3d8277d47..af077934c1a5 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1290,11 +1290,26 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); - nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); +} + +static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB); +} + +static void hpre_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask, ce_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + ce_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); + + writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB); + writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB); }
static void hpre_open_axi_master_ooo(struct hisi_qm *qm) @@ -1334,6 +1349,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .hw_err_disable = hpre_hw_error_disable, .get_dev_hw_err_status = hpre_get_hw_err_status, .clear_dev_hw_err_status = hpre_clear_hw_err_status, + .disable_error_report = hpre_disable_error_report, + .enable_error_report = hpre_enable_error_report, .log_dev_hw_err = hpre_log_hw_error, .open_axi_master_ooo = hpre_open_axi_master_ooo, .open_sva_prefetch = hpre_open_sva_prefetch, diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 939fb633e481..5883e56e26e4 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -390,7 +390,7 @@ struct hisi_qm_hw_ops { int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm); void (*hw_error_uninit)(struct hisi_qm *qm); - enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); + enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm, bool need_reset); int (*set_msi)(struct hisi_qm *qm, bool set); };
@@ -1449,24 +1449,35 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) } }
-static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm, bool need_reset) { u32 error_status, tmp;
/* read err sts */ tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - error_status = qm->error_mask & tmp; - + error_status = tmp & (~qm->err_info.qm_err_type); + qm->err_info.qm_err_type |= tmp; if (error_status) { if (error_status & QM_ECC_MBIT) qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status); - if (error_status & qm->err_info.qm_reset_mask) + /* If the device is ready to reset, only print new error type. */ + if (!need_reset) + return ACC_ERR_RECOVERED; + + if (error_status & qm->err_info.qm_reset_mask) { + /* Disable the same error reporting until the error is recovered. */ + writel(qm->err_info.nfe & (~qm->err_info.qm_err_type), + qm->io_base + QM_RAS_NFE_ENABLE); return ACC_ERR_NEED_RESET; + }
- writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + /* Clear error source if not need reset. */ + writel(qm->err_info.qm_err_type, qm->io_base + QM_ABNORMAL_INT_SOURCE); + /* Avoid bios disable error type in v2 version, re-enable. */ writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); }
return ACC_ERR_RECOVERED; @@ -3286,14 +3297,14 @@ static void qm_hw_error_uninit(struct hisi_qm *qm) qm->ops->hw_error_uninit(qm); }
-static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm, bool need_reset) { if (!qm->ops->hw_error_handle) { dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); return ACC_ERR_NONE; }
- return qm->ops->hw_error_handle(qm); + return qm->ops->hw_error_handle(qm, need_reset); }
/** @@ -3915,17 +3926,19 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) } EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
-static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) +static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm, bool need_reset) { - u32 err_sts; + u32 err_sts, tmp;
if (!qm->err_ini->get_dev_hw_err_status) { dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); return ACC_ERR_NONE; }
- /* get device hardware error status */ - err_sts = qm->err_ini->get_dev_hw_err_status(qm); + /* Get device hardware new error status */ + tmp = qm->err_ini->get_dev_hw_err_status(qm); + err_sts = tmp & (~qm->err_info.dev_err_type); + qm->err_info.dev_err_type |= tmp; if (err_sts) { if (err_sts & qm->err_info.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; @@ -3933,11 +3946,21 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) if (qm->err_ini->log_dev_hw_err) qm->err_ini->log_dev_hw_err(qm, err_sts);
- if (err_sts & qm->err_info.dev_reset_mask) + /* If the device is ready to reset, only print new error type. */ + if (!need_reset) + return ACC_ERR_RECOVERED; + + if (err_sts & qm->err_info.dev_reset_mask) { + /* Disable the same error reporting until the error is recovered. */ + qm->err_ini->disable_error_report(qm, qm->err_info.dev_err_type); return ACC_ERR_NEED_RESET; + }
- if (qm->err_ini->clear_dev_hw_err_status) - qm->err_ini->clear_dev_hw_err_status(qm, err_sts); + /* Clear error source if not need reset. */ + if (qm->err_ini->clear_dev_hw_err_status) { + qm->err_ini->clear_dev_hw_err_status(qm, qm->err_info.dev_err_type); + qm->err_ini->enable_error_report(qm); + } }
return ACC_ERR_RECOVERED; @@ -3946,16 +3969,25 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) { enum acc_err_result qm_ret, dev_ret; + bool need_reset = true; + + if (!test_bit(QM_RST_SCHED, &qm->misc_ctl)) { + qm->err_info.qm_err_type = 0; + qm->err_info.dev_err_type = 0; + } else { + need_reset = false; + }
/* log qm error */ - qm_ret = qm_hw_error_handle(qm); + qm_ret = qm_hw_error_handle(qm, need_reset);
/* log device error */ - dev_ret = qm_dev_err_handle(qm); + dev_ret = qm_dev_err_handle(qm, need_reset); + if (need_reset && (qm_ret == ACC_ERR_NEED_RESET || + dev_ret == ACC_ERR_NEED_RESET)) + return ACC_ERR_NEED_RESET;
- return (qm_ret == ACC_ERR_NEED_RESET || - dev_ret == ACC_ERR_NEED_RESET) ? - ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; + return ACC_ERR_RECOVERED; }
/** @@ -4160,8 +4192,6 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) if (ret) pci_err(pdev, "failed to stop by vfs in soft reset!\n");
- clear_bit(QM_RST_SCHED, &qm->misc_ctl); - return 0; }
@@ -4421,6 +4451,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return ret; } } + clear_bit(QM_RST_SCHED, &qm->misc_ctl);
ret = qm_dev_hw_init(qm); if (ret) { @@ -4494,6 +4525,7 @@ static int qm_controller_reset(struct hisi_qm *qm)
err_reset: pci_err(pdev, "Controller reset failed (%d)\n", ret); + clear_bit(QM_RST_SCHED, &qm->misc_ctl); qm_reset_bit_clear(qm);
/* if resetting fails, isolate the device */ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 6c09857e2b82..566581039c87 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1010,11 +1010,25 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); - nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + SEC_RAS_NFE_REG); +} + +static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG); +} + +static void sec_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask, ce_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + ce_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); + writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG); + writel(ce_mask, qm->io_base + SEC_RAS_CE_REG); }
static void sec_open_axi_master_ooo(struct hisi_qm *qm) @@ -1052,6 +1066,8 @@ static const struct hisi_qm_err_ini sec_err_ini = { .hw_err_disable = sec_hw_error_disable, .get_dev_hw_err_status = sec_get_hw_err_status, .clear_dev_hw_err_status = sec_clear_hw_err_status, + .disable_error_report = sec_disable_error_report, + .enable_error_report = sec_enable_error_report, .log_dev_hw_err = sec_log_hw_error, .open_axi_master_ooo = sec_open_axi_master_ooo, .open_sva_prefetch = sec_open_sva_prefetch, diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 21cf1401cbc3..59f7b9fdb816 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1068,11 +1068,25 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); - nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); +} + +static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); +} + +static void hisi_zip_enable_error_report(struct hisi_qm *qm) +{ + u32 nfe_mask, ce_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + ce_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); + writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); }
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) @@ -1129,6 +1143,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .hw_err_disable = hisi_zip_hw_error_disable, .get_dev_hw_err_status = hisi_zip_get_hw_err_status, .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status, + .disable_error_report = hisi_zip_disable_error_report, + .enable_error_report = hisi_zip_enable_error_report, .log_dev_hw_err = hisi_zip_log_hw_error, .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index d7527371c915..4e4f96cff387 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -241,6 +241,8 @@ struct hisi_qm_err_info { u32 ce; u32 nfe; u32 fe; + u32 qm_err_type; + u32 dev_err_type; };
struct hisi_qm_err_status { @@ -254,6 +256,8 @@ struct hisi_qm_err_ini { void (*hw_err_disable)(struct hisi_qm *qm); u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*disable_error_report)(struct hisi_qm *qm, u32 err_type); + void (*enable_error_report)(struct hisi_qm *qm); void (*open_axi_master_ooo)(struct hisi_qm *qm); void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*open_sva_prefetch)(struct hisi_qm *qm);
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
The maximum timeout of the hpre cluster is 16 ms. When a page fault occurs, the CPU core may take longer than 16 ms to process the page fault event, especially when the guest OS processes the page fault event reported by the SMMU.
In the current test, there is a high probability that the cluster times out. However, the cluster is waiting for the completion of memory access, which is not an error. Therefore, the device does not need to be reset. If an error occurs in the cluster, the QM also reports the error. Therefore, the cluster timeout error of hpre can be masked.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/hpre/hpre_main.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index af077934c1a5..16fa09aba2d1 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -12,9 +12,7 @@ #include <linux/topology.h> #include "hpre.h"
-#define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c @@ -41,7 +39,6 @@ #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 -#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 @@ -65,7 +62,6 @@ #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 -#define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 @@ -202,9 +198,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, - {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, - {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, @@ -655,11 +651,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
- /* HPRE need more time, we close this interrupt */ - val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); - val |= BIT(HPRE_TIMEOUT_ABNML_BIT); - writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); - if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + HPRE_TYPES_ENB); @@ -668,9 +659,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); - writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); - writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); @@ -760,7 +749,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); @@ -777,7 +766,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */ - writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; + writel(~err_en, qm->io_base + HPRE_INT_MASK); }
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
The mailbox needs to be triggered by a 128-bit atomic operation. The reason is that one QM hardware entity in one accelerator serves the QM mailbox MMIO interfaces in the related PF and VFs. A mutex cannot lock mailbox processes in different functions. When multiple functions access the mailbox simultaneously, if the generic IO interfaces readq/writeq are used to access the mailbox, the data read from or written to the mailbox is unpredictable. Therefore, the generic IO interface is changed to a 128-bit atomic operation.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/qm.c | 160 ++++++++++++++++++++++------------ include/linux/hisi_acc_qm.h | 1 - 2 files changed, 105 insertions(+), 56 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 5883e56e26e4..6257d62cdb65 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -34,6 +34,10 @@ #define QM_MB_CMD_DATA_SHIFT 32 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) #define QM_MB_STATUS_MASK GENMASK(12, 9) +#define QM_MB_BUSY_MASK BIT(13) +#define QM_MB_SIZE 16 +#define QM_MB_MAX_WAIT_CNT 20000 +#define QM_MB_WAIT_READY_CNT 10
/* sqc shift */ #define QM_SQ_HOP_NUM_SHIFT 0 @@ -529,17 +533,6 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, mailbox->rsvd = 0; }
-/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ -int hisi_qm_wait_mb_ready(struct hisi_qm *qm) -{ - u32 val; - - return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, - val, !((val >> QM_MB_BUSY_SHIFT) & - 0x1), POLL_PERIOD, POLL_TIMEOUT); -} -EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); - /* 128 bit should be written to hardware at one time to trigger a mailbox */ static void qm_mb_write(struct hisi_qm *qm, const void *src) { @@ -550,7 +543,7 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) #endif
if (!IS_ENABLED(CONFIG_ARM64)) { - memcpy_toio(fun_base, src, 16); + memcpy_toio(fun_base, src, QM_MB_SIZE); dma_wmb(); return; } @@ -567,35 +560,95 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) #endif }
-static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) +/* 128 bit should be read from hardware at one time */ +static void qm_mb_read(struct hisi_qm *qm, void *dst) { - int ret; - u32 val; + const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + +#if IS_ENABLED(CONFIG_ARM64) + unsigned long tmp0 = 0, tmp1 = 0; +#endif
- if (unlikely(hisi_qm_wait_mb_ready(qm))) { - dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); - ret = -EBUSY; - goto mb_busy; + if (!IS_ENABLED(CONFIG_ARM64)) { + memcpy_fromio(dst, fun_base, QM_MB_SIZE); + dma_wmb(); + return; }
- qm_mb_write(qm, mailbox); +#if IS_ENABLED(CONFIG_ARM64) + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dmb oshst\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char *)dst)) + : "Q" (*((char __iomem *)fun_base)) + : "memory"); +#endif +} + +int hisi_qm_wait_mb_ready(struct hisi_qm *qm) +{ + struct qm_mailbox mailbox; + int i = 0; + + while (i++ < QM_MB_WAIT_READY_CNT) { + qm_mb_read(qm, &mailbox); + if (!(le16_to_cpu(mailbox.w0) & QM_MB_BUSY_MASK)) + return 0;
- if (unlikely(hisi_qm_wait_mb_ready(qm))) { - dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); - ret = -ETIMEDOUT; - goto mb_busy; + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); }
- val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); - if (val & QM_MB_STATUS_MASK) { - dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); - ret = -EIO; - goto mb_busy; + dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); + + return -EBUSY; +} +EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); + +static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + struct device *dev = &qm->pdev->dev; + int i = 0; + + while (++i) { + qm_mb_read(qm, mailbox); + if (!(le16_to_cpu(mailbox->w0) & QM_MB_BUSY_MASK)) + break; + + if (i == QM_MB_MAX_WAIT_CNT) { + dev_err(dev, "QM mailbox operation timeout!\n"); + return -ETIMEDOUT; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) { + dev_err(dev, "QM mailbox operation failed!\n"); + return -EIO; }
return 0; +} + +static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) +{ + int ret; + + ret = hisi_qm_wait_mb_ready(qm); + if (ret) + goto mb_err_cnt_increase; + + qm_mb_write(qm, mailbox); + + ret = qm_wait_mb_finish(qm, mailbox); + if (ret) + goto mb_err_cnt_increase; + + return 0;
-mb_busy: +mb_err_cnt_increase: atomic64_inc(&qm->debug.dfx.mb_err_cnt); return ret; } @@ -616,6 +669,24 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, } EXPORT_SYMBOL_GPL(hisi_qm_mb);
+static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +{ + struct qm_mailbox mailbox; + int ret; + + qm_mb_pre_init(&mailbox, cmd, 0, queue, 1); + mutex_lock(&qm->mailbox_lock); + ret = qm_mb_nolock(qm, &mailbox); + mutex_unlock(&qm->mailbox_lock); + if (ret) + return ret; + + *base = le32_to_cpu(mailbox.base_l) | + ((u64)le32_to_cpu(mailbox.base_h) << 32); + + return 0; +} + /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) { @@ -1330,12 +1401,10 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) u64 sqc_vft; int ret;
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); if (ret) return ret;
- sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); *number = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; @@ -1483,25 +1552,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm, bool need_r return ACC_ERR_RECOVERED; }
-static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) -{ - struct qm_mailbox mailbox; - int ret; - - qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); - mutex_lock(&qm->mailbox_lock); - ret = qm_mb_nolock(qm, &mailbox); - if (ret) - goto err_unlock; - - *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); - -err_unlock: - mutex_unlock(&qm->mailbox_lock); - return ret; -} - static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) { u32 val; @@ -1521,7 +1571,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) u64 msg; int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id); + ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, vf_id); if (ret) { dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); return; @@ -4795,7 +4845,7 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm) * Whether message is got successfully, * VF needs to ack PF by clearing the interrupt. */ - ret = qm_get_mb_cmd(qm, &msg, 0); + ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, 0); qm_clear_cmd_interrupt(qm, 0); if (ret) { dev_err(dev, "failed to get msg from PF in reset done!\n"); @@ -4849,7 +4899,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) * Get the msg from source by sending mailbox. Whether message is got * successfully, destination needs to ack source by clearing the interrupt. */ - ret = qm_get_mb_cmd(qm, &msg, fun_num); + ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num); qm_clear_cmd_interrupt(qm, BIT(fun_num)); if (ret) { dev_err(dev, "failed to get msg from source!\n"); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 4e4f96cff387..ec00a109292d 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -54,7 +54,6 @@ #define QM_MB_OP_SHIFT 14 #define QM_MB_CMD_DATA_ADDR_L 0x304 #define QM_MB_CMD_DATA_ADDR_H 0x308 -#define QM_MB_MAX_WAIT_CNT 6000
/* doorbell */ #define QM_DOORBELL_CMD_SQ 0
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
When functions communicate with each other, if the mailbox operation fails, the function cannot obtain the message from the communication source. If the VF does not receive the message from the PF to stop the function during reset, the VF will become unavailable.
For the global reset scenario: 1. Increase the QM_DEVICE_DOWN state. Before IO operation, check the state to avoid mailbox busy during communication. 2. When vf obtains pf message, if the mailbox fails, it is considered to be a global reset, and stop function directly. When pf sends reset message to vf, if the mailbox fails, it still send interrupt event to vf. 3. Increase the response time of PF waiting for vf.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/qm.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 6257d62cdb65..396978072b08 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -187,9 +187,9 @@ #define QM_IFC_INT_DISABLE BIT(0) #define QM_IFC_INT_STATUS_MASK BIT(0) #define QM_IFC_INT_SET_MASK BIT(0) -#define QM_WAIT_DST_ACK 10 -#define QM_MAX_PF_WAIT_COUNT 10 -#define QM_MAX_VF_WAIT_COUNT 40 +#define QM_WAIT_DST_ACK 100 +#define QM_MAX_PF_WAIT_COUNT 50 +#define QM_MAX_VF_WAIT_COUNT 100 #define QM_VF_RESET_WAIT_US 20000 #define QM_VF_RESET_WAIT_CNT 3000 #define QM_VF_RESET_WAIT_TIMEOUT_US \ @@ -1707,8 +1707,8 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) mutex_lock(&qm->mailbox_lock); /* PF sends command to all VFs by mailbox */ ret = qm_mb_nolock(qm, &mailbox); - if (ret) { - dev_err(dev, "failed to send command to VFs!\n"); + if (ret && cmd != QM_PF_FLR_PREPARE && cmd != QM_PF_SRST_PREPARE) { + dev_err(dev, "failed to send command to all vfs, cmd = %llu!\n", cmd); mutex_unlock(&qm->mailbox_lock); return ret; } @@ -1748,8 +1748,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); mutex_lock(&qm->mailbox_lock); ret = qm_mb_nolock(qm, &mailbox); - if (ret) { - dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); + if (ret && (cmd > QM_VF_START_FAIL || cmd < QM_VF_PREPARE_DONE)) { + dev_err(&qm->pdev->dev, "failed to send command to PF, cmd = %llu!\n", cmd); goto unlock; }
@@ -1758,8 +1758,10 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) while (true) { msleep(QM_WAIT_DST_ACK); val = readl(qm->io_base + QM_IFC_INT_SET_V); - if (!(val & QM_IFC_INT_STATUS_MASK)) + if (!(val & QM_IFC_INT_STATUS_MASK)) { + ret = 0; break; + }
if (++cnt > QM_MAX_VF_WAIT_COUNT) { ret = -ETIMEDOUT; @@ -4902,8 +4904,13 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num); qm_clear_cmd_interrupt(qm, BIT(fun_num)); if (ret) { - dev_err(dev, "failed to get msg from source!\n"); - return; + if (!fun_num) { + msg = QM_PF_SRST_PREPARE; + dev_err(dev, "failed to get response from PF, suppos it is soft reset!\n"); + } else { + dev_err(dev, "failed to get msg from source!\n"); + return; + } }
cmd = msg & QM_MB_CMD_DATA_MASK;
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
When FLR is performed, the current driver first masks the error bits and then waits for the error reset to complete. However, when the error is recovered, the error bits will be enabled again, rendering the mask invalid. Now the driver is modified to verify that no error remains after masking the error bits, and only then perform FLR.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/qm.c | 49 +++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 396978072b08..fc676a977f2d 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4619,22 +4619,30 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) u32 delay = 0; int ret;
- qm_dev_err_uninit(pf_qm); - - /* - * Check whether there is an ECC mbit error, If it occurs, need to - * wait for soft reset to fix it. - */ - while (qm_check_dev_error(pf_qm)) { - msleep(++delay); - if (delay > QM_RESET_WAIT_TIMEOUT) + while (true) { + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "FLR not ready!\n"); return; - } + }
- ret = qm_reset_prepare_ready(qm); - if (ret) { - pci_err(pdev, "FLR not ready!\n"); - return; + qm_dev_err_uninit(pf_qm); + /* + * Check whether there is an ECC mbit error, + * If it occurs, need to wait for soft reset + * to fix it. + */ + if (qm_check_dev_error(pf_qm)) { + qm_reset_bit_clear(qm); + if (delay > QM_RESET_WAIT_TIMEOUT) { + pci_err(pdev, "the hardware error was not recovered!\n"); + return; + } + + msleep(++delay); + } else { + break; + } }
/* PF obtains the information of VF by querying the register. */ @@ -4648,16 +4656,23 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); - hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); - hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); - return; + goto err_prepare; }
ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to stop by vfs in FLR!\n");
+ hisi_qm_cache_wb(qm); pci_info(pdev, "FLR resetting...\n"); + return; + +err_prepare: + pci_info(pdev, "FLR resetting prepare failed!\n"); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); + atomic_set(&qm->status.flags, QM_STOP); + hisi_qm_cache_wb(qm); } EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
Check device status before sending mailbox.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/qm.c | 39 ++++++++++++++++++++++++----------- include/linux/hisi_acc_qm.h | 1 + 2 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index fc676a977f2d..a53c5b031c2b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -470,15 +470,24 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm) /* Check if the error causes the master ooo block */ static bool qm_check_dev_error(struct hisi_qm *qm) { - u32 val, dev_val; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + u32 hw_status, dev_status;
- if (qm->fun_type == QM_HW_VF) + if (test_bit(QM_DEVICE_DOWN, &qm->misc_ctl)) + return true; + + /* VF cannot read status register, return false */ + if (pf_qm->fun_type == QM_HW_VF) return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; - dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; + hw_status = qm_get_hw_error_status(pf_qm) & + pf_qm->err_info.qm_shutdown_mask; + dev_status = qm_get_dev_err_status(pf_qm) & + pf_qm->err_info.dev_shutdown_mask; + if (hw_status || dev_status) + return true;
- return val || dev_val; + return false; }
static int qm_wait_reset_finish(struct hisi_qm *qm) @@ -659,6 +668,12 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, struct qm_mailbox mailbox; int ret;
+ /* No need to judge if master OOO is blocked. */ + if (qm_check_dev_error(qm)) { + dev_err(&qm->pdev->dev, "QM mailbox operation failed since qm is stop!\n"); + return -EIO; + } + qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
mutex_lock(&qm->mailbox_lock); @@ -690,7 +705,6 @@ static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) { - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct qm_mailbox mailbox; dma_addr_t xqc_dma; void *tmp_xqc; @@ -721,7 +735,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op }
/* Setting xqc will fail if master OOO is blocked. */ - if (qm_check_dev_error(pf_qm)) { + if (qm_check_dev_error(qm)) { dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); return -EIO; } @@ -1091,11 +1105,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm) { - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct device *dev = &qm->pdev->dev; int ret;
- if (qm_check_dev_error(pf_qm)) + if (qm_check_dev_error(qm)) return;
ret = qm_reset_prepare_ready(qm); @@ -2185,12 +2198,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) static int qm_drain_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); u32 state = 0; int ret;
/* No need to judge if master OOO is blocked. */ - if (qm_check_dev_error(pf_qm)) + if (qm_check_dev_error(qm)) return 0;
/* HW V3 supports drain qp by device */ @@ -4632,7 +4644,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) * If it occurs, need to wait for soft reset * to fix it. */ - if (qm_check_dev_error(pf_qm)) { + if (qm_check_dev_error(qm)) { qm_reset_bit_clear(qm); if (delay > QM_RESET_WAIT_TIMEOUT) { pci_err(pdev, "the hardware error was not recovered!\n"); @@ -4894,6 +4906,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, if (ret) goto err_get_status;
+ clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_pf_reset_vf_done(qm);
dev_info(dev, "device reset done.\n"); @@ -4901,6 +4914,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, return;
err_get_status: + clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_cmd_init(qm); qm_reset_bit_clear(qm); } @@ -4934,6 +4948,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) qm_pf_reset_vf_process(qm, QM_DOWN); break; case QM_PF_SRST_PREPARE: + set_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_pf_reset_vf_process(qm, QM_SOFT_RESET); break; case QM_VF_GET_QOS: diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index ec00a109292d..7116932e83c0 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -146,6 +146,7 @@ enum qm_misc_ctl_bits { QM_RST_SCHED, QM_RESETTING, QM_MODULE_PARAM, + QM_DEVICE_DOWN, };
enum qm_cap_bits {
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
If a hardware error occurs while the driver is being bound to or unbound from the device, resetting the device may cause the reset thread to access released memory. Therefore, the reset is not performed in this scenario. When the driver is re-bound to the device, the driver will reset the device.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/hpre/hpre_main.c | 5 +++++ drivers/crypto/hisilicon/qm.c | 17 ++++++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 5 +++++ drivers/crypto/hisilicon/zip/zip_main.c | 6 +++++- include/linux/hisi_acc_qm.h | 2 +- 5 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 16fa09aba2d1..27ae8d10ad9d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1408,6 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM;
qm = &hpre->qm; + set_bit(QM_DRIVER_DOWN, &qm->misc_ctl); ret = hpre_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); @@ -1424,6 +1425,9 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_with_err_init;
+ /* Device is enabled, clear the flag. */ + clear_bit(QM_DRIVER_DOWN, &qm->misc_ctl); + ret = hpre_debugfs_init(qm); if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); @@ -1458,6 +1462,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_qm_del_list: hisi_qm_del_list(qm, &hpre_devices); + hisi_qm_wait_task_finish(qm, &hpre_devices); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a53c5b031c2b..6be0506ac419 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1070,7 +1070,7 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data) if (!val) return IRQ_NONE;
- if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { + if (test_bit(QM_DRIVER_DOWN, &qm->misc_ctl)) { dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); return IRQ_HANDLED; } @@ -2784,7 +2784,7 @@ EXPORT_SYMBOL_GPL(qm_register_uacce); */ static int qm_frozen(struct hisi_qm *qm) { - if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) + if (test_bit(QM_DRIVER_DOWN, &qm->misc_ctl)) return 0;
down_write(&qm->qps_lock); @@ -2792,7 +2792,7 @@ static int qm_frozen(struct hisi_qm *qm) if (!qm->qp_in_used) { qm->qp_in_used = qm->qp_num; up_write(&qm->qps_lock); - set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); + set_bit(QM_DRIVER_DOWN, &qm->misc_ctl); return 0; }
@@ -4751,10 +4751,13 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); ret = qm_process_dev_error(qm); - if (ret == ACC_ERR_NEED_RESET && - !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && - !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) - schedule_work(&qm->rst_work); + if (ret == ACC_ERR_NEED_RESET) { + if (!test_bit(QM_DRIVER_DOWN, &qm->misc_ctl) && + !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) + schedule_work(&qm->rst_work); + else if (test_bit(QM_DRIVER_DOWN, &qm->misc_ctl)) + pci_warn(qm->pdev, "Driver is down, need reload driver!\n"); + }
return IRQ_HANDLED; } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 566581039c87..43f9bd0b6dbd 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1218,6 +1218,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM;
qm = &sec->qm; + set_bit(QM_DRIVER_DOWN, &qm->misc_ctl); ret = sec_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret); @@ -1238,6 +1239,9 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_uninit; }
+ /* Device is enabled, clear the flag. */ + clear_bit(QM_DRIVER_DOWN, &qm->misc_ctl); + ret = sec_debugfs_init(qm); if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); @@ -1269,6 +1273,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num); err_qm_del_list: hisi_qm_del_list(qm, &sec_devices); + hisi_qm_wait_task_finish(qm, &sec_devices); sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 59f7b9fdb816..6e9dbfc059a8 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1312,7 +1312,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM;
qm = &hisi_zip->qm; - + set_bit(QM_DRIVER_DOWN, &qm->misc_ctl); ret = hisi_zip_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret); @@ -1329,6 +1329,9 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_dev_err_uninit;
+ /* Device is enabled, clear the flag. */ + clear_bit(QM_DRIVER_DOWN, &qm->misc_ctl); + ret = hisi_zip_debugfs_init(qm); if (ret) pci_err(pdev, "failed to init debugfs (%d)!\n", ret); @@ -1360,6 +1363,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
err_qm_del_list: + hisi_qm_wait_task_finish(qm, &zip_devices); hisi_qm_del_list(qm, &zip_devices); hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 7116932e83c0..c7ce373ec09a 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -142,7 +142,7 @@ enum qm_vf_state { };
enum qm_misc_ctl_bits { - QM_DRIVER_REMOVING = 0x0, + QM_DRIVER_DOWN = 0x0, QM_RST_SCHED, QM_RESETTING, QM_MODULE_PARAM,
driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
When the number of packets being processed in the hardware queue is greater than 512, the SEC driver continues to send the packet to the hardware, but adds the packet to the backlog list. Then, the SEC driver returns -EBUSY to the caller, and the caller stops sending packets. When the number of packets in the queue queried in the callback is less than 512, the packet-sending thread is woken up.
When the number of packet-sending threads is greater than 512, packets in the backlog may already be completed but not yet deleted from the list. The released memory is then accessed during the deletion, causing a system panic.
Therefore, delete the backlog, determine whether the packet sending thread needs to be woken up based on 'fake_busy' in the sec_req, and then invoke the callback function of the user to ensure that the thread is woken up before releasing the req memory.
The log is as follows: [ 169.430697][ T1354] CPU: 27 PID: 1354 Comm: kworker/u262:1 Kdump: loaded Not tainted 5.10.0+ #1 [ 169.439678][ T1354] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDD, BIOS 2280-V2 CS V5.B211.01 11/10/2021 [ 169.450421][ T1354] Workqueue: 0000:76:00.0 qm_work_process [hisi_qm] [ 169.457050][ T1354] Call trace: [ 169.460285][ T1354] dump_backtrace+0x0/0x300 [ 169.464780][ T1354] show_stack+0x20/0x30 [ 169.468936][ T1354] dump_stack+0x104/0x17c [ 169.473240][ T1354] print_address_description.constprop.0+0x68/0x204 [ 169.479889][ T1354] __kasan_report+0xe0/0x140 [ 169.484463][ T1354] kasan_report+0x44/0xe0 [ 169.488771][ T1354] __asan_load8+0x94/0xd0 [ 169.493088][ T1354] __list_del_entry_valid+0x20/0x180 [ 169.498408][ T1354] sec_back_req_clear+0x184/0x2dc [hisi_sec2] [ 169.504494][ T1354] sec_skcipher_callback+0x84/0x150 [hisi_sec2] [ 169.510800][ T1354] sec_req_cb+0x1d4/0x334 [hisi_sec2] [ 169.516227][ T1354] qm_poll_req_cb+0x170/0x20c [hisi_qm] [ 169.524821][ T1354] qm_work_process+0xf8/0x124 [hisi_qm] [ 169.533436][ T1354] process_one_work+0x3a8/0x860 [ 169.541063][ T1354] worker_thread+0x280/0x670 [ 169.548349][ T1354] kthread+0x18c/0x1d0 [ 169.555169][ T1354] ret_from_fork+0x10/0x18 [ 169.562107][ T1354]
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/sec2/sec.h | 2 - drivers/crypto/hisilicon/sec2/sec_crypto.c | 49 ++++------------------ 2 files changed, 8 insertions(+), 43 deletions(-)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 410c83712e28..7b5d7b27446c 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -55,7 +55,6 @@ struct sec_req { dma_addr_t in_dma; struct sec_cipher_req c_req; struct sec_aead_req aead_req; - struct list_head backlog_head;
int err_type; int req_id; @@ -121,7 +120,6 @@ struct sec_qp_ctx { struct sec_alg_res *res; struct sec_ctx *ctx; spinlock_t req_lock; - struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index a2fd43f2e883..ffbffe9b5659 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -288,10 +288,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { - list_add_tail(&req->backlog_head, &qp_ctx->backlog); + req->fake_busy = true; + spin_unlock_bh(&qp_ctx->req_lock); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } spin_unlock_bh(&qp_ctx->req_lock); @@ -557,7 +557,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); - INIT_LIST_HEAD(&qp_ctx->backlog);
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx); if (ret) @@ -1419,31 +1418,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) } }
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, - struct sec_qp_ctx *qp_ctx) -{ - struct sec_req *backlog_req = NULL; - - spin_lock_bh(&qp_ctx->req_lock); - if (ctx->fake_req_limit >= - atomic_read(&qp_ctx->qp->qp_status.used) && - !list_empty(&qp_ctx->backlog)) { - backlog_req = list_first_entry(&qp_ctx->backlog, - typeof(*backlog_req), backlog_head); - list_del(&backlog_req->backlog_head); - } - spin_unlock_bh(&qp_ctx->req_lock); - - return backlog_req; -} - static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { struct skcipher_request *sk_req = req->c_req.sk_req; - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct skcipher_request *backlog_sk_req; - struct sec_req *backlog_req;
sec_free_req_id(req);
@@ -1452,13 +1430,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER);
- while (1) { - backlog_req = sec_back_req_clear(ctx, qp_ctx); - if (!backlog_req) - break; - - backlog_sk_req = backlog_req->c_req.sk_req; - skcipher_request_complete(backlog_sk_req, -EINPROGRESS); + if (req->fake_busy) { + skcipher_request_complete(sk_req, -EINPROGRESS); atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); }
@@ -1704,9 +1677,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) struct sec_aead_req *aead_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct aead_request *backlog_aead_req; - struct sec_req *backlog_req; size_t sz;
if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) @@ -1728,13 +1698,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
sec_free_req_id(req);
- while (1) { - backlog_req = sec_back_req_clear(c, qp_ctx); - if (!backlog_req) - break; - - backlog_aead_req = backlog_req->aead_req.aead_req; - aead_request_complete(backlog_aead_req, -EINPROGRESS); + if (req->fake_busy) { + aead_request_complete(a_req, -EINPROGRESS); atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); }
@@ -2120,6 +2085,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + req->fake_busy = false;
ret = sec_skcipher_param_check(ctx, req); if (unlikely(ret)) @@ -2360,6 +2326,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + req->fake_busy = false;
ret = sec_aead_param_check(ctx, req); if (unlikely(ret)) {
From: Longfang Liu liulongfang@huawei.com
virt inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
On systems with a page size of 64KB or larger, since the ACC dev BAR2 configuration space will only map 32KB to the VM, if page alignment is not performed, the length of the allocated space will fail the length check.

This will cause a VF device to fail to be enabled successfully on systems with a page size of 64KB or larger.
Signed-off-by: Longfang Liu liulongfang@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 4d27465c8f1a..38789a69798a 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -1221,7 +1221,21 @@ static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev, index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); if (index == VFIO_PCI_BAR2_REGION_INDEX) { u64 req_len, pgoff, req_start; - resource_size_t end = pci_resource_len(vdev->pdev, index) / 2; + resource_size_t end; + + /* + * ACC VF dev 64KB BAR2 region consists of both functional + * register space and migration control register space, each + * uses 32KB BAR2 region, on the system with more than 64KB + * page size, even if the migration control register space + * is written by VM, it will only affects the VF. + * + * In order to support the live migration function in the + * system with a page size above 64KB, the driver needs + * to ensure that the VF region size is aligned with the + * system page size. + */ + end = PAGE_ALIGN(pci_resource_len(vdev->pdev, index) / 2);
req_len = vma->vm_end - vma->vm_start; pgoff = vma->vm_pgoff &
From: Longfang Liu liulongfang@huawei.com
virt inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
1. Clean up some code style issues.
2. Fix some code issues.
Signed-off-by: Longfang Liu liulongfang@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 42 +++++++++---------- 1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 38789a69798a..fe2f5d7af99d 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -360,9 +360,12 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, u32 que_iso_state; int ret;
- if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done) + if (hisi_acc_vdev->match_done) return 0;
+ if (migf->total_length < QM_MATCH_SIZE) + return -EINVAL; + if (vf_data->acc_magic != ACC_DEV_MAGIC) { dev_err(dev, "failed to match ACC_DEV_MAGIC\n"); return -EINVAL; @@ -537,6 +540,7 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, }
migf->total_length = sizeof(struct acc_vf_data); + return 0; }
@@ -637,15 +641,16 @@ static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vde static void hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev) { -again: - spin_lock(&hisi_acc_vdev->reset_lock); - if (hisi_acc_vdev->deferred_reset) { + while (true) { + spin_lock(&hisi_acc_vdev->reset_lock); + if (!hisi_acc_vdev->deferred_reset) + break; + hisi_acc_vdev->deferred_reset = false; spin_unlock(&hisi_acc_vdev->reset_lock); hisi_acc_vdev->vf_qm_state = QM_NOT_READY; hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; hisi_acc_vf_disable_fds(hisi_acc_vdev); - goto again; } mutex_unlock(&hisi_acc_vdev->state_mutex); spin_unlock(&hisi_acc_vdev->reset_lock); @@ -699,12 +704,8 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu ssize_t done = 0; int ret;
- if (pos) - return -ESPIPE; - pos = &filp->f_pos; - - if (*pos < 0 || - check_add_overflow((loff_t)len, *pos, &requested_length)) + if (filp->f_pos < 0 || + check_add_overflow((loff_t)len, filp->f_pos, &requested_length)) return -EINVAL;
if (requested_length > sizeof(struct acc_vf_data)) @@ -721,7 +722,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu done = -EFAULT; goto out_unlock; } - *pos += len; + filp->f_pos += len; done = len; migf->total_length += len;
@@ -817,14 +818,11 @@ static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t { struct hisi_acc_vf_migration_file *migf = filp->private_data; ssize_t done = 0; + size_t min_len; int ret;
- if (pos) - return -ESPIPE; - pos = &filp->f_pos; - mutex_lock(&migf->lock); - if (*pos > migf->total_length) { + if (filp->f_pos > migf->total_length) { done = -EINVAL; goto out_unlock; } @@ -834,17 +832,17 @@ static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t goto out_unlock; }
- len = min_t(size_t, migf->total_length - *pos, len); - if (len) { + min_len = min_t(size_t, migf->total_length - filp->f_pos, len); + if (min_len) { u8 *vf_data = (u8 *)&migf->vf_data;
- ret = copy_to_user(buf, vf_data + *pos, len); + ret = copy_to_user(buf, &migf->vf_data, min_len); if (ret) { done = -EFAULT; goto out_unlock; } - *pos += len; - done = len; + filp->f_pos += min_len; + done = min_len; } out_unlock: mutex_unlock(&migf->lock);
From: Longfang Liu liulongfang@huawei.com
virt inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
For the current live migration process, supplement the doorbell command operations for the EQ and AEQ.
Signed-off-by: Longfang Liu liulongfang@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index fe2f5d7af99d..73c6228fc7c0 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -444,6 +444,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; }
+static void vf_qm_xqc_restore(struct hisi_qm *qm, + struct hisi_acc_vf_migration_file *migf) +{ + struct acc_vf_data *vf_data = &migf->vf_data; + u16 eq_head, aeq_head; + + eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0); + + aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF; + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0); +} + static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct hisi_acc_vf_migration_file *migf) { @@ -673,6 +686,7 @@ static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) { struct device *dev = &hisi_acc_vdev->vf_dev->dev; struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf; + struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; int ret;
/* Recover data to VF */ @@ -682,6 +696,9 @@ static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) return ret; }
+ /* Restore eqc and aeqc interrupt information */ + vf_qm_xqc_restore(vf_qm, migf); + return 0; }
From: Longfang Liu liulongfang@huawei.com
virt inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
In the new vfio framework, the cache write-back is placed in the device data copy stage, after the device operation has been stopped. Performing the cache write-back at this stage will cause the written-back data to be empty.

In order to ensure that the cache data is written back successfully, the write-back needs to be performed during the device stop stage.
Signed-off-by: Longfang Liu liulongfang@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 29 ++++++++++--------- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 3 ++ 2 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 73c6228fc7c0..6922b067d354 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -444,7 +444,7 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; }
-static void vf_qm_xqc_restore(struct hisi_qm *qm, +static void vf_qm_xeqc_save(struct hisi_qm *qm, struct hisi_acc_vf_migration_file *migf) { struct acc_vf_data *vf_data = &migf->vf_data; @@ -521,23 +521,17 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, vf_data->vf_qm_state = QM_READY; hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
- ret = vf_qm_cache_wb(vf_qm); - if (ret) { - dev_err(dev, "failed to writeback QM Cache!\n"); - return ret; - } - ret = qm_get_regs(vf_qm, vf_data); if (ret) return -EINVAL;
/* Every reg is 32 bit, the dma address is 64 bit. */ - vf_data->eqe_dma = vf_data->qm_eqc_dw[1]; + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH]; vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->eqe_dma |= vf_data->qm_eqc_dw[0]; - vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1]; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH]; vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0]; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */ ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma); @@ -554,6 +548,9 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
migf->total_length = sizeof(struct acc_vf_data);
+ /* Save eqc and aeqc interrupt information */ + vf_qm_xeqc_save(vf_qm, migf); + return 0; }
@@ -696,9 +693,6 @@ static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) return ret; }
- /* Restore eqc and aeqc interrupt information */ - vf_qm_xqc_restore(vf_qm, migf); - return 0; }
@@ -962,6 +956,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev dev_err(dev, "failed to check QM INT state!\n"); return ret; } + + ret = vf_qm_cache_wb(vf_qm); + if (ret) { + dev_err(dev, "failed to writeback QM cache!\n"); + return ret; + } + return 0; }
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h index dcabfeec6ca1..0c2d5c810caa 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h @@ -38,6 +38,9 @@ #define QM_REG_ADDR_OFFSET 0x0004
#define QM_XQC_ADDR_OFFSET 32U +#define QM_XQC_ADDR_LOW 0x1 +#define QM_XQC_ADDR_HIGH 0x2 + #define QM_VF_AEQ_INT_MASK 0x0004 #define QM_VF_EQ_INT_MASK 0x000c #define QM_IFC_INT_SOURCE_V 0x0020
virt inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
Remove unused local variable 'vf_qm' in function hisi_acc_vf_load_state.
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 6922b067d354..22b5fddcc70e 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -683,7 +683,6 @@ static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) { struct device *dev = &hisi_acc_vdev->vf_dev->dev; struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf; - struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm; int ret;
/* Recover data to VF */
virt inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9CB6L CVE: NA
----------------------------------------------------------------------
The mailbox configuration(128 bits) needs to be obtained from the hardware at one time. If the mailbox configuration is obtained for multiple times, the read value may be incorrect. Use the instruction to read mailbox data instead of readl().
Signed-off-by: Weili Qian qianweili@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- drivers/crypto/hisilicon/qm.c | 15 ++++--- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 45 +++---------------- include/linux/hisi_acc_qm.h | 8 ++-- 3 files changed, 17 insertions(+), 51 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 6be0506ac419..5ab2de816165 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -662,7 +662,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) return ret; }
-int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, +int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op) { struct qm_mailbox mailbox; @@ -682,9 +682,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
return ret; } -EXPORT_SYMBOL_GPL(hisi_qm_mb); +EXPORT_SYMBOL_GPL(hisi_qm_mb_write);
-static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) { struct qm_mailbox mailbox; int ret; @@ -701,6 +701,7 @@ static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
return 0; } +EXPORT_SYMBOL_GPL(hisi_qm_mb_read);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) @@ -1789,12 +1790,12 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
static int qm_drain_qm(struct hisi_qm *qm) { - return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); + return hisi_qm_mb_write(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); }
static int qm_stop_qp(struct hisi_qp *qp) { - return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); + return hisi_qm_mb_write(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); }
static int qm_set_msi(struct hisi_qm *qm, bool set) @@ -3164,11 +3165,11 @@ static int __hisi_qm_start(struct hisi_qm *qm) if (ret) return ret;
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) return ret;
- ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) return ret;
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 22b5fddcc70e..886f4e58eb18 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -81,13 +81,10 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base) u32 qp_num; int ret;
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); if (ret) return ret;
- sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); qp_num = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; @@ -95,36 +92,6 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base) return qp_num; }
-static int qm_get_sqc(struct hisi_qm *qm, u64 *addr) -{ - int ret; - - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1); - if (ret) - return ret; - - *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); - - return 0; -} - -static int qm_get_cqc(struct hisi_qm *qm, u64 *addr) -{ - int ret; - - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1); - if (ret) - return ret; - - *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); - - return 0; -} - static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data) { struct device *dev = &qm->pdev->dev; @@ -347,7 +314,7 @@ static void vf_qm_fun_reset(struct hisi_qm *qm)
static int vf_qm_func_stop(struct hisi_qm *qm) { - return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); + return hisi_qm_mb_write(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); }
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, @@ -486,13 +453,13 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, return ret; }
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) { dev_err(dev, "set sqc failed\n"); return ret; }
- ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) { dev_err(dev, "set cqc failed\n"); return ret; @@ -534,13 +501,13 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */ - ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma); + ret = hisi_qm_mb_read(vf_qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0); if (ret) { dev_err(dev, "failed to read SQC addr!\n"); return -EINVAL; }
- ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma); + ret = hisi_qm_mb_read(vf_qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0); if (ret) { dev_err(dev, "failed to read CQC addr!\n"); return -EINVAL; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index c7ce373ec09a..1d5fbffbbd2e 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -52,8 +52,6 @@ #define QM_MB_EVENT_SHIFT 8 #define QM_MB_BUSY_SHIFT 13 #define QM_MB_OP_SHIFT 14 -#define QM_MB_CMD_DATA_ADDR_L 0x304 -#define QM_MB_CMD_DATA_ADDR_H 0x308
/* doorbell */ #define QM_DOORBELL_CMD_SQ 0 @@ -559,9 +557,9 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev); void hisi_qm_reset_done(struct pci_dev *pdev);
int hisi_qm_wait_mb_ready(struct hisi_qm *qm); -int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, - bool op); - +int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, + u16 queue, bool op); +int hisi_qm_mb_read(struct hisi_qm *qm, u64 *msg, u8 cmd, u16 queue); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/5640 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/5640 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...