From: Zhang Wei zhangwei375@huawei.com
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
1. Add ECC multi-bit error (ecc-mbit) handling in the reset process.
2. Fix some coding-style issues found during code review.
3. Fix security problems found during review.
4. Add qm_wait_task_complete before invoking the fail callback.
Signed-off-by: Zhang Wei zhangwei375@huawei.com Reviewed-by: zhouguangwei zhouguangwei5@huawei.com Reviewed-by: hucheng hucheng.hu@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 15 +- drivers/crypto/hisilicon/hpre/hpre_main.c | 157 +++++++------ drivers/crypto/hisilicon/qm.c | 205 ++++++++++++++-- drivers/crypto/hisilicon/qm.h | 23 +- drivers/crypto/hisilicon/rde/rde.h | 25 +- drivers/crypto/hisilicon/rde/rde_api.c | 244 ++++++++++---------- drivers/crypto/hisilicon/rde/rde_api.h | 2 +- drivers/crypto/hisilicon/rde/rde_data.c | 28 +-- drivers/crypto/hisilicon/rde/rde_main.c | 183 ++++++++------- drivers/crypto/hisilicon/sec/sec_algs.c | 13 +- drivers/crypto/hisilicon/sec2/sec.h | 10 +- drivers/crypto/hisilicon/sec2/sec_crypto.c | 42 ++-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 10 +- drivers/crypto/hisilicon/sec2/sec_main.c | 244 +++++++++++--------- drivers/crypto/hisilicon/sec2/sec_usr_if.h | 10 +- drivers/crypto/hisilicon/sgl.c | 6 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 4 +- drivers/crypto/hisilicon/zip/zip_main.c | 190 +++++++++------ 18 files changed, 840 insertions(+), 571 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index e58bedeb006f..5ca82af73ed9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -677,7 +677,7 @@ static int hpre_rsa_enc(struct akcipher_request *req) if (ret) return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT; + msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT); msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); @@ -807,10 +807,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { - ctx->rsa.pubkey = NULL; + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) return -EINVAL; - }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -927,7 +925,7 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) }
if (ctx->rsa.prikey) { - memset(ctx->rsa.prikey, 0, ctx->key_sz); + memset(ctx->rsa.prikey, 0, ctx->key_sz << 1); dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, ctx->rsa.dma_prikey); ctx->rsa.prikey = NULL; @@ -1039,6 +1037,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0); if (IS_ERR(ctx->rsa.soft_tfm)) { @@ -1046,7 +1045,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); }
- return hpre_ctx_init(ctx); + ret = hpre_ctx_init(ctx); + if (ret) + crypto_free_akcipher(ctx->rsa.soft_tfm); + + return ret; }
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9824d212e0e4..78c32047bd85 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -49,9 +49,9 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_HAC_RAS_CE_ENABLE 0x1 #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_HAC_RAS_FE_ENABLE 0
@@ -87,6 +87,7 @@ #define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 #define AM_OOO_SHUTDOWN_ENABLE BIT(0) #define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE +#define HPRE_WR_MSI_PORT 0xFFFB
#define HPRE_HW_ERROR_IRQ_ENABLE 1 #define HPRE_HW_ERROR_IRQ_DISABLE 0 @@ -94,6 +95,11 @@ #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + +#define HPRE_USLEEP 10
/* function index: * 1 for hpre bypass mode, @@ -355,10 +361,6 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre) writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
- /* disable FLR triggered by BME(bus master enable) */ - writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); - writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); - /* HPRE need more time, we close this interrupt */ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); val |= BIT(HPRE_TIMEOUT_ABNML_BIT); @@ -399,6 +401,13 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre) if (ret) dev_err(dev, "acpi_evaluate_dsm err.\n");
+ /* disable FLR triggered by BME(bus master enable) */ + val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG); + val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); + val |= HPRE_QM_PM_FLR; + writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); + return ret; }
@@ -442,6 +451,9 @@ static void hpre_hw_error_enable(struct hpre *hpre) struct hisi_qm *qm = &hpre->qm; u32 val;
+ /* clear HPRE hw error source if having */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + /* enable hpre hw error interrupts */ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); @@ -476,7 +488,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) u32 num_vfs = hpre->num_vfs; u32 vfq_num, tmp;
- if (val > num_vfs) return -EINVAL;
@@ -700,7 +711,9 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { - sprintf(buf, "cluster%d", i); + ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); + if (ret < 0) + return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root); if (!tmp_d) @@ -836,11 +849,46 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
static void hpre_hw_err_init(struct hpre *hpre) { - hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE, - 0, QM_DB_RANDOM_INVALID); + hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, QM_DB_RANDOM_INVALID); hpre_hw_error_enable(hpre); }
+static void hpre_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + writel(val & AM_OOO_SHUTDOWN_DISABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); + writel(val | AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); +} + +static u32 hpre_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HPRE_HAC_INT_STATUS); +} + +static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); +} + +static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hpre_hw_error *err = hpre_hw_errors; + struct device *dev = &qm->pdev->dev; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } +} + static int hpre_pf_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; @@ -850,6 +898,13 @@ static int hpre_pf_probe_init(struct hpre *hpre) return -EINVAL;
qm->ctrl_q_num = HPRE_QUEUE_NUM_V2; + qm->err_ini.qm_wr_port = HPRE_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = (HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR); + qm->err_ini.open_axi_master_ooo = hpre_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hpre_log_hw_error;
ret = hpre_set_user_domain_and_cache(hpre); if (ret) @@ -1058,8 +1113,12 @@ static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hpre_remove_wait_delay(struct hpre *hpre) { - while (hisi_qm_frozen(&hpre->qm)) - ; + struct hisi_qm *qm = &hpre->qm; + + while (hisi_qm_frozen(&hpre->qm) || + ((qm->fun_type == QM_HW_PF) && + hpre_try_frozen_vfs(hpre->qm.pdev))) + usleep_range(HPRE_USLEEP, HPRE_USLEEP); udelay(HPRE_WAIT_DELAY); }
@@ -1067,20 +1126,15 @@ static void hpre_remove(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); struct hisi_qm *qm = &hpre->qm; - int ret;
if (uacce_mode != UACCE_MODE_NOUACCE) hpre_remove_wait_delay(hpre);
hpre_algs_unregister(); hpre_remove_from_list(hpre); - if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) { - ret = hpre_sriov_disable(pdev); - if (ret) { - pci_err(pdev, "Disable SRIOV fail!\n"); - return; - } - } + if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) + hpre_sriov_disable(pdev); + if (qm->fun_type == QM_HW_PF) { hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; @@ -1095,50 +1149,11 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); }
-static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts) -{ - const struct hpre_hw_error *err = hpre_hw_errors; - struct device *dev = &hpre->qm.pdev->dev; - - while (err->msg) { - if (err->int_msk & err_sts) - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - err++; - } -} - -static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS); - if (err_sts) { - hpre_log_hw_error(hpre, err_sts); - - /* clear error interrupts */ - writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT); - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev) +static void hpre_shutdown(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); - pci_ers_result_t qm_ret, hpre_ret; - - /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
- /* log hpre error */ - hpre_ret = hpre_hw_error_handle(hpre); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - hpre_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hpre->qm, QM_NORMAL); }
static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, @@ -1151,7 +1166,7 @@ static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hpre_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hpre_vf_reset_prepare(struct pci_dev *pdev, @@ -1240,6 +1255,7 @@ static int hpre_soft_reset(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; + unsigned long long value = 0; int ret; u32 val;
@@ -1259,6 +1275,9 @@ static int hpre_soft_reset(struct hpre *hpre) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, hpre->qm.io_base + HPRE_MASTER_GLOBAL_CTRL); @@ -1282,7 +1301,6 @@ static int hpre_soft_reset(struct hpre *hpre)
/* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(dev)) { - unsigned long long value = 0; acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(dev), "HRST", @@ -1358,7 +1376,8 @@ static int hpre_controller_reset_done(struct hpre *hpre) ret = hpre_set_user_domain_and_cache(hpre); if (ret) return ret; - hpre_hw_err_init(hpre); + + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1375,6 +1394,9 @@ static int hpre_controller_reset_done(struct hpre *hpre) return -EPERM; }
+ hisi_qm_restart_done(qm); + hpre_hw_err_init(hpre); + return 0; }
@@ -1438,17 +1460,15 @@ static void hpre_set_hw_error(struct hpre *hisi_hpre, bool enable)
if (enable) { hisi_qm_hw_error_init(qm, QM_BASE_CE, - QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, - 0, QM_DB_RANDOM_INVALID); + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, QM_DB_RANDOM_INVALID); hpre_hw_error_enable(hpre); } else { hisi_qm_hw_error_uninit(qm); hpre_hw_error_disable(hpre); } - }
- static int hpre_get_hw_error_status(struct hpre *hpre) { u32 err_sts; @@ -1589,6 +1609,7 @@ static struct pci_driver hpre_pci_driver = { .remove = hpre_remove, .sriov_configure = hpre_sriov_configure, .err_handler = &hpre_err_handler, + .shutdown = hpre_shutdown, };
static void hpre_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 0def6ba571a1..9018c214455e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -113,6 +113,7 @@ #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0) #define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 #define QM_FIFO_OVERFLOW_TYPE 0xc0 #define QM_FIFO_OVERFLOW_VF 0x3f @@ -163,6 +164,13 @@
#define WAIT_PERIOD 20 #define MAX_WAIT_COUNTS 1000 +#define MAX_WAIT_TASK_COUNTS 10 + +#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT +#define AM_CFG_PORT_WR_EN 0x30001C +#define AM_CFG_PORT_WR_EN_VALUE 0xFFFF +#define AM_ROB_ECC_INT_STS 0x300104 +#define ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ @@ -524,7 +532,6 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) atomic_dec(&qp->qp_status.used); } } - } }
@@ -1134,13 +1141,13 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) /* read err sts */ tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); error_status = qm->error_mask & tmp; - if (error_status) { - qm_log_hw_error(qm, error_status); - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + if (error_status & QM_ECC_MBIT) + qm->err_ini.is_qm_ecc_mbit = 1; + else + qm->err_ini.is_qm_ecc_mbit = 0;
+ qm_log_hw_error(qm, error_status); return PCI_ERS_RESULT_NEED_RESET; }
@@ -1460,10 +1467,27 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+/* Callback function should be called whether task completed or not. */ +static void qp_stop_fail_cb(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + int cur_head, cur_tail; + int j, cnt, pos; + + cur_tail = qp->qp_status.sq_tail; + cnt = atomic_read(&qp->qp_status.used); + cur_head = (cur_tail + QM_Q_DEPTH - cnt) % QM_Q_DEPTH; + + for (j = 0; j < cnt; j++) { + pos = (j + cur_head) % QM_Q_DEPTH; + qp->req_cb(qp, qp->sqe + qm->sqe_size * pos); + atomic_dec(&qp->qp_status.used); + } +} + static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; - int i = 0;
/* it is stopped */ if (atomic_read(&qp->qp_status.flags) == QP_STOP) @@ -1473,14 +1497,11 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
atomic_set(&qp->qp_status.flags, QP_STOP);
- while (atomic_read(&qp->qp_status.used)) { - i++; - msleep(WAIT_PERIOD); - if (i == MAX_WAIT_COUNTS) { - dev_err(dev, "Cannot drain out data for stopping, system may hang up!!!\n"); - break; - } - } + /* waiting for increase used count in hisi_qp_send */ + udelay(WAIT_PERIOD); + + if (atomic_read(&qp->qp_status.used)) + qp_stop_fail_cb(qp);
dev_dbg(dev, "stop queue %u!", qp->qp_id);
@@ -1528,8 +1549,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || - atomic_read(&qp->qm->status.flags) == QM_STOP)) { - dev_info(&qp->qm->pdev->dev, "QM resetting...\n"); + atomic_read(&qp->qm->status.flags) == QM_STOP) || + qp->is_resetting == true) { + dev_info_ratelimited(&qp->qm->pdev->dev, "QM resetting...\n"); return -EAGAIN; }
@@ -1897,7 +1919,6 @@ static int qm_register_uacce(struct hisi_qm *qm) if (qm->use_sva) { uacce->flags = UACCE_DEV_SVA | UACCE_DEV_DRVMAP_DUS; } else { - uacce->flags = UACCE_DEV_NOIOMMU | UACCE_DEV_DRVMAP_DUS; if (qm->ver == QM_HW_V1) @@ -1912,7 +1933,6 @@ static int qm_register_uacce(struct hisi_qm *qm) for (i = 0; i < UACCE_QFRT_MAX; i++) uacce->qf_pg_start[i] = UACCE_QFR_NA;
- return uacce_register(uacce); }
@@ -2512,6 +2532,43 @@ static int qm_stop_started_qp(struct hisi_qm *qm) return 0; }
+static void qm_set_resetting_flag(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp && atomic_read(&qp->qp_status.flags) == QP_START) + qp->is_resetting = true; + } +} + +static void qm_wait_task_complete(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int tmcnt = 0; + int last_num; + int task_num; + int i; + + task_num = 0; + do { + last_num = task_num; + task_num = 0; + msleep(WAIT_PERIOD); + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) + task_num += atomic_read(&qp->qp_status.used); + } + if (task_num && last_num == task_num) + tmcnt++; + else + tmcnt = 0; + } while (task_num && tmcnt < MAX_WAIT_TASK_COUNTS); +} + /** * hisi_qm_stop() - Stop a qm. * @qm: The qm which will be stopped. @@ -2535,6 +2592,12 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) goto err_unlock; }
+ if (qm->status.stop_reason == QM_SOFT_RESET || + qm->status.stop_reason == QM_FLR) { + qm_set_resetting_flag(qm); + qm_wait_task_complete(qm); + } + if (qm->status.stop_reason == QM_SOFT_RESET || qm->status.stop_reason == QM_FLR) { ret = qm_stop_started_qp(qm); @@ -2560,6 +2623,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
hisi_qm_clear_queues(qm); atomic_set(&qm->status.flags, QM_STOP); + err_unlock: up_write(&qm->qps_lock); return ret; @@ -2752,6 +2816,46 @@ int hisi_qm_get_hw_error_status(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_hw_error_status);
+static pci_ers_result_t hisi_qm_dev_err_handle(struct hisi_qm *qm) +{ + u32 err_sts; + + if (!qm->err_ini.get_dev_hw_err_status || + !qm->err_ini.log_dev_hw_err) + return PCI_ERS_RESULT_RECOVERED; + + /* read err sts */ + err_sts = qm->err_ini.get_dev_hw_err_status(qm); + if (err_sts) { + if (err_sts & qm->err_ini.ecc_2bits_mask) + qm->err_ini.is_dev_ecc_mbit = 1; + else + qm->err_ini.is_dev_ecc_mbit = 0; + + qm->err_ini.log_dev_hw_err(qm, err_sts); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + pci_ers_result_t qm_ret, dev_ret; + + /* log qm error */ + qm_ret = hisi_qm_hw_error_handle(qm); + + /* log device error */ + dev_ret = hisi_qm_dev_err_handle(qm); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + dev_ret == PCI_ERS_RESULT_NEED_RESET) ? + PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error); + int hisi_qm_reg_test(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -2822,8 +2926,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
for (i = 0; i < MAX_WAIT_COUNTS; i++) { pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); - if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> - PEH_SRIOV_CTRL_VF_MSE_SHIFT) + if (set == ((sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> + PEH_SRIOV_CTRL_VF_MSE_SHIFT)) return 0;
udelay(1); @@ -2843,6 +2947,8 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set) } else { pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, PEH_MSI_DISABLE); + if (qm->err_ini.is_qm_ecc_mbit || qm->err_ini.is_dev_ecc_mbit) + return 0;
mdelay(1); if (readl(qm->io_base + QM_PEH_DFX_INFO0)) @@ -2853,6 +2959,63 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set) } EXPORT_SYMBOL_GPL(hisi_qm_set_msi);
+void hisi_qm_set_ecc(struct hisi_qm *qm) +{ + u32 nfe_enb; + + if ((!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) || + (qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.inject_dev_hw_err) || + (qm->err_ini.is_dev_ecc_mbit && qm->err_ini.inject_dev_hw_err)) + return; + + if (qm->err_ini.inject_dev_hw_err) + qm->err_ini.inject_dev_hw_err(qm); + else { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + qm->err_ini.is_qm_ecc_mbit = 1; + } +} +EXPORT_SYMBOL_GPL(hisi_qm_set_ecc); + +void hisi_qm_restart_prepare(struct hisi_qm *qm) +{ + if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) + return; + + /* close AM wr msi port */ + writel(qm->err_ini.qm_wr_port, qm->io_base + AM_CFG_PORT_WR_EN); + + /* clear dev ecc 2bit error source */ + if (qm->err_ini.clear_dev_hw_err_status) { + qm->err_ini.clear_dev_hw_err_status(qm, + qm->err_ini.ecc_2bits_mask); + } + + /* clear QM ecc mbit error source */ + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + /* clear AM Reorder Buffer ecc mbit source */ + writel(ROB_ECC_ERR_MULTPL, qm->io_base + AM_ROB_ECC_INT_STS); + + if (qm->err_ini.open_axi_master_ooo) + qm->err_ini.open_axi_master_ooo(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_restart_prepare); + +void hisi_qm_restart_done(struct hisi_qm *qm) +{ + if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) + return; + + writel(AM_CFG_PORT_WR_EN_VALUE, qm->io_base + AM_CFG_PORT_WR_EN); + qm->err_ini.is_qm_ecc_mbit = 0; + qm->err_ini.is_dev_ecc_mbit = 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_restart_done); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang wangzhou1@hisilicon.com"); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index b13aa364866d..e360796fe30d 100644 --- a/drivers/crypto/hisilicon/qm.h +++ 
b/drivers/crypto/hisilicon/qm.h @@ -52,7 +52,7 @@ #define AXI_M_CFG_ENABLE 0xffffffff #define QM_PEH_AXUSER_CFG 0x1000cc #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 -#define PEH_AXUSER_CFG 0x401001 +#define PEH_AXUSER_CFG 0x400801 #define PEH_AXUSER_CFG_ENABLE 0xffffffff
#define QM_DFX_MB_CNT_VF 0x104010 @@ -235,6 +235,21 @@ struct hisi_qm_status { int stop_reason; };
+struct hisi_qm; + +struct hisi_qm_err_ini { + u32 qm_wr_port; + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; + u32 ecc_2bits_mask; + void (*open_axi_master_ooo)(struct hisi_qm *qm); + u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + /* design for module can not hold on ooo through qm, such as zip */ + void (*inject_dev_hw_err)(struct hisi_qm *qm); +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -257,7 +272,7 @@ struct hisi_qm { dma_addr_t aeqe_dma;
struct hisi_qm_status status; - + struct hisi_qm_err_ini err_ini; struct rw_semaphore qps_lock; unsigned long *qp_bitmap; struct hisi_qp **qp_array; @@ -355,10 +370,14 @@ void hisi_qm_clear_queues(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); int hisi_qm_restart(struct hisi_qm *qm); int hisi_qm_get_hw_error_status(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev); int hisi_qm_reg_test(struct hisi_qm *qm); int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set); int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set); int hisi_qm_set_msi(struct hisi_qm *qm, bool set); +void hisi_qm_set_ecc(struct hisi_qm *qm); +void hisi_qm_restart_prepare(struct hisi_qm *qm); +void hisi_qm_restart_done(struct hisi_qm *qm);
struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h index c9ee329bc1e3..aa7887ad2d58 100644 --- a/drivers/crypto/hisilicon/rde/rde.h +++ b/drivers/crypto/hisilicon/rde/rde.h @@ -70,7 +70,7 @@ struct hisi_rde { #define RDE_DONE_SHIFT 7 #define RDE_PER_SRC_COEF_SIZE 32 #define RDE_PER_SRC_COEF_TIMES 4 -#define RDE_TASK_TMOUT_MS 10000 +#define RDE_TASK_TMOUT_MS 3000
#define RDE_GN_WITH_MODE(column, mode, parity) \ @@ -286,11 +286,12 @@ static inline void rde_bd_dump(struct hisi_rde_sqe *bd) { int i;
- pr_info("====== BD info start======\n"); + pr_info_ratelimited("====== BD info start======\n"); for (i = 0; i < sizeof(struct hisi_rde_sqe) / sizeof(u64); i++) - pr_info("sqe-word[%d]: 0x%llx.\n", i, *((u64 *)bd + i)); + pr_info_ratelimited("sqe-word[%d]: 0x%llx.\n", + i, *((u64 *)bd + i));
- pr_info("====== BD info end======\n"); + pr_info_ratelimited("====== BD info end======\n"); }
static inline void rde_table_dump(const struct hisi_rde_msg *req) @@ -299,26 +300,26 @@ static inline void rde_table_dump(const struct hisi_rde_msg *req)
for (i = 0; i < SRC_ADDR_TABLE_NUM; i++) { if (req->src_addr->content[i]) - pr_info("Table0 info[%d] is 0x%llx.\n", - i, req->src_addr->content[i]); + pr_info_ratelimited("Table0 info[%d] is 0x%llx.\n", + i, req->src_addr->content[i]); }
for (i = 0; i < SRC_DIF_TABLE_NUM; i++) { if (req->src_tag_addr->content[i]) - pr_info("Table1 info[%d] is 0x%llx.\n", - i, req->src_tag_addr->content[i]); + pr_info_ratelimited("Table1 info[%d] is 0x%llx.\n", + i, req->src_tag_addr->content[i]); }
for (i = 0; i < DST_ADDR_TABLE_NUM; i++) { if (req->dst_addr->content[i]) - pr_info("Table2 info[%d] is 0x%llx.\n", - i, req->dst_addr->content[i]); + pr_info_ratelimited("Table2 info[%d] is 0x%llx.\n", + i, req->dst_addr->content[i]); }
for (i = 0; i < DST_DIF_TABLE_NUM; i++) { if (req->dst_tag_addr->content[i]) - pr_info("Table3 info[%d] is 0x%llx.\n", - i, req->dst_tag_addr->content[i]); + pr_info_ratelimited("Table3 info[%d] is 0x%llx.\n", + i, req->dst_tag_addr->content[i]); } }
diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c index 6fceb008d21b..1be468a40976 100644 --- a/drivers/crypto/hisilicon/rde/rde_api.c +++ b/drivers/crypto/hisilicon/rde/rde_api.c @@ -63,7 +63,7 @@ static u32 rde_matrix_len(u8 alg_type, u8 cm_len) break; case MPCC: len = (RDE_PER_SRC_COEF_SIZE * - RDE_PER_SRC_COEF_TIMES * cm_len); + RDE_PER_SRC_COEF_TIMES * cm_len); break; default: pr_err("[%s] Err alg type.\n", __func__); @@ -74,9 +74,8 @@ static u32 rde_matrix_len(u8 alg_type, u8 cm_len) }
static int rde_sgl_src_scatterlist_release(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; int ret; @@ -84,8 +83,8 @@ static int rde_sgl_src_scatterlist_release(struct pci_dev *pdev, for (i = 0; i < num; i++) { if (req->src_record[i]) { ret = acc_sgl_phys_to_virt(pdev, - (void *)req->src_record[i], - rde_ctx->smmu_state); + (void *)req->src_record[i], + rde_ctx->smmu_state); if (ret) { dev_err(&pdev->dev, "[%s] Src[%d] fail.\n", __func__, i); @@ -108,8 +107,8 @@ static int rde_sgl_dst_scatterlist_release(struct pci_dev *pdev, for (i = 0; i < num; i++) { if (req->dst_record[i]) { ret = acc_sgl_phys_to_virt(pdev, - (void *)req->dst_record[i], - rde_ctx->smmu_state); + (void *)req->dst_record[i], + rde_ctx->smmu_state); if (ret) { dev_err(&pdev->dev, "[%s] Dst[%d] fail.\n", __func__, i); @@ -122,9 +121,8 @@ static int rde_sgl_dst_scatterlist_release(struct pci_dev *pdev, }
static void rde_pbuf_src_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; u32 gn_cnt; @@ -137,16 +135,16 @@ static void rde_pbuf_src_addr_unmap(struct pci_dev *pdev, gn_cnt = RDE_GN_CNT(i) + i; if (req->src_addr->content[gn_cnt]) { acc_phys_to_virt(pdev, - (dma_addr_t)req->src_addr->content[gn_cnt], - (size_t)udata->data_len, rde_ctx->smmu_state); + req->src_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); } } }
static void rde_pbuf_dst_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; u32 gn_cnt; @@ -159,15 +157,15 @@ static void rde_pbuf_dst_addr_unmap(struct pci_dev *pdev, gn_cnt = RDE_GN_CNT(i) + i; if (req->dst_addr->content[gn_cnt]) { acc_phys_to_virt(pdev, - (dma_addr_t)req->dst_addr->content[gn_cnt], - (size_t)udata->data_len, rde_ctx->smmu_state); + req->dst_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); } } }
-static void rde_cm_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_sqe *bd, u8 alg_type, - struct hisi_rde_ctx *rde_ctx) +static void rde_cm_addr_unmap(struct pci_dev *pdev, struct hisi_rde_sqe *bd, + u8 alg_type, struct hisi_rde_ctx *rde_ctx) { u32 matrix_len;
@@ -177,13 +175,12 @@ static void rde_cm_addr_unmap(struct pci_dev *pdev, matrix_len = rde_matrix_len(alg_type, (u8)bd->cm_len); if (bd->coef_matrix_addr && matrix_len) acc_phys_to_virt(pdev, (dma_addr_t)bd->coef_matrix_addr, - (size_t)matrix_len, rde_ctx->smmu_state); + (size_t)matrix_len, rde_ctx->smmu_state); }
static void rde_bd_addr_release(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 buf_mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 buf_mode) { int ret = 0; struct raid_ec_ctrl *udata = req->udata; @@ -195,29 +192,36 @@ static void rde_bd_addr_release(struct pci_dev *pdev, rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, dst_num); } else if (buf_mode == SGL) { ret = rde_sgl_src_scatterlist_release(pdev, - rde_ctx, req, src_num); + rde_ctx, req, src_num); if (ret) dev_err(&pdev->dev, "[%s] Src release fail.\n", __func__);
ret = rde_sgl_dst_scatterlist_release(pdev, - rde_ctx, req, dst_num); + rde_ctx, req, dst_num); if (ret) dev_err(&pdev->dev, "[%s] Dst release fail.\n", __func__); } }
-static int rde_cm_len_check(struct device *dev, u8 alg_type, u8 cm_len) +static int rde_cm_len_check(struct device *dev, struct raid_ec_ctrl *req, + u8 alg_type) { + if (unlikely(req->src_num > RDE_MAX_SRC_PLATE_NUM || + req->dst_num > RDE_MAX_DST_PLATE_NUM)) { + dev_err(dev, "Error!Invalid disk num.\n"); + return -EINVAL; + } + if (alg_type == MPCC) { - if (unlikely(cm_len > RDE_MPCC_MAX_SRC_NUM)) { + if (unlikely(req->cm_len > RDE_MPCC_MAX_SRC_NUM)) { dev_err(dev, "Error!mpcc cmlen should smaller than 17.\n"); return -EINVAL; } } else if (alg_type == PQ_FLEXEC) { - if (unlikely(cm_len > RDE_FLEXEC_MAX_SRC_NUM)) { + if (unlikely(req->cm_len > RDE_FLEXEC_MAX_SRC_NUM)) { dev_err(dev, "Error!flexec cmlen should smaller than 32.\n"); return -EINVAL; @@ -230,9 +234,8 @@ static int rde_cm_len_check(struct device *dev, u8 alg_type, u8 cm_len) return 0; }
-static int rde_io_para_check(struct acc_ctx *ctx, - struct raid_ec_ctrl *req, - u8 op_type, u8 alg_type) +static int rde_io_para_check(struct acc_ctx *ctx, struct raid_ec_ctrl *req, + u8 op_type, u8 alg_type) { struct hisi_rde_ctx *rde_ctx;
@@ -256,7 +259,7 @@ static int rde_io_para_check(struct acc_ctx *ctx, }
if (unlikely(!req->input_block || !req->data_len)) { - dev_err(rde_ctx->dev, "Error!invalid input_block.\n"); + dev_err(rde_ctx->dev, "Error!invalid input block.\n"); return -EINVAL; }
@@ -265,7 +268,7 @@ static int rde_io_para_check(struct acc_ctx *ctx, return -EINVAL; }
- return rde_cm_len_check(rde_ctx->dev, alg_type, req->cm_len); + return rde_cm_len_check(rde_ctx->dev, req, alg_type); }
static void src_dif_package(struct hisi_rde_msg *req) @@ -320,9 +323,10 @@ static void dst_dif_package(struct hisi_rde_msg *req) } }
-static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, struct sgl_hw *sgl_addr, - u64 *content, u64 *record) +static int rde_disk_sgl_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct sgl_hw *sgl_addr, + u64 *content, u64 *record) { int ret; void *sg_head = NULL; @@ -330,7 +334,7 @@ static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, switch (rde_ctx->addr_type) { case VA_FROM_NORMAL_DMA_ZONE: ret = acc_sgl_virt_to_phys(pdev, sgl_addr, &sg_head, - rde_ctx->smmu_state); + rde_ctx->smmu_state); if (unlikely(ret)) return ret; break; @@ -349,9 +353,8 @@ static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, }
static int sgl_src_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) { int ret, r_ret; u32 i; @@ -369,21 +372,22 @@ static int sgl_src_addr_package(struct pci_dev *pdev,
memset(&req->src_record[0], 0, num * sizeof(u64)); for (i = 0; i < num; i++) { - gn = RDE_GN_WITH_MODE(rde_sgl_src->column, - mode, rde_sgl_src->parity); - sgl_data = (rde_sgl_src->buf_offset << - SGL_DATA_OFFSET_SHIFT) | (u32)gn; + gn = RDE_GN_WITH_MODE(rde_sgl_src->column, mode, + rde_sgl_src->parity); + sgl_data = (rde_sgl_src->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->src_addr->content[cur_cnt] |= ((u64)sgl_data << RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_sgl_addr_translation(pdev, rde_ctx, - rde_sgl_src->ctrl, &req->src_addr->content[gn_cnt], - &req->src_record[i]); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_src->ctrl, + &req->src_addr->content[gn_cnt], + &req->src_record[i]); if (ret) { - r_ret = rde_sgl_src_scatterlist_release(pdev, - rde_ctx, req, i); + r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, + req, i); if (r_ret) return r_ret; return ret; @@ -396,8 +400,8 @@ static int sgl_src_addr_package(struct pci_dev *pdev, }
static int sgl_dst_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) { int ret, r_ret; u32 i; @@ -417,19 +421,20 @@ static int sgl_dst_addr_package(struct pci_dev *pdev, memset(&req->dst_record[0], 0, num * sizeof(u64)); for (i = 0; i < num; i++) { gn = (u8)(rde_sgl_dst->column); - sgl_data = (rde_sgl_dst->buf_offset << - SGL_DATA_OFFSET_SHIFT) | (u32)gn; + sgl_data = (rde_sgl_dst->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->dst_addr->content[cur_cnt] |= ((u64)sgl_data << - RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_sgl_addr_translation(pdev, rde_ctx, - rde_sgl_dst->ctrl, &req->dst_addr->content[gn_cnt], - &req->dst_record[i]); + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_dst->ctrl, + &req->dst_addr->content[gn_cnt], + &req->dst_record[i]); if (ret) { - r_ret = rde_sgl_dst_scatterlist_release(pdev, - rde_ctx, req, i); + r_ret = rde_sgl_dst_scatterlist_release(pdev, rde_ctx, + req, i); if (r_ret) return r_ret; return ret; @@ -441,15 +446,16 @@ static int sgl_dst_addr_package(struct pci_dev *pdev, return 0; }
-static int rde_disk_pbuf_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, u64 *content, char *addr, u32 data_len) +static int rde_disk_pbuf_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + u64 *content, char *addr, u32 data_len) { dma_addr_t pa = 0;
switch (rde_ctx->addr_type) { case VA_FROM_NORMAL_DMA_ZONE: pa = acc_virt_to_phys(pdev, addr, (size_t)data_len, - rde_ctx->smmu_state); + rde_ctx->smmu_state); break; case VA_FROM_HIGHMEM_ZONE: pa = acc_pfn_to_phys(addr); @@ -473,9 +479,8 @@ static int rde_disk_pbuf_addr_translation(struct pci_dev *pdev, }
static int pbuf_src_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) { u32 i; int ret; @@ -487,16 +492,17 @@ static int pbuf_src_addr_package(struct pci_dev *pdev, struct rde_pbuf *rde_pbuf_src = (struct rde_pbuf *)(ctrl->src_data);
for (i = 0; i < num; i++) { - gn = RDE_GN_WITH_MODE(rde_pbuf_src->column, - mode, rde_pbuf_src->parity); + gn = RDE_GN_WITH_MODE(rde_pbuf_src->column, mode, + rde_pbuf_src->parity); gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->src_addr->content[cur_cnt] |= ((u64)gn << - RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_pbuf_addr_translation(pdev, rde_ctx, - &req->src_addr->content[gn_cnt], - rde_pbuf_src->pbuf, data_len_nbytes); + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->src_addr->content[gn_cnt], + rde_pbuf_src->pbuf, + data_len_nbytes); if (ret) { rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, i); return ret; @@ -509,8 +515,8 @@ static int pbuf_src_addr_package(struct pci_dev *pdev, }
static int pbuf_dst_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) { u32 i; int ret; @@ -527,9 +533,9 @@ static int pbuf_dst_addr_package(struct pci_dev *pdev, gf_flag = RDE_GN_FLAG(i); cur_cnt = gf_cnt - gf_flag; req->dst_addr->content[cur_cnt] |= ((u64)gf_coef << - RDE_GN_SHIFT(gf_flag)); - ret = rde_disk_pbuf_addr_translation(pdev, rde_ctx, - &req->dst_addr->content[gf_cnt], + RDE_GN_SHIFT(gf_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->dst_addr->content[gf_cnt], rde_pbuf_dst->pbuf, data_len_nbytes); if (ret) { rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, i); @@ -543,11 +549,12 @@ static int pbuf_dst_addr_package(struct pci_dev *pdev, }
static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - struct rde_type *type) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, + struct rde_type *type) { int ret, r_ret; + u32 num = req->udata->src_num;
if (type->buf_mode == PBUF) { ret = pbuf_src_addr_package(pdev, rde_ctx, req, type->alg_mode); @@ -558,8 +565,7 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, ret = pbuf_dst_addr_package(pdev, rde_ctx, req); if (ret) { dev_err(&pdev->dev, "Pbuf dst addr package fail.\n"); - rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, - req->udata->src_num); + rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, num); return ret; } } else if (type->buf_mode == SGL) { @@ -572,7 +578,7 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Sgl dst addr package fail.\n"); r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, - req, req->udata->src_num); + req, num); if (r_ret) return r_ret; return ret; @@ -586,8 +592,9 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, }
static int rde_cm_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, struct raid_ec_ctrl *ctrl, - struct hisi_rde_sqe *bd, u8 alg_type) + struct hisi_rde_ctx *rde_ctx, + struct raid_ec_ctrl *ctrl, + struct hisi_rde_sqe *bd, u8 alg_type) { u32 matrix_len = 0; dma_addr_t pa = 0; @@ -595,7 +602,7 @@ static int rde_cm_addr_translation(struct pci_dev *pdev, if (rde_ctx->addr_type != PA_PASS_THROUGH) { matrix_len = rde_matrix_len(alg_type, ctrl->cm_len); pa = acc_virt_to_phys(pdev, ctrl->coe_matrix, - (size_t)matrix_len, rde_ctx->smmu_state); + (size_t)matrix_len, rde_ctx->smmu_state); if (unlikely(!pa)) { dev_err(rde_ctx->dev, "[%s] Coe_matrix virt to phys fail.\n", @@ -610,7 +617,7 @@ static int rde_cm_addr_translation(struct pci_dev *pdev, }
int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, - struct rde_type *type) + struct rde_type *type) { int ret; struct raid_ec_ctrl *ctrl = req->udata; @@ -622,7 +629,7 @@ int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, bd->op_tag = q_id * rde_ctx->session_num + req->req_id; bd->alg_blk_size = ctrl->alg_blk_size; bd->cm_type = (type->alg_mode == - ACC_OPT_RCT) ? CM_DECODE : CM_ENCODE; + ACC_OPT_RCT) ? CM_DECODE : CM_ENCODE; bd->cm_le = ctrl->cm_load; bd->abort = NO_ABORT; bd->src_nblks = ctrl->src_num; @@ -634,18 +641,18 @@ int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, ctrl->dst_dif.ctrl.verify.grd_verify_type; } bd->op_type = type->alg_mode | type->mem_mode | - type->buf_mode | type->alg_type; + type->buf_mode | type->alg_type; bd->block_size = ctrl->block_size; bd->page_pad_type = ctrl->dst_dif.ctrl.gen.page_layout_pad_type; - bd->dif_type = (ctrl->dst_dif.ctrl.gen.grd_gen_type) ? - RDE_DIF : NO_RDE_DIF; + bd->dif_type = ((ctrl->dst_dif.ctrl.gen.grd_gen_type) ? + RDE_DIF : NO_RDE_DIF); bd->crciv_sel = CRCIV1; bd->crciv_en = CRCIV; bd->cm_len = ctrl->cm_len; bd->transfer_size = ctrl->input_block - 1;
- ret = rde_cm_addr_translation(pdev, rde_ctx, ctrl, - bd, type->alg_type); + ret = rde_cm_addr_translation(pdev, rde_ctx, ctrl, bd, + type->alg_type); if (ret) return ret; bd->src_addr = req->src_dma_addr; @@ -708,15 +715,18 @@ static int rde_task_error_log(struct pci_dev *pdev, u8 err_sts) while (err->msg) { if (err_sts == err->status) { dev_err_ratelimited(&pdev->dev, - "[%s] [Error status=0x%x] found.\n", - err->msg, err->status); + "[%s][Error status=0x%x] found.\n", + err->msg, err->status); break; }
err++; }
- if (err_sts < RDE_CRC_CHK_ERR || err_sts > RDE_DISK16_VERIFY) + /* err_sts is 0, fatal engine */ + if (err_sts == RDE_STATUS_NULL) + return -EAGAIN; + else if (err_sts < RDE_CRC_CHK_ERR || err_sts > RDE_DISK16_VERIFY) return ACC_INVALID_PARAM; else if (err_sts >= RDE_CRC_CHK_ERR && err_sts <= RDE_REF_CHK_ERR) return ACC_RDE_DIF_ERR; @@ -740,11 +750,8 @@ static void rde_cb(struct hisi_qp *qp, void *resp) req = &rde_ctx->req_list[req_id]; ctrl = req->udata; err_status = wb_sqe->status & RDE_STATUS_MSK; - if (wb_sqe->status != RDE_TASK_DONE_STATUS) { + if (wb_sqe->status != RDE_TASK_DONE_STATUS) req->result = rde_task_error_log(pdev, err_status); - rde_bd_dump(wb_sqe); - rde_table_dump(req); - }
if (ctx->cb) { if (rde_ctx->addr_type != PA_PASS_THROUGH) { @@ -760,7 +767,7 @@ static void rde_cb(struct hisi_qp *qp, void *resp) }
int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, - u8 op_type, u8 alg_type, bool sync) + u8 op_type, u8 alg_type, bool sync) { int ret, id; struct hisi_rde_ctx *rde_ctx; @@ -809,7 +816,8 @@ int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, return ret;
if (wait_for_completion_timeout(&req->completion, - msecs_to_jiffies(RDE_TASK_TMOUT_MS)) == 0) { + msecs_to_jiffies(RDE_TASK_TMOUT_MS)) + == 0) { dev_err_ratelimited(rde_ctx->dev, "Sync mode task timeout.\n"); ret = -ETIME; goto addr_unmap; @@ -864,32 +872,32 @@ static void hisi_rde_release_qp(struct hisi_rde_ctx *rde_ctx) static int hisi_rde_tbl_init(struct device *dev, struct hisi_rde_msg *req) { req->src_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_src_tbl), - &req->src_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_src_tbl), + &req->src_dma_addr, GFP_KERNEL); if (!req->src_addr) { dev_err(dev, "[%s] Alloc rde_src_tlb failed.\n", __func__); return -ENOMEM; }
req->dst_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_dst_tbl), - &req->dst_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_dst_tbl), + &req->dst_dma_addr, GFP_KERNEL); if (!req->dst_addr) { dev_err(dev, "[%s] Alloc rde_dst_tlb failed.\n", __func__); return -ENOMEM; }
req->src_tag_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_src_tag_tbl), - &req->src_tag_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_src_tag_tbl), + &req->src_tag_dma_addr, GFP_KERNEL); if (!req->src_tag_addr) { dev_err(dev, "[%s] Alloc rde_src_tag_tlb failed.\n", __func__); return -ENOMEM; }
req->dst_tag_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_dst_tag_tbl), - &req->dst_tag_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_dst_tag_tbl), + &req->dst_tag_dma_addr, GFP_KERNEL); if (!req->dst_tag_addr) { dev_err(dev, "[%s] Alloc rde_dst_tag_tlb failed.\n", __func__); return -ENOMEM; @@ -907,25 +915,25 @@ static void hisi_rde_tbl_deinit(struct device *dev, struct hisi_rde_msg *req)
if (req->src_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_src_tbl), - req->src_addr, req->src_dma_addr); + req->src_addr, req->src_dma_addr); req->src_addr = NULL; }
if (req->dst_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tbl), - req->dst_addr, req->dst_dma_addr); + req->dst_addr, req->dst_dma_addr); req->dst_addr = NULL; }
if (req->src_tag_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_src_tag_tbl), - req->src_tag_addr, req->src_tag_dma_addr); + req->src_tag_addr, req->src_tag_dma_addr); req->src_tag_addr = NULL; }
if (req->dst_tag_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tag_tbl), - req->dst_tag_addr, req->dst_tag_dma_addr); + req->dst_tag_addr, req->dst_tag_dma_addr); req->dst_tag_addr = NULL; } } @@ -953,13 +961,13 @@ static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen) int ret;
spin_lock_init(&rde_ctx->req_lock); - rde_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), - sizeof(long), GFP_KERNEL); + rde_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), sizeof(long), + GFP_KERNEL); if (!rde_ctx->req_bitmap) return -ENOMEM;
rde_ctx->req_list = kcalloc(qlen, sizeof(struct hisi_rde_msg), - GFP_KERNEL); + GFP_KERNEL); if (!rde_ctx->req_list) { kfree(rde_ctx->req_bitmap); rde_ctx->req_bitmap = NULL; diff --git a/drivers/crypto/hisilicon/rde/rde_api.h index 4e19386d7f99..0f9021b4c1aa 100644 --- a/drivers/crypto/hisilicon/rde/rde_api.h +++ b/drivers/crypto/hisilicon/rde/rde_api.h @@ -308,7 +308,7 @@ struct acc_dif { * @input_block: number of sector * @data_len: data len of per disk, block_size (with dif)* input_block * @buf_type: denoted by ACC_BUF_TYPE_E - * @src_dif����dif information of source disks + * @src_dif: dif information of source disks * @dst_dif: dif information of dest disks * @cm_load: coe_matrix reload control, 0: do not load, 1: load * @cm_len: length of loaded coe_matrix, equal to src_num diff --git a/drivers/crypto/hisilicon/rde/rde_data.c index 0bc5173231e7..c25d3f36b9bf 100644 --- a/drivers/crypto/hisilicon/rde/rde_data.c +++ b/drivers/crypto/hisilicon/rde/rde_data.c @@ -90,7 +90,8 @@ static void acc_sgl_to_scatterlist(struct pci_dev *pdev, struct sgl_hw *data, entry->buf); i++) { sg_set_buf(sglist, (void *)entry->buf, entry->len); pa = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, + smmu_state); sg_dma_address(sglist) = pa; sglist++; entry->buf = (char *)pa; @@ -99,11 +100,12 @@ if (cur_sgl->next) { next_sgl = cur_sgl->next; sg_set_buf(sglist, (void *)next_sgl, - (u32)(sizeof(struct sgl_hw) + - sizeof(struct sgl_entry_hw) * - (next_sgl->entry_sum_in_sgl))); + (u32)(sizeof(struct sgl_hw) + + sizeof(struct sgl_entry_hw) * + (next_sgl->entry_sum_in_sgl))); pa = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, + smmu_state); sg_dma_address(sglist) = pa; sglist++; cur_sgl->next = (struct sgl_hw
*)pa; @@ -126,7 +128,7 @@ int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, }
if (unlikely(!data->entry_sum_in_sgl) || - data->entry_sum_in_sgl > data->entry_num_in_sgl) { + data->entry_sum_in_sgl > data->entry_num_in_sgl) { pr_err("[%s] Para sge num is wrong.\n", __func__); return -EINVAL; } @@ -141,9 +143,9 @@ int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, *sglist_head = sglist; sg_init_table(sglist, addr_cnt); sg_set_buf(sglist, (void *)data, (u32)(sizeof(struct sgl_hw) + - sizeof(struct sgl_entry_hw) * (data->entry_sum_in_sgl))); + sizeof(struct sgl_entry_hw) * (data->entry_sum_in_sgl))); sg_dma_address(sglist) = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, smmu_state); sglist++; acc_sgl_to_scatterlist(pdev, data, sglist, smmu_state);
@@ -170,7 +172,7 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sglist; cur_sgl = (struct sgl_hw *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); while (cur_sgl) { entry = cur_sgl->entries; for (i = 0; (i < cur_sgl->entry_sum_in_sgl && @@ -178,12 +180,12 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sg_next(sg); if (unlikely(!sg)) { pr_err("[%s][%d]Scatterlist happens to be NULL.\n", - __func__, __LINE__); + __func__, __LINE__); goto FAIL; } entry->buf = (char *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); entry++; }
@@ -191,12 +193,12 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sg_next(sg); if (unlikely(!sg)) { pr_err("[%s][%d]Scatterlist happens to be NULL.\n", - __func__, __LINE__); + __func__, __LINE__); goto FAIL; } next_sgl = (struct sgl_hw *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); cur_sgl->next = next_sgl; } else { next_sgl = NULL; diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c index 6e7d02913743..c3385f0ec981 100644 --- a/drivers/crypto/hisilicon/rde/rde_main.c +++ b/drivers/crypto/hisilicon/rde/rde_main.c @@ -34,8 +34,6 @@ #define HRDE_RD_TMOUT_US 1000 #define FORMAT_DECIMAL 10 #define HRDE_RST_TMOUT_MS 400 -#define HRDE_OOO_DFX_NUM 9 -#define HRDE_DFX_NUM 14 #define HRDE_ENABLE 1 #define HRDE_DISABLE 0 #define HRDE_PCI_COMMAND_INVALID 0xFFFFFFFF @@ -48,6 +46,7 @@ #define HRDE_INT_ENABLE 0x0 #define HRDE_INT_DISABLE 0x3ffff #define HRDE_INT_SOURCE 0x31030c +#define HRDE_INT_SOURCE_CLEAR GENMASK(17, 0) #define HRDE_INT_STATUS 0x310318 #define HRDE_DFX_CTRL_0 0x310240 #define HRDE_ECC_ERR 0x310234 @@ -68,6 +67,7 @@ #define CHN_CFG 0x5010101 #define HRDE_AXI_SHUTDOWN_EN BIT(26) #define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF +#define HRDE_WR_MSI_PORT 0xFFFE #define HRDE_AWUSER_BD_1 0x310104 #define HRDE_ARUSER_BD_1 0x310114 #define HRDE_ARUSER_SGL_1 0x310124 @@ -79,14 +79,15 @@ #define HRDE_ECC_2BIT_ERR BIT(1) #define HRDE_ECC_1BIT_SHIFT 16 #define HRDE_ECC_2BIT_CNT_MSK GENMASK(15, 0) -#define HRDE_STATE_INT_ERR GENMASK(10, 2) +#define HRDE_STATE_INT_ERR GENMASK(11, 2) +#define HRDE_AM_CURR_PORT_STS 0x300100 +#define HRDE_MASTER_TRANS_RET 0x300150 #define HRDE_FSM_MAX_CNT 0x310280 #define HRDE_QM_IDEL_STATUS 0x1040e4 #define HRDE_QM_PEH_DFX_INFO0 0x1000fc #define PEH_MSI_MASK_SHIFT 0x90 #define HRDE_MASTER_GLOBAL_CTRL 0x300000 #define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 -#define HRDE_MASTER_TRANS_RETURN 0x300150 #define 
MASTER_TRANS_RETURN_RW 0x3 #define CACHE_CTL 0x1833 #define HRDE_DBGFS_VAL_MAX_LEN 20 @@ -403,13 +404,13 @@ static int current_bd_write(struct ctrl_debug_file *file, u32 val) struct hisi_qm *qm = file_to_qm(file); u32 tmp = 0;
- if (val >= (HRDE_SQE_SIZE / sizeof(u32))) { + if (val >= (HRDE_SQE_SIZE / sizeof(u32))) { pr_err("Width index should be smaller than 16.\n"); return -EINVAL; }
tmp = HRDE_PROBE_DATA_EN | HRDE_PROBE_EN | - (val << HRDE_STRB_CS_SHIFT); + (val << HRDE_STRB_CS_SHIFT); writel(tmp, qm->io_base + HRDE_PROBE_ADDR);
return 0; @@ -505,8 +506,12 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) struct debugfs_regset32 *regset, *regset_ooo; struct dentry *tmp_d, *tmp; char buf[HRDE_DBGFS_VAL_MAX_LEN]; + int ret; + + ret = snprintf(buf, HRDE_DBGFS_VAL_MAX_LEN, "rde_dfx"); + if (ret < 0) + return -ENOENT;
- snprintf(buf, HRDE_DBGFS_VAL_MAX_LEN, "rde_dfx"); tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) return -ENOENT; @@ -517,8 +522,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) regset->regs = hrde_dfx_regs; regset->nregs = ARRAY_SIZE(hrde_dfx_regs); regset->base = qm->io_base; - tmp = debugfs_create_regset32("chn_regs", - 0444, tmp_d, regset); + tmp = debugfs_create_regset32("chn_regs", 0444, tmp_d, regset); if (!tmp) return -ENOENT;
@@ -528,8 +532,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) regset_ooo->regs = hrde_ooo_dfx_regs; regset_ooo->nregs = ARRAY_SIZE(hrde_ooo_dfx_regs); regset_ooo->base = qm->io_base; - tmp = debugfs_create_regset32("ooo_regs", - 0444, tmp_d, regset_ooo); + tmp = debugfs_create_regset32("ooo_regs", 0444, tmp_d, regset_ooo); if (!tmp) return -ENOENT;
@@ -602,7 +605,6 @@ static void hisi_rde_engine_init(struct hisi_rde *hisi_rde) readl(hisi_rde->qm.io_base + HRDE_OP_ERR_CNT); readl(hisi_rde->qm.io_base + HRDE_OP_ABORT_CNT); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_FIFO_STAT_0); - writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_INT_SOURCE); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_DFX_STAT_7); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_DFX_STAT_8);
@@ -653,17 +655,19 @@ static void hisi_rde_hw_error_set_state(struct hisi_rde *hisi_rde, bool state)
val = readl(hisi_rde->qm.io_base + HRDE_CFG); if (state) { + writel(HRDE_INT_SOURCE_CLEAR, + hisi_rde->qm.io_base + HRDE_INT_SOURCE); writel(HRDE_RAS_ENABLE, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK); /* bd prefetch should bd masked to prevent misreport */ writel((HRDE_INT_ENABLE | BIT(8)), hisi_rde->qm.io_base + HRDE_INT_MSK); - /* when m-bit error occur, master ooo will close */ + /* make master ooo close, when m-bits error happens */ val = val | HRDE_AXI_SHUTDOWN_EN; } else { writel(ras_msk, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK); writel(HRDE_INT_DISABLE, hisi_rde->qm.io_base + HRDE_INT_MSK); - /* when m-bit error occur, master ooo will not close */ + /* make master ooo open, when m-bits error happens */ val = val & HRDE_AXI_SHUTDOWN_DIS; }
@@ -674,13 +678,64 @@ static void hisi_rde_set_hw_error(struct hisi_rde *hisi_rde, bool state) { if (state) hisi_qm_hw_error_init(&hisi_rde->qm, QM_BASE_CE, - QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, 0, 0); + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, 0); else hisi_qm_hw_error_uninit(&hisi_rde->qm);
hisi_rde_hw_error_set_state(hisi_rde, state); }
+static void hisi_rde_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HRDE_CFG); + writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG); + writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG); +} + +static u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HRDE_INT_STATUS); +} + +static void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HRDE_INT_SOURCE); +} + +static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_rde_hw_error *err = rde_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_err_ratelimited(dev, + "[%s] [Error status=0x%x] found.\n", + err->msg, err->int_msk); + err++; + } + + if (HRDE_ECC_2BIT_ERR & err_sts) { + err_val = (readl(qm->io_base + HRDE_ERR_CNT) & + HRDE_ECC_2BIT_CNT_MSK); + dev_err_ratelimited(dev, + "Rde ecc 2bit sram num=0x%x.\n", err_val); + } + + if (HRDE_STATE_INT_ERR & err_sts) { + err_val = readl(qm->io_base + HRDE_AM_CURR_PORT_STS); + dev_err_ratelimited(dev, + "Rde ooo cur port sts=0x%x.\n", err_val); + err_val = readl(qm->io_base + HRDE_MASTER_TRANS_RET); + dev_err_ratelimited(dev, + "Rde ooo outstanding sts=0x%x.\n", err_val); + } +} + static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde) { struct hisi_qm *qm = &hisi_rde->qm; @@ -706,8 +761,15 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde) return -EINVAL; }
+ qm->err_ini.qm_wr_port = HRDE_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = HRDE_ECC_2BIT_ERR; + qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log; hisi_rde_set_user_domain_and_cache(hisi_rde); hisi_rde_set_hw_error(hisi_rde, true); + qm->err_ini.open_axi_master_ooo(qm);
return 0; } @@ -841,81 +903,11 @@ static void hisi_rde_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); }
-static void hisi_rde_hw_error_log(struct hisi_rde *hisi_rde, u32 err_sts) -{ - const struct hisi_rde_hw_error *err = rde_hw_error; - struct device *dev = &hisi_rde->qm.pdev->dev; - u32 i, err_val; - - while (err->msg) { - if (err->int_msk & err_sts) - dev_err_ratelimited(dev, - "[%s] [Error status=0x%x] found.\n", - err->msg, err->int_msk); - err++; - } - - if (HRDE_ECC_2BIT_ERR & err_sts) { - err_val = (readl(hisi_rde->qm.io_base + HRDE_ERR_CNT) - & HRDE_ECC_2BIT_CNT_MSK); - dev_err_ratelimited(dev, - "Rde ecc 2bit sram num=0x%x.\n", err_val); - } - - if (HRDE_STATE_INT_ERR & err_sts) { - for (i = 0; i < HRDE_DFX_NUM; i++) { - dev_err_ratelimited(dev, "%s=0x%x\n", - hrde_dfx_regs[i].name, - readl(hisi_rde->qm.io_base + - hrde_dfx_regs[i].offset)); - } - for (i = 0; i < HRDE_OOO_DFX_NUM; i++) { - dev_err_ratelimited(dev, "%s=0x%x\n", - hrde_ooo_dfx_regs[i].name, - readl(hisi_rde->qm.io_base + - hrde_ooo_dfx_regs[i].offset)); - } - } -} - -static pci_ers_result_t hisi_rde_hw_error_handle(struct hisi_rde *hisi_rde) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS); - if (err_sts) { - hisi_rde_hw_error_log(hisi_rde, err_sts); - - /* clear error interrupts */ - writel(err_sts, hisi_rde->qm.io_base + HRDE_INT_SOURCE); - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_rde_hw_error_process(struct pci_dev *pdev) +static void hisi_rde_shutdown(struct pci_dev *pdev) { struct hisi_rde *hisi_rde = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, rde_ret, ret;
- if (!hisi_rde) { - dev_err(dev, "Can't recover rde-error at dev init.\n"); - return PCI_ERS_RESULT_NONE; - } - - /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hisi_rde->qm); - - /* log rde error */ - rde_ret = hisi_rde_hw_error_handle(hisi_rde); - ret = (qm_ret == PCI_ERS_RESULT_NEED_RESET || - rde_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; - - return ret; + hisi_qm_stop(&hisi_rde->qm, QM_NORMAL); }
static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde) @@ -982,13 +974,16 @@ static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_rde->qm.io_base + HRDE_MASTER_GLOBAL_CTRL);
/* If bus lock, reset chip */ ret = readl_relaxed_poll_timeout(hisi_rde->qm.io_base + - HRDE_MASTER_TRANS_RETURN, val, + HRDE_MASTER_TRANS_RET, val, (val == MASTER_TRANS_RETURN_RW), HRDE_RD_INTVRL_US, HRDE_RD_TMOUT_US); if (ret) { @@ -1008,7 +1003,7 @@ static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde) acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RRST", - NULL, &value); + NULL, &value); if (ACPI_FAILURE(s)) { dev_err(dev, "No controller reset method.\n"); return -EIO; @@ -1047,7 +1042,7 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde) }
hisi_rde_set_user_domain_and_cache(hisi_rde); - hisi_rde_set_hw_error(hisi_rde, true); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1055,6 +1050,9 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_rde_set_hw_error(hisi_rde, true); + return 0; }
@@ -1096,7 +1094,7 @@ static void hisi_rde_ras_proc(struct work_struct *work) if (!pdev) return;
- ret = hisi_rde_hw_error_process(pdev); + ret = hisi_qm_process_dev_error(pdev); if (ret == PCI_ERS_RESULT_NEED_RESET) if (hisi_rde_controller_reset(hisi_rde)) dev_err(&pdev->dev, "Hisi_rde reset fail.\n"); @@ -1129,7 +1127,7 @@ static int hisi_rde_get_hw_error_status(struct hisi_rde *hisi_rde) u32 err_sts;
err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS) & - HRDE_ECC_2BIT_ERR; + HRDE_ECC_2BIT_ERR; if (err_sts) return err_sts;
@@ -1221,6 +1219,7 @@ static struct pci_driver hisi_rde_pci_driver = { .probe = hisi_rde_probe, .remove = hisi_rde_remove, .err_handler = &hisi_rde_err_handler, + .shutdown = hisi_rde_shutdown, };
static void hisi_rde_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index db2983c51f1e..cdc4f9a171d9 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -215,18 +215,17 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl, dma_addr_t psec_sgl, struct sec_dev_info *info) { struct sec_hw_sgl *sgl_current, *sgl_next; - dma_addr_t sgl_next_dma;
+ if (!hw_sgl) + return; sgl_current = hw_sgl; - while (sgl_current) { + while (sgl_current->next) { sgl_next = sgl_current->next; - sgl_next_dma = sgl_current->next_sgl; - - dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl); - + dma_pool_free(info->hw_sgl_pool, sgl_current, + sgl_current->next_sgl); sgl_current = sgl_next; - psec_sgl = sgl_next_dma; } + dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl); }
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 208b32b70e64..0e164524d169 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_H #define HISI_SEC_H diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 4164c05f2d18..3a362cebb4b9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 HiSilicon Limited. */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/crypto.h> #include <linux/hrtimer.h> @@ -19,7 +19,7 @@ #define SEC_ASYNC
#define SEC_INVLD_REQ_ID (-1) -#define SEC_PRIORITY (4001) +#define SEC_PRIORITY 4001 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) @@ -196,7 +196,7 @@ struct hisi_sec_ctx { bool is_fusion; };
-#define DES_WEAK_KEY_NUM (4) +#define DES_WEAK_KEY_NUM 4 u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE, 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
@@ -325,7 +325,7 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer) }
static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx, - int qp_ctx_id, int alg_type, int req_type) + int qp_ctx_id, int alg_type, int req_type) { struct hisi_qp *qp; struct hisi_sec_qp_ctx *qp_ctx; @@ -352,7 +352,7 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx, atomic_set(&qp_ctx->req_cnt, 0);
qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long), - GFP_ATOMIC); + GFP_ATOMIC); if (!qp_ctx->req_bitmap) { ret = -ENOMEM; goto err_qm_release_qp; @@ -487,8 +487,10 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm) ctx->enc_q_num = ctx->q_num / 2; ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx), GFP_KERNEL); - if (!ctx->qp_ctx) + if (!ctx->qp_ctx) { + dev_err(ctx->dev, "failed to alloc qp_ctx"); return -ENOMEM; + }
hisi_sec_get_fusion_param(ctx, sec);
@@ -617,10 +619,11 @@ static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm) static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) { struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp; - u32 req_id; struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct device *dev = &qp->qm->pdev->dev; struct hisi_sec_req *req; struct hisi_sec_dfx *dfx; + u32 req_id;
if (sec_sqe->type == 1) { req_id = sec_sqe->type1.tag; @@ -629,8 +632,10 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) req->err_type = sec_sqe->type1.error_type; if (req->err_type || sec_sqe->type1.done != 0x1 || sec_sqe->type1.flag != 0x2) { - pr_err("err_type[%d] done[%d] flag[%d]\n", - req->err_type, sec_sqe->type1.done, + dev_err_ratelimited(dev, + "err_type[%d] done[%d] flag[%d]\n", + req->err_type, + sec_sqe->type1.done, sec_sqe->type1.flag); } } else if (sec_sqe->type == 2) { @@ -640,12 +645,14 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) req->err_type = sec_sqe->type2.error_type; if (req->err_type || sec_sqe->type2.done != 0x1 || sec_sqe->type2.flag != 0x2) { - pr_err("err_type[%d] done[%d] flag[%d]\n", - req->err_type, sec_sqe->type2.done, + dev_err_ratelimited(dev, + "err_type[%d] done[%d] flag[%d]\n", + req->err_type, + sec_sqe->type2.done, sec_sqe->type2.flag); } } else { - pr_err("err bd type [%d]\n", sec_sqe->type); + dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type); return; }
@@ -1153,7 +1160,8 @@ static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - __sync_add_and_fetch(&ctx->sec->sec_dfx.send_cnt, 1); + if (ret == 0) + ctx->sec->sec_dfx.send_cnt++; mutex_unlock(&qp_ctx->req_lock);
return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit); @@ -1363,7 +1371,7 @@ static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req) req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
if (!req) { - dev_err(ctx->dev, "sec_request_alloc failed\n"); + dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n"); return -ENOMEM; }
@@ -1372,14 +1380,14 @@ static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
ret = sec_request_transfer(ctx, req); if (ret) { - dev_err(ctx->dev, "sec_transfer failed! ret[%d]\n", ret); + dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret); goto err_free_req; }
ret = sec_request_send(ctx, req); __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1); if (ret != -EBUSY && ret != -EINPROGRESS) { - dev_err(ctx->dev, "sec_send failed ret[%d]\n", ret); + dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret); goto err_unmap_req; }
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index d05856eaea23..bffbeba1aca9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_CRYPTO_H #define HISI_SEC_CRYPTO_H diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 22595c1dbfbc..50bf5951fd87 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -6,7 +6,6 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * */
#include <linux/acpi.h> @@ -42,23 +41,20 @@ #define SEC_CORE_INT_STATUS 0x301008 #define SEC_CORE_INT_STATUS_M_ECC BIT(2) #define SEC_CORE_ECC_INFO 0x301C14 -#define SEC_ECC_NUM_SHIFT 16 -#define SEC_ECC_ADDR_SHIFT 0 -#define SEC_ECC_NUM(err_val) ((err_val >> SEC_ECC_NUM_SHIFT) & 0xFF) -#define SEC_ECC_ADDR(err_val) (err_val >> SEC_ECC_ADDR_SHIFT) +#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF) +#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
#define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff #define SEC_HW_ERROR_IRQ_ENABLE 1 #define SEC_HW_ERROR_IRQ_DISABLE 0
-#define SEC_SM4_CTR_ENABLE_REG 0x301380 -#define SEC_SM4_CTR_ENABLE_MSK 0xEFFFFFFF -#define SEC_SM4_CTR_DISABLE_MSK 0xFFFFFFFF - -#define SEC_XTS_MIV_ENABLE_REG 0x301384 -#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF -#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD +#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 +#define SEC_BD_ERR_CHK_EN_REG1 0x0384 +#define SEC_BD_ERR_CHK_EN_REG3 0x038c
#define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) @@ -86,10 +82,10 @@ #define SEC_TRNG_EN_SHIFT 8 #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF +#define SEC_WR_MSI_PORT 0xFFFE
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224 -#define SEC_BD_ERR_CHK_EN_REG(n) (0x0380 + (n) * 0x04)
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -107,8 +103,10 @@ #define SEC_PCI_COMMAND_INVALID 0xFFFFFFFF
#define FORMAT_DECIMAL 10 +#define FROZEN_RANGE_MIN 10 +#define FROZEN_RANGE_MAX 20
-static const char hisi_sec_name[] = "hisi_sec"; +static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; static u32 pf_q_num = SEC_PF_DEF_Q_NUM; static struct workqueue_struct *sec_wq; @@ -404,10 +402,6 @@ static int uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
-static int enable_sm4_ctr; -module_param(enable_sm4_ctr, int, 0444); -MODULE_PARM_DESC(enable_sm4_ctr, "Enable ctr(sm4) algorithm 0(default), 1"); - static int ctx_q_num = CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444); MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)"); @@ -502,8 +496,11 @@ static int sec_engine_init(struct hisi_sec *hisi_sec) reg |= SEC_USER1_SMMU_NORMAL; writel(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
- writel(0xfffff7fd, base + SEC_BD_ERR_CHK_EN_REG(1)); - writel(0xffffbfff, base + SEC_BD_ERR_CHK_EN_REG(3)); + /* Enable sm4 extra mode, as ctr/ecb */ + writel(SEC_BD_ERR_CHK_EN0, base + SEC_BD_ERR_CHK_EN_REG0); + /* Enable sm4 xts mode multiple iv */ + writel(SEC_BD_ERR_CHK_EN1, base + SEC_BD_ERR_CHK_EN_REG1); + writel(SEC_BD_ERR_CHK_EN3, base + SEC_BD_ERR_CHK_EN_REG3);
/* enable clock gate control */ reg = readl_relaxed(base + SEC_CONTROL_REG); @@ -515,25 +512,9 @@ static int sec_engine_init(struct hisi_sec *hisi_sec) reg |= sec_get_endian(hisi_sec); writel(reg, base + SEC_CONTROL_REG);
- if (enable_sm4_ctr) - writel(SEC_SM4_CTR_ENABLE_MSK, - qm->io_base + SEC_SM4_CTR_ENABLE_REG); - - writel(SEC_XTS_MIV_ENABLE_MSK, - qm->io_base + SEC_XTS_MIV_ENABLE_REG); - return 0; }
-static void hisi_sec_disable_sm4_ctr(struct hisi_sec *hisi_sec) -{ - struct hisi_qm *qm = &hisi_sec->qm; - - if (enable_sm4_ctr) - writel(SEC_SM4_CTR_DISABLE_MSK, - qm->io_base + SEC_SM4_CTR_ENABLE_REG); -} - static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec) { struct hisi_qm *qm = &hisi_sec->qm; @@ -548,7 +529,11 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec) /* qm cache */ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); - writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + + QM_PEH_AXUSER_CFG_ENABLE);
/* enable sqc,cqc writeback */ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | @@ -590,8 +575,8 @@ static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state) val = readl(base + SEC_CONTROL_REG); if (state) { /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + - SEC_CORE_INT_SOURCE); + writel(SEC_CORE_INT_ENABLE, + hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE);
/* enable SEC hw error interrupts */ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -782,8 +767,11 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl) struct debugfs_regset32 *regset; struct dentry *tmp_d, *tmp; char buf[SEC_DBGFS_VAL_MAX_LEN]; + int ret;
- snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "hisi_sec_dfx"); + ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx"); + if (ret < 0) + return -ENOENT;
tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) @@ -921,6 +909,49 @@ static void hisi_sec_hw_error_init(struct hisi_sec *hisi_sec) hisi_sec_hw_error_set_state(hisi_sec, true); }
+static void hisi_sec_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF + + SEC_ACC_COMMON_REG_OFF; + + val = readl(qm->io_base + SEC_CONTROL_REG); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, base + SEC_CONTROL_REG); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, base + SEC_CONTROL_REG); +} + +static u32 hisi_sec_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + SEC_CORE_INT_STATUS); +} + +static void hisi_sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); +} + +static void hisi_sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_sec_hw_error *err = sec_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } + + if (SEC_CORE_INT_STATUS_M_ECC & err_sts) { + err_val = readl(qm->io_base + SEC_CORE_ECC_INFO); + dev_err(dev, "hisi-sec multi ecc sram num=0x%x\n", + SEC_ECC_NUM(err_val)); + dev_err(dev, "hisi-sec multi ecc sram addr=0x%x\n", + SEC_ECC_ADDR(err_val)); + } +} + static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec) { struct hisi_qm *qm = &hisi_sec->qm; @@ -946,8 +977,15 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec) return -EINVAL; }
+ qm->err_ini.qm_wr_port = SEC_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; + qm->err_ini.open_axi_master_ooo = hisi_sec_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hisi_sec_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_sec_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_sec_log_hw_error; hisi_sec_set_user_domain_and_cache(hisi_sec); hisi_sec_hw_error_init(hisi_sec); + qm->err_ini.open_axi_master_ooo(qm); hisi_sec_debug_regs_clear(hisi_sec);
return 0; @@ -965,7 +1003,7 @@ static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->ver = rev_id;
qm->sqe_size = SEC_SQE_SIZE; - qm->dev_name = hisi_sec_name; + qm->dev_name = sec_name; qm->fun_type = (pdev->device == SEC_PCI_DEVICE_ID_PF) ? QM_HW_PF : QM_HW_VF; qm->algs = "sec\ncipher\ndigest\n"; @@ -1159,6 +1197,31 @@ static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs) #endif }
+static int hisi_sec_try_frozen_vfs(struct pci_dev *pdev) +{ + struct hisi_sec *sec, *vf_sec; + struct pci_dev *dev; + int ret = 0; + + /* Try to freeze all the VFs before disabling SRIOV */ + mutex_lock(&hisi_sec_list_lock); + list_for_each_entry(sec, &hisi_sec_list, list) { + dev = sec->qm.pdev; + if (dev == pdev) + continue; + if (pci_physfn(dev) == pdev) { + vf_sec = pci_get_drvdata(dev); + ret = hisi_qm_frozen(&vf_sec->qm); + if (ret) + goto frozen_fail; + } + } + +frozen_fail: + mutex_unlock(&hisi_sec_list_lock); + return ret; +} + static int hisi_sec_sriov_disable(struct pci_dev *pdev) { struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); @@ -1169,9 +1232,13 @@ return -EPERM; }
+ if (hisi_sec_try_frozen_vfs(pdev)) { + dev_err(&pdev->dev, "try frozen VFs failed!\n"); + return -EBUSY; + } + /* remove in hisi_sec_pci_driver will be called to free VF resources */ pci_disable_sriov(pdev); - return hisi_sec_clear_vft_config(hisi_sec); }
@@ -1185,8 +1252,12 @@ static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hisi_sec_remove_wait_delay(struct hisi_sec *hisi_sec) { - while (hisi_qm_frozen(&hisi_sec->qm)) - ; + struct hisi_qm *qm = &hisi_sec->qm; + + while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) && + hisi_sec_try_frozen_vfs(qm->pdev))) + usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX); + udelay(SEC_WAIT_DELAY); }
@@ -1204,80 +1275,18 @@ static void hisi_sec_remove(struct pci_dev *pdev) hisi_sec_debugfs_exit(hisi_sec); (void)hisi_qm_stop(qm, QM_NORMAL);
- if (qm->fun_type == QM_HW_PF) { + if (qm->fun_type == QM_HW_PF) hisi_sec_hw_error_set_state(hisi_sec, false); - hisi_sec_disable_sm4_ctr(hisi_sec); - }
hisi_qm_uninit(qm); hisi_sec_remove_from_list(hisi_sec); }
-static void hisi_sec_log_hw_error(struct hisi_sec *hisi_sec, u32 err_sts) -{ - const struct hisi_sec_hw_error *err = sec_hw_error; - struct device *dev = &hisi_sec->qm.pdev->dev; - u32 err_val; - - while (err->msg) { - if (err->int_msk & err_sts) { - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - if (SEC_CORE_INT_STATUS_M_ECC & err_sts) { - err_val = readl(hisi_sec->qm.io_base + - SEC_CORE_ECC_INFO); - dev_err(dev, - "hisi-sec multi ecc sram num=0x%x\n", - SEC_ECC_NUM(err_val)); - dev_err(dev, - "hisi-sec multi ecc sram addr=0x%x\n", - SEC_ECC_ADDR(err_val)); - } - } - err++; - } -} - -static pci_ers_result_t hisi_sec_hw_error_handle(struct hisi_sec *hisi_sec) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_sec->qm.io_base + SEC_CORE_INT_STATUS); - - if (err_sts) { - hisi_sec_log_hw_error(hisi_sec, err_sts); - /* clear error interrupts */ - writel(err_sts, hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE); - - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_sec_process_hw_error(struct pci_dev *pdev) +static void hisi_sec_shutdown(struct pci_dev *pdev) { struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, sec_ret; - - if (!hisi_sec) { - dev_err(dev, - "Can't recover error occurred during device init\n"); - return PCI_ERS_RESULT_NONE; - }
- /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hisi_sec->qm); - - /* log sec error */ - sec_ret = hisi_sec_hw_error_handle(hisi_sec); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - sec_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hisi_sec->qm, QM_NORMAL); }
static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev, @@ -1290,7 +1299,7 @@ static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hisi_sec_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hisi_sec_reset_prepare_ready(struct hisi_sec *hisi_sec) @@ -1399,6 +1408,9 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec) return ret; }
+ /* If a device ECC error occurred, set the QM ECC flag so the OOO (out-of-order) path is held */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(SEC_MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_sec->qm.io_base + SEC_MASTER_GLOBAL_CTRL); @@ -1492,7 +1504,7 @@ }
hisi_sec_set_user_domain_and_cache(hisi_sec); - hisi_sec_hw_error_init(hisi_sec); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1514,6 +1526,9 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_sec_hw_error_init(hisi_sec); + return 0; }
@@ -1713,12 +1728,13 @@ static const struct pci_error_handlers hisi_sec_err_handler = { };
static struct pci_driver hisi_sec_pci_driver = { - .name = "hisi_sec", + .name = "hisi_sec2", .id_table = hisi_sec_dev_ids, .probe = hisi_sec_probe, .remove = hisi_sec_remove, .sriov_configure = hisi_sec_sriov_configure, .err_handler = &hisi_sec_err_handler, + .shutdown = hisi_sec_shutdown, };
static void hisi_sec_register_debugfs(void) @@ -1726,7 +1742,7 @@ static void hisi_sec_register_debugfs(void) if (!debugfs_initialized()) return;
- sec_debugfs_root = debugfs_create_dir("hisi_sec", NULL); + sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); if (IS_ERR_OR_NULL(sec_debugfs_root)) sec_debugfs_root = NULL; } @@ -1740,7 +1756,7 @@ static int __init hisi_sec_init(void) { int ret;
- sec_wq = alloc_workqueue("hisi_sec", WQ_HIGHPRI | WQ_CPU_INTENSIVE | + sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!sec_wq) { diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h index 8147d3016c8a..7c76e19f271a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h +++ b/drivers/crypto/hisilicon/sec2/sec_usr_if.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_USR_IF_H #define HISI_SEC_USR_IF_H diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 85c2bc36d826..523dab7cb4c1 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -56,7 +56,11 @@ struct hisi_acc_sgl_pool { struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr) { - u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl; + u32 sgl_size; + u32 block_size; + u32 sgl_num_per_block; + u32 block_num; + u32 remain_sgl; struct hisi_acc_sgl_pool *pool; struct mem_block *block; u32 i, j; diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 2b8dba73f5f0..eec79ae92185 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -5,6 +5,9 @@ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include "zip.h" +#ifndef CONFIG_SG_SPLIT +#include <../lib/sg_split.c> +#endif
/* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) @@ -368,7 +371,6 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) int err = 0;
status = sqe->dw3 & HZIP_BD_STATUS_M; - if (status != 0 && status != HZIP_NC_ERR) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 510e5874e122..5b4a5c110107 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -19,6 +19,9 @@ #define HZIP_QUEUE_NUM_V1 4096 #define HZIP_QUEUE_NUM_V2 1024
+#define PCI_DEVICE_ID_ZIP_PF 0xa250 +#define PCI_DEVICE_ID_ZIP_VF 0xa251 + #define HZIP_CLOCK_GATE_CTRL 0x301004 #define COMP0_ENABLE BIT(0) #define COMP1_ENABLE BIT(1) @@ -64,6 +67,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK 0x3010A4 +#define HZIP_CORE_INT_SET 0x3010A8 #define HZIP_HW_ERROR_IRQ_ENABLE 1 #define HZIP_HW_ERROR_IRQ_DISABLE 0 #define HZIP_CORE_INT_STATUS 0x3010AC @@ -71,6 +75,7 @@ #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 +#define HZIP_RAS_NFE_MBIT_DISABLE ~HZIP_CORE_INT_STATUS_M_ECC #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 #define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 @@ -90,6 +95,7 @@ #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) #define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF +#define HZIP_WR_MSI_PORT 0xF7FF
#define HZIP_ENABLE 1 #define HZIP_DISABLE 0 @@ -101,6 +107,9 @@ #define HZIP_RESET_WAIT_TIMEOUT 400 #define HZIP_PCI_COMMAND_INVALID 0xFFFFFFFF
+#define FROZEN_RANGE_MIN 10 +#define FROZEN_RANGE_MAX 20 + static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; static LIST_HEAD(hisi_zip_list); @@ -638,13 +647,16 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) struct debugfs_regset32 *regset; struct dentry *tmp_d, *tmp; char buf[HZIP_BUF_SIZE]; - int i; + int i, ret;
for (i = 0; i < HZIP_CORE_NUM; i++) { if (i < HZIP_COMP_CORE_NUM) - sprintf(buf, "comp_core%d", i); + ret = snprintf(buf, HZIP_BUF_SIZE, "comp_core%d", i); else - sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM); + ret = snprintf(buf, HZIP_BUF_SIZE, + "decomp_core%d", i - HZIP_COMP_CORE_NUM); + if (ret < 0) + return -EINVAL;
tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) @@ -736,6 +748,54 @@ static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip) hisi_zip_hw_error_set_state(hisi_zip, true); }
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HZIP_CORE_INT_STATUS); +} + +static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); +} + +static void hisi_zip_set_ecc(struct hisi_qm *qm) +{ + u32 nfe_enb; + + nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(nfe_enb & HZIP_RAS_NFE_MBIT_DISABLE, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET); + qm->err_ini.is_dev_ecc_mbit = 1; +} + +static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_zip_hw_error *err = zip_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { + err_val = readl(qm->io_base + + HZIP_CORE_SRAM_ECC_ERR_INFO); + dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", + ((err_val >> + HZIP_SRAM_ECC_ERR_NUM_SHIFT) & + 0xFF)); + dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n", + (err_val >> + HZIP_SRAM_ECC_ERR_ADDR_SHIFT)); + } + } + err++; + } +} + static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) { struct hisi_qm *qm = &hisi_zip->qm; @@ -761,6 +821,13 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return -EINVAL; }
+ qm->err_ini.qm_wr_port = HZIP_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error; + qm->err_ini.inject_dev_hw_err = hisi_zip_set_ecc; + hisi_zip_set_user_domain_and_cache(hisi_zip); hisi_zip_hw_error_init(hisi_zip); hisi_zip_debug_regs_clear(hisi_zip); @@ -796,7 +863,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->dev_name = hisi_zip_name; qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : QM_HW_VF; - qm->algs = "zlib\ngzip\n"; + qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
switch (uacce_mode) { case UACCE_MODE_NOUACCE: @@ -939,7 +1006,6 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) int pre_existing_vfs, num_vfs, ret;
pre_existing_vfs = pci_num_vf(pdev); - if (pre_existing_vfs) { dev_err(&pdev->dev, "Can't enable VF. Please disable pre-enabled VFs!\n"); @@ -969,6 +1035,31 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) #endif }
+static int hisi_zip_try_frozen_vfs(struct pci_dev *pdev) +{ + struct hisi_zip *zip, *vf_zip; + struct pci_dev *dev; + int ret = 0; + + /* Try to freeze all the VFs before disabling SRIOV */ + mutex_lock(&hisi_zip_list_lock); + list_for_each_entry(zip, &hisi_zip_list, list) { + dev = zip->qm.pdev; + if (dev == pdev) + continue; + if (pci_physfn(dev) == pdev) { + vf_zip = pci_get_drvdata(dev); + ret = hisi_qm_frozen(&vf_zip->qm); + if (ret) + goto frozen_fail; + } + } + +frozen_fail: + mutex_unlock(&hisi_zip_list_lock); + return ret; +} + static int hisi_zip_sriov_disable(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); @@ -979,6 +1070,11 @@ return -EPERM; }
+ if (hisi_zip_try_frozen_vfs(pdev)) { + dev_err(&pdev->dev, "try frozen VFs failed!\n"); + return -EBUSY; + } + /* remove in hisi_zip_pci_driver will be called to free VF resources */ pci_disable_sriov(pdev);
@@ -995,8 +1091,12 @@ static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hisi_zip_remove_wait_delay(struct hisi_zip *hisi_zip) { - while (hisi_qm_frozen(&hisi_zip->qm)) - ; + struct hisi_qm *qm = &hisi_zip->qm; + + while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) && + hisi_zip_try_frozen_vfs(qm->pdev))) + usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX); + udelay(ZIP_WAIT_DELAY); }
@@ -1009,7 +1109,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_remove_wait_delay(hisi_zip);
if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) - hisi_zip_sriov_disable(pdev); + (void)hisi_zip_sriov_disable(pdev);
#ifndef CONFIG_IOMMU_SVA if (uacce_mode != UACCE_MODE_UACCE) @@ -1028,70 +1128,11 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_remove_from_list(hisi_zip); }
-static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts) -{ - const struct hisi_zip_hw_error *err = zip_hw_error; - struct device *dev = &hisi_zip->qm.pdev->dev; - u32 err_val; - - while (err->msg) { - if (err->int_msk & err_sts) { - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { - err_val = readl(hisi_zip->qm.io_base + - HZIP_CORE_SRAM_ECC_ERR_INFO); - dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", - ((err_val >> - HZIP_SRAM_ECC_ERR_NUM_SHIFT) & - 0xFF)); - dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n", - (err_val >> - HZIP_SRAM_ECC_ERR_ADDR_SHIFT)); - } - } - err++; - } -} - -static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS); - - if (err_sts) { - hisi_zip_log_hw_error(hisi_zip, err_sts); - /* clear error interrupts */ - writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE); - - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev) +static void hisi_zip_shutdown(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, zip_ret; - - if (!hisi_zip) { - dev_err(dev, - "Can't recover ZIP-error occurred during device init\n"); - return PCI_ERS_RESULT_NONE; - }
- qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm); - - zip_ret = hisi_zip_hw_error_handle(hisi_zip); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - zip_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hisi_zip->qm, QM_NORMAL); }
static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, @@ -1104,7 +1145,7 @@ static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hisi_zip_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hisi_zip_reset_prepare_ready(struct hisi_zip *hisi_zip) @@ -1213,6 +1254,9 @@ static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip) return ret; }
+ /* If a device ECC error occurred, set the QM ECC flag so the OOO (out-of-order) path is held */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_zip->qm.io_base + HZIP_MASTER_GLOBAL_CTRL); @@ -1309,7 +1353,7 @@ }
hisi_zip_set_user_domain_and_cache(hisi_zip); - hisi_zip_hw_error_init(hisi_zip); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1331,6 +1375,9 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_zip_hw_error_init(hisi_zip); + return 0; }
@@ -1531,6 +1578,7 @@ static struct pci_driver hisi_zip_pci_driver = { .remove = hisi_zip_remove, .sriov_configure = hisi_zip_sriov_configure, .err_handler = &hisi_zip_err_handler, + .shutdown = hisi_zip_shutdown, };
static void hisi_zip_register_debugfs(void)