[OLK-6.6 00/10] crypto: hisilicon - some bugfixes

From: JiangShui Yang <yangjiangshui@h-partners.com>

Weili Qian (8):
  crypto: hisilicon/sec2: fix memory use-after-free issue
  crypto: hisilicon/qm - invalidate queues in use
  crypto: hisilicon/qm - mask axi error before memory init
  crypto: hisilicon/qm - modify interrupt processing resource application
  crypto: hisilicon/qm - fix the pf2vf timeout when global reset
  crypto: hisilicon/qm - obtain the mailbox configuration at one time
  hisi_acc_vfio_pci: obtain the mailbox configuration at one time
  crypto: hisilicon/qm - disable error report before flr

Wenkai Lin (1):
  crypto: hisilicon/sec2 - fix for gcm spec check

Zhiqi Song (1):
  crypto: hisilicon/hpre - fix dma unmap sequence

 drivers/crypto/hisilicon/hpre/hpre_crypto.c    |   8 +-
 drivers/crypto/hisilicon/hpre/hpre_main.c      | 103 ++--
 drivers/crypto/hisilicon/qm.c                  | 561 ++++++++++++------
 drivers/crypto/hisilicon/sec2/sec_crypto.c     |  53 +-
 drivers/crypto/hisilicon/sec2/sec_main.c       |  92 ++-
 drivers/crypto/hisilicon/zip/zip_main.c        | 104 ++--
 .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c     |  45 +-
 include/linux/hisi_acc_qm.h                    |  33 +-
 8 files changed, 604 insertions(+), 395 deletions(-)

--
2.43.0

From: Zhiqi Song <songzhiqi1@huawei.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

Perform the DMA unmap operation before processing the data. Unmapping
transfers ownership of the output buffer back to the CPU, so the
callbacks must not read or rearrange the result while the buffer is
still mapped for the device.

Fixes: 90274769cf79 ("crypto: hisilicon/hpre - add 'CURVE25519' algorithm")
Signed-off-by: Zhiqi Song <songzhiqi1@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 11670cda6eb5..96b89e78db2f 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1491,11 +1491,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
 
+	/* Do unmap before data processing */
+	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
 	p = sg_virt(areq->dst);
 	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
 	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
 
-	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
 	kpp_request_complete(areq, ret);
 
 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
@@ -1808,9 +1810,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
 
+	/* Do unmap before data processing */
+	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
 	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
 
-	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
 	kpp_request_complete(areq, ret);
 
 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
--
2.43.0
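The ordering rule this patch enforces, as a minimal standalone sketch:
for a DMA_FROM_DEVICE buffer, dma_unmap_single() is what hands ownership
(including any cache maintenance) back to the CPU, so it must run before
the CPU post-processes the result. All names below are illustrative, not
part of the hpre driver.

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct demo_req {
	struct device *dev;
	void *buf;		/* CPU address of the result buffer */
	dma_addr_t dma;		/* device address of the same buffer */
	size_t len;
};

static void demo_complete(struct demo_req *req)
{
	/* Unmap first: this syncs the buffer for the CPU. */
	dma_unmap_single(req->dev, req->dma, req->len, DMA_FROM_DEVICE);

	/* Only now is it safe to rearrange the result in place. */
	memmove(req->buf, req->buf + req->len / 2, req->len / 2);
}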

From: Wenkai Lin <linwenkai6@hisilicon.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

For GCM mode, the hardware does not support an authsize of less than
8 bytes, so such requests are switched to the software computing mode.

Fixes: c16a70c1f253 ("crypto: hisilicon/sec - add new algorithm mode for AEAD")
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index c8fe1eb342b6..11c6e1d9887f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -102,6 +102,7 @@
 #define IV_LAST_BYTE_MASK	0xFF
 #define IV_CTR_INIT		0x1
 #define IV_BYTE_OFFSET		0x8
+#define SEC_GCM_MIN_AUTH_SZ	0x8
 
 static DEFINE_MUTEX(sec_algs_lock);
 static unsigned int sec_available_devs;
@@ -2236,6 +2237,9 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 			return -EINVAL;
 		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
 			return -EINVAL;
+	} else if (c_mode == SEC_CMODE_GCM) {
+		if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
+			return -EINVAL;
 	}
 
 	return 0;
--
2.43.0
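A compact view of the added check: requests whose authsize is below the
hardware minimum fail the spec check, and the -EINVAL return is what
routes them to the software fallback. Only SEC_GCM_MIN_AUTH_SZ comes
from the patch; the rest is an illustrative reduction.

#include <linux/errno.h>
#include <linux/types.h>

#define SEC_GCM_MIN_AUTH_SZ	0x8

/* Returns 0 if the GCM request can be handled by the hardware. */
static int demo_gcm_spec_check(size_t authsize)
{
	if (authsize < SEC_GCM_MIN_AUTH_SZ)
		return -EINVAL;	/* caller falls back to software GCM */

	return 0;
}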

From: Weili Qian <qianweili@huawei.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

When more than 512 packets are being processed in the hardware queue,
the SEC driver still sends the request to the hardware, but also adds
it to the backlog list and returns -EBUSY to the caller so that the
caller stops sending. When the callback finds that fewer than 512
packets are left in the queue, the sending thread is woken up. With
more than 512 sending threads, a request on the backlog list may
already be complete while it has not yet been deleted from the list;
the deletion then accesses freed memory and causes a system panic.
Therefore, remove the backlog list, determine whether the sending
thread needs to be woken up based on 'fake_busy' in the sec_req, and
invoke the user's callback to ensure that the thread is woken up
before the request memory is released.

The log is as follows:
[  169.430697][ T1354] CPU: 27 PID: 1354 Comm: kworker/u262:1 Kdump: loaded Not tainted 5.10.0+ #1
[  169.439678][ T1354] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDD, BIOS 2280-V2 CS V5.B211.01 11/10/2021
[  169.450421][ T1354] Workqueue: 0000:76:00.0 qm_work_process [hisi_qm]
[  169.457050][ T1354] Call trace:
[  169.460285][ T1354]  dump_backtrace+0x0/0x300
[  169.464780][ T1354]  show_stack+0x20/0x30
[  169.468936][ T1354]  dump_stack+0x104/0x17c
[  169.473240][ T1354]  print_address_description.constprop.0+0x68/0x204
[  169.479889][ T1354]  __kasan_report+0xe0/0x140
[  169.484463][ T1354]  kasan_report+0x44/0xe0
[  169.488771][ T1354]  __asan_load8+0x94/0xd0
[  169.493088][ T1354]  __list_del_entry_valid+0x20/0x180
[  169.498408][ T1354]  sec_back_req_clear+0x184/0x2dc [hisi_sec2]
[  169.504494][ T1354]  sec_skcipher_callback+0x84/0x150 [hisi_sec2]
[  169.510800][ T1354]  sec_req_cb+0x1d4/0x334 [hisi_sec2]
[  169.516227][ T1354]  qm_poll_req_cb+0x170/0x20c [hisi_qm]
[  169.524821][ T1354]  qm_work_process+0xf8/0x124 [hisi_qm]
[  169.533436][ T1354]  process_one_work+0x3a8/0x860
[  169.541063][ T1354]  worker_thread+0x280/0x670
[  169.548349][ T1354]  kthread+0x18c/0x1d0
[  169.555169][ T1354]  ret_from_fork+0x10/0x18
[  169.562107][ T1354]

Fixes: c16a70c1f253 ("crypto: hisilicon/sec - add new algorithm mode for AEAD")
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 49 ++++------------------
 1 file changed, 8 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 11c6e1d9887f..0428c50bea31 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -288,10 +288,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
 	if (ctx->fake_req_limit <=
 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
-		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+		req->fake_busy = true;
+		spin_unlock_bh(&qp_ctx->req_lock);
 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
-		spin_unlock_bh(&qp_ctx->req_lock);
 		return -EBUSY;
 	}
 	spin_unlock_bh(&qp_ctx->req_lock);
@@ -559,7 +559,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
 
 	spin_lock_init(&qp_ctx->req_lock);
 	idr_init(&qp_ctx->req_idr);
-	INIT_LIST_HEAD(&qp_ctx->backlog);
 
 	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
 	if (ret)
@@ -1394,31 +1393,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
 	}
 }
 
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
-					  struct sec_qp_ctx *qp_ctx)
-{
-	struct sec_req *backlog_req = NULL;
-
-	spin_lock_bh(&qp_ctx->req_lock);
-	if (ctx->fake_req_limit >=
-	    atomic_read(&qp_ctx->qp->qp_status.used) &&
-	    !list_empty(&qp_ctx->backlog)) {
-		backlog_req = list_first_entry(&qp_ctx->backlog,
-				typeof(*backlog_req), backlog_head);
-		list_del(&backlog_req->backlog_head);
-	}
-	spin_unlock_bh(&qp_ctx->req_lock);
-
-	return backlog_req;
-}
-
 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 				  int err)
 {
 	struct skcipher_request *sk_req = req->c_req.sk_req;
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct skcipher_request *backlog_sk_req;
-	struct sec_req *backlog_req;
 
 	sec_free_req_id(req);
 
@@ -1427,13 +1405,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
 		sec_update_iv(req, SEC_SKCIPHER);
 
-	while (1) {
-		backlog_req = sec_back_req_clear(ctx, qp_ctx);
-		if (!backlog_req)
-			break;
-
-		backlog_sk_req = backlog_req->c_req.sk_req;
-		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
+	if (req->fake_busy) {
+		skcipher_request_complete(sk_req, -EINPROGRESS);
 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
 	}
 
@@ -1678,9 +1651,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 	size_t authsize = crypto_aead_authsize(tfm);
 	struct sec_aead_req *aead_req = &req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct aead_request *backlog_aead_req;
-	struct sec_req *backlog_req;
 	size_t sz;
 
 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
@@ -1700,13 +1670,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 
 	sec_free_req_id(req);
 
-	while (1) {
-		backlog_req = sec_back_req_clear(c, qp_ctx);
-		if (!backlog_req)
-			break;
-
-		backlog_aead_req = backlog_req->aead_req.aead_req;
-		aead_request_complete(backlog_aead_req, -EINPROGRESS);
+	if (req->fake_busy) {
+		aead_request_complete(a_req, -EINPROGRESS);
 		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
 	}
 
@@ -2103,6 +2068,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	req->c_req.sk_req = sk_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
+	req->fake_busy = false;
 
 	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret))
@@ -2323,6 +2289,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	req->aead_req.aead_req = a_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
+	req->fake_busy = false;
 	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
 
 	ret = sec_aead_param_check(ctx, req, &need_fallback);
--
2.43.0
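The shape of the new completion path, sketched with illustrative names
(this is not the driver's API): the shared backlog list is gone, each
request carries its own fake_busy flag, and the wakeup happens from the
request's own callback before the request can be freed, which removes
the list_del() on freed memory.

#include <linux/errno.h>
#include <linux/types.h>

struct demo_req {
	bool fake_busy;	/* true if -EBUSY was returned for this request */
	void (*complete)(void *data, int err);
	void *data;
};

static void demo_callback(struct demo_req *req, int err)
{
	if (req->fake_busy)
		/* Wake the sending thread that saw -EBUSY. */
		req->complete(req->data, -EINPROGRESS);

	/* Final completion; the caller may free req afterwards. */
	req->complete(req->data, err);
}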

From: Weili Qian <qianweili@huawei.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

Before the reset, although the queue status in the driver is set to
prevent the task thread from sending doorbells, the task process may
still send doorbells because the reset thread is isolated from the
user-mode task process. If the user process sends a doorbell after the
reset, the device may access an invalid address when processing the
doorbell, since the queue has not been started. As a result, the SMMU
reports a page fault interrupt. Therefore, invalidate the queues
before the reset so that doorbells sent by the process are discarded
by the device.

Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface")
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/qm.c | 45 +++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index cb00a14b9843..0c70cf179a4c 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -46,6 +46,8 @@
 #define QM_SQ_TYPE_MASK			GENMASK(3, 0)
 #define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc).w11) >> 6) & 0x1)
+#define QM_SQC_DISABLE_QP		(1UL << 6)
+#define QM_XQC_RANDOM_DATA		0xaaaa
 
 /* cqc shift */
 #define QM_CQ_HOP_NUM_SHIFT		0
@@ -3379,7 +3381,7 @@ static int qm_restart(struct hisi_qm *qm)
 	for (i = 0; i < qm->qp_num; i++) {
 		qp = &qm->qp_array[i];
 		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
-		    qp->is_resetting == true) {
+		    qp->is_resetting == true && qp->is_in_kernel == true) {
 			ret = qm_start_qp_nolock(qp, 0);
 			if (ret < 0) {
 				dev_err(dev, "Failed to start qp%d!\n", i);
@@ -3411,24 +3413,45 @@ static void qm_stop_started_qp(struct hisi_qm *qm)
 }
 
 /**
- * qm_clear_queues() - Clear all queues memory in a qm.
- * @qm: The qm in which the queues will be cleared.
+ * qm_invalid_queues() - Invalidate all queues in use.
+ * @qm: The qm in which the queues will be invalidated.
  *
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
+ * This function invalidates all queues in use. If a doorbell command is sent
+ * to the device from user space after the device is reset, the device
+ * discards the doorbell command.
  */
-static void qm_clear_queues(struct hisi_qm *qm)
+static void qm_invalid_queues(struct hisi_qm *qm)
 {
 	struct hisi_qp *qp;
+	struct qm_sqc *sqc;
+	struct qm_cqc *cqc;
 	int i;
 
+	/*
+	 * Queues stopped normally are no longer used and do not need to be
+	 * invalidated.
+	 */
+	if (qm->status.stop_reason == QM_NORMAL)
+		return;
+
+	if (qm->status.stop_reason == QM_DOWN)
+		hisi_qm_cache_wb(qm);
+
+	memset(qm->qdma.va, 0, qm->qdma.size);
 	for (i = 0; i < qm->qp_num; i++) {
 		qp = &qm->qp_array[i];
-		if (qp->is_in_kernel && qp->is_resetting)
+		if (!qp->is_resetting)
+			continue;
+
+		/* Write random data and set the sqc close bit to invalidate the queue. */
+		sqc = qm->sqc + i;
+		cqc = qm->cqc + i;
+		sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+		sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
+		cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+		if (qp->is_in_kernel)
 			memset(qp->qdma.va, 0, qp->qdma.size);
 	}
-
-	memset(qm->qdma.va, 0, qm->qdma.size);
 }
 
 /**
@@ -3485,7 +3508,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
 		}
 	}
 
-	qm_clear_queues(qm);
+	qm_invalid_queues(qm);
 	qm->status.stop_reason = QM_NORMAL;
 
 err_unlock:
@@ -4876,8 +4899,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
 	ret = hisi_qm_stop(qm, QM_DOWN);
 	if (ret)
 		dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
-
-	hisi_qm_cache_wb(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
--
2.43.0
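The invalidation step in isolation, with assumed names and a stand-in
context layout (the real SQC/CQC layout is hardware-defined): scribbling
over the queue context and setting the disable bit means a doorbell rung
later no longer matches a live queue, so the device drops it instead of
walking a stale ring.

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_QC_DISABLE		(1UL << 6)	/* assumed disable bit */
#define DEMO_QC_RANDOM_DATA	0xaaaa		/* pattern that matches nothing */

struct demo_qc {	/* stand-in for the hardware qm_sqc/qm_cqc */
	__le16 w8;
	__le16 w13;
};

static void demo_invalidate_queue(struct demo_qc *sqc, struct demo_qc *cqc)
{
	sqc->w8 = cpu_to_le16(DEMO_QC_RANDOM_DATA);
	sqc->w13 = cpu_to_le16(DEMO_QC_DISABLE);
	cqc->w8 = cpu_to_le16(DEMO_QC_RANDOM_DATA);
}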

From: Weili Qian <qianweili@huawei.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

After the device memory is cleared, if the software issues a doorbell,
the hardware may trigger an AXI error while processing the doorbell.
The error is caused by the memory clearing, which makes the hardware
access address 0. Therefore, mask the AXI error during this period.

Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface")
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/hpre/hpre_main.c | 100 +++++++++++++--------
 drivers/crypto/hisilicon/qm.c             |  66 ++++++++++----
 drivers/crypto/hisilicon/sec2/sec_main.c  |  90 +++++++++++++------
 drivers/crypto/hisilicon/zip/zip_main.c   | 102 ++++++++++++++--------
 include/linux/hisi_acc_qm.h               |  21 +++--
 5 files changed, 257 insertions(+), 122 deletions(-)

diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 2c1ffd172663..471bdd856be8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -38,6 +38,7 @@
 #define HPRE_HAC_RAS_NFE_ENB		0x301414
 #define HPRE_HAC_RAS_FE_ENB		0x301418
 #define HPRE_HAC_INT_SET		0x301500
+#define HPRE_AXI_ERROR_MASK		GENMASK(21, 10)
 #define HPRE_RNG_TIMEOUT_NUM		0x301A34
 #define HPRE_CORE_INT_ENABLE		0
 #define HPRE_RDCHN_INI_ST		0x301a00
@@ -755,8 +756,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
 	if (enable) {
 		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
-		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
-					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+		val2 = qm->err_info.dev_err.shutdown_mask;
 	} else {
 		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
 		val2 = 0x0;
@@ -770,38 +770,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void hpre_hw_error_disable(struct hisi_qm *qm)
 {
-	u32 ce, nfe;
-
-	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
-	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
 
 	/* disable hpre hw error interrupts */
-	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
+	writel(err_mask, qm->io_base + HPRE_INT_MASK);
 	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
 	hpre_master_ooo_ctrl(qm, false);
 }
 
 static void hpre_hw_error_enable(struct hisi_qm *qm)
 {
-	u32 ce, nfe, err_en;
-
-	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
-	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
 
 	/* clear HPRE hw error source if having */
-	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+	writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
 
 	/* configure error type */
-	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
-	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
-	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+	writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
+	writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+	writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
 
 	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
 	hpre_master_ooo_ctrl(qm, true);
 
 	/* enable hpre hw error interrupts */
-	err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
-	writel(~err_en, qm->io_base + HPRE_INT_MASK);
+	writel(~err_mask, qm->io_base + HPRE_INT_MASK);
 }
 
 static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1356,9 +1351,8 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 
 static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
 {
-	u32 nfe_mask;
+	u32 nfe_mask = qm->err_info.dev_err.nfe;
 
-	nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
 	writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
 }
 
@@ -1379,11 +1373,11 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
 
 	err_status = hpre_get_hw_err_status(qm);
 	if (err_status) {
-		if (err_status & qm->err_info.ecc_2bits_mask)
+		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
 			qm->err_status.is_dev_ecc_mbit = true;
 		hpre_log_hw_error(qm, err_status);
 
-		if (err_status & qm->err_info.dev_reset_mask) {
+		if (err_status & qm->err_info.dev_err.reset_mask) {
 			/* Disable the same error reporting until device is recovered. */
 			hpre_disable_error_report(qm, err_status);
 			return ACC_ERR_NEED_RESET;
@@ -1399,28 +1393,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
 	u32 err_status;
 
 	err_status = hpre_get_hw_err_status(qm);
-	if (err_status & qm->err_info.dev_shutdown_mask)
+	if (err_status & qm->err_info.dev_err.shutdown_mask)
 		return true;
 
 	return false;
 }
 
+static void hpre_disable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+	u32 val;
+
+	val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
+	writel(val, qm->io_base + HPRE_INT_MASK);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
+		       qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
+static void hpre_enable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+	/* clear axi error source */
+	writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
+
+	writel(~err_mask, qm->io_base + HPRE_INT_MASK);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
 static void hpre_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
+	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+	qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+	qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+	qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+						    HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+						 HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+	qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+	dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+	dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+	dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+						     HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+						  HPRE_RESET_MASK_CAP, qm->cap_ver);
+	dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
 
-	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
-	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
-	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
-	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
-	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
-							  HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
-							 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
-						      HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
-	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
-						       HPRE_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
 	err_info->acpi_rst = "HRST";
 }
@@ -1438,6 +1468,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
 	.err_info_init		= hpre_err_info_init,
 	.get_err_result		= hpre_get_err_result,
 	.dev_is_abnormal	= hpre_dev_is_abnormal,
+	.disable_axi_error	= hpre_disable_axi_error,
+	.enable_axi_error	= hpre_enable_axi_error,
 };
 
 static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 0c70cf179a4c..9e9fb2466537 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -148,9 +148,9 @@
 #define QM_RAS_CE_TIMES_PER_IRQ		1
 #define QM_OOO_SHUTDOWN_SEL		0x1040f8
 #define QM_AXI_RRESP_ERR		BIT(0)
-#define QM_ECC_MBIT			BIT(2)
 #define QM_DB_TIMEOUT			BIT(10)
 #define QM_OF_FIFO_OF			BIT(11)
+#define QM_RAS_AXI_ERROR		(BIT(0) | BIT(1) | BIT(12))
 
 #define QM_RESET_WAIT_TIMEOUT		400
 #define QM_PEH_VENDOR_ID		0x1000d8
@@ -166,7 +166,6 @@
 #define ACC_MASTER_TRANS_RETURN		0x300150
 #define ACC_MASTER_GLOBAL_CTRL		0x300000
 #define ACC_AM_CFG_PORT_WR_EN		0x30001c
-#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
 #define ACC_AM_ROB_ECC_INT_STS		0x300104
 #define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
 #define QM_MSI_CAP_ENABLE		BIT(16)
@@ -526,7 +525,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
 		return false;
 
 	err_status = qm_get_hw_error_status(pf_qm);
-	if (err_status & pf_qm->err_info.qm_shutdown_mask)
+	if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
 		return true;
 
 	if (pf_qm->err_ini->dev_is_abnormal)
@@ -1430,17 +1429,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
 
 static void qm_hw_error_cfg(struct hisi_qm *qm)
 {
-	struct hisi_qm_err_info *err_info = &qm->err_info;
+	struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
 
-	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
+	qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
 	/* clear QM hw residual error source */
 	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
 
 	/* configure error type */
-	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
+	writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
 	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
-	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
-	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
+	writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+	writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
 }
 
 static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1469,7 +1468,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
 	qm_hw_error_cfg(qm);
 
 	/* enable close master ooo when hardware error happened */
-	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+	writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
 
 	irq_unmask = ~qm->error_mask;
 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1531,6 +1530,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
 
 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 {
+	struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
 	u32 error_status;
 
 	error_status = qm_get_hw_error_status(qm);
@@ -1539,17 +1539,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 			qm->err_status.is_qm_ecc_mbit = true;
 
 		qm_log_hw_error(qm, error_status);
-		if (error_status & qm->err_info.qm_reset_mask) {
+		if (error_status & qm_err->reset_mask) {
 			/* Disable the same error reporting until device is recovered. */
-			writel(qm->err_info.nfe & (~error_status),
-			       qm->io_base + QM_RAS_NFE_ENABLE);
+			writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
 			return ACC_ERR_NEED_RESET;
 		}
 
 		/* Clear error source if not need reset. */
 		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
-		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
-		writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
+		writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+		writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
 	}
 
 	return ACC_ERR_RECOVERED;
@@ -4315,9 +4314,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
 	    !qm->err_status.is_qm_ecc_mbit &&
 	    !qm->err_ini->close_axi_master_ooo) {
 		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
-		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+		writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
 		       qm->io_base + QM_RAS_NFE_ENABLE);
-		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+		writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
 	}
 }
 
@@ -4599,12 +4598,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
 	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);
 
 	/* clear dev ecc 2bit error source if having */
-	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
+	value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
 	if (value && qm->err_ini->clear_dev_hw_err_status)
 		qm->err_ini->clear_dev_hw_err_status(qm, value);
 
 	/* clear QM ecc mbit error source */
-	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+	writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
 
 	/* clear AM Reorder Buffer ecc mbit source */
 	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4631,6 +4630,34 @@ static void qm_restart_done(struct hisi_qm *qm)
 	qm->err_status.is_dev_ecc_mbit = false;
 }
 
+static void qm_disable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+	u32 val;
+
+	val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
+	writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
+	if (qm->ver > QM_HW_V2)
+		writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
+		       qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+	if (qm->err_ini->disable_axi_error)
+		qm->err_ini->disable_axi_error(qm);
+}
+
+static void qm_enable_axi_error(struct hisi_qm *qm)
+{
+	/* clear axi error source */
+	writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+	writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
+	if (qm->ver > QM_HW_V2)
+		writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+	if (qm->err_ini->enable_axi_error)
+		qm->err_ini->enable_axi_error(qm);
+}
+
 static int qm_controller_reset_done(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
@@ -4664,6 +4691,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
 
 	qm_restart_prepare(qm);
 	hisi_qm_dev_err_init(qm);
+	qm_disable_axi_error(qm);
 	if (qm->err_ini->open_axi_master_ooo)
 		qm->err_ini->open_axi_master_ooo(qm);
 
@@ -4686,7 +4714,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
 	ret = qm_wait_vf_prepare_finish(qm);
 	if (ret)
 		pci_err(pdev, "failed to start by vfs in soft reset!\n");
-
+	qm_enable_axi_error(qm);
 	qm_cmd_init(qm);
 	qm_restart_done(qm);
 
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 50161336287f..50268a4f6fad 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -47,6 +47,8 @@
 #define SEC_RAS_FE_ENB_MSK		0x0
 #define SEC_OOO_SHUTDOWN_SEL		0x301014
 #define SEC_RAS_DISABLE			0x0
+#define SEC_AXI_ERROR_MASK		(BIT(0) | BIT(1))
+
 #define SEC_MEM_START_INIT_REG		0x301100
 #define SEC_MEM_INIT_DONE_REG		0x301104
 
@@ -666,8 +668,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + SEC_CONTROL_REG);
 	if (enable) {
 		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
-		val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
-					   SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+		val2 = qm->err_info.dev_err.shutdown_mask;
 	} else {
 		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
 		val2 = 0x0;
@@ -681,7 +682,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void sec_hw_error_enable(struct hisi_qm *qm)
 {
-	u32 ce, nfe;
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
 
 	if (qm->ver == QM_HW_V1) {
 		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -689,22 +691,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
 		return;
 	}
 
-	ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
-	nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
-
 	/* clear SEC hw error source if having */
-	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
+	writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);
 
 	/* enable RAS int */
-	writel(ce, qm->io_base + SEC_RAS_CE_REG);
-	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
-	writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+	writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
+	writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
+	writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);
 
 	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
 	sec_master_ooo_ctrl(qm, true);
 
 	/* enable SEC hw error interrupts */
-	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
+	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
 }
 
 static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -1061,9 +1060,8 @@ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 
 static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
 {
-	u32 nfe_mask;
+	u32 nfe_mask = qm->err_info.dev_err.nfe;
 
-	nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
 	writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
 }
 
@@ -1082,11 +1080,11 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
 
 	err_status = sec_get_hw_err_status(qm);
 	if (err_status) {
-		if (err_status & qm->err_info.ecc_2bits_mask)
+		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
 			qm->err_status.is_dev_ecc_mbit = true;
 		sec_log_hw_error(qm, err_status);
 
-		if (err_status & qm->err_info.dev_reset_mask) {
+		if (err_status & qm->err_info.dev_err.reset_mask) {
 			/* Disable the same error reporting until device is recovered. */
 			sec_disable_error_report(qm, err_status);
 			return ACC_ERR_NEED_RESET;
@@ -1102,28 +1100,62 @@ static bool sec_dev_is_abnormal(struct hisi_qm *qm)
 	u32 err_status;
 
 	err_status = sec_get_hw_err_status(qm);
-	if (err_status & qm->err_info.dev_shutdown_mask)
+	if (err_status & qm->err_info.dev_err.shutdown_mask)
 		return true;
 
 	return false;
 }
 
+static void sec_disable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+	writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
+		       qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
+static void sec_enable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+	/* clear axi error source */
+	writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);
+
+	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
 static void sec_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
+	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+	qm_err->fe = SEC_RAS_FE_ENB_MSK;
+	qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+	qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
+	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+						    SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+						 SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+	qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+	dev_err->fe = SEC_RAS_FE_ENB_MSK;
+	dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+	dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+						     SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+						  SEC_RESET_MASK_CAP, qm->cap_ver);
+	dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
 
-	err_info->fe = SEC_RAS_FE_ENB_MSK;
-	err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
-	err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
-	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
-	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
-			SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
-			SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
-			SEC_QM_RESET_MASK_CAP, qm->cap_ver);
-	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
-			SEC_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = BIT(0);
 	err_info->acpi_rst = "SRST";
 }
@@ -1141,6 +1173,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
 	.err_info_init		= sec_err_info_init,
 	.get_err_result		= sec_get_err_result,
 	.dev_is_abnormal	= sec_dev_is_abnormal,
+	.disable_axi_error	= sec_disable_axi_error,
+	.enable_axi_error	= sec_enable_axi_error,
 };
 
 static int sec_pf_probe_init(struct sec_dev *sec)
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index cdf4cfe69dac..fb197db8f8bc 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -64,6 +64,7 @@
 #define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
 #define HZIP_CORE_INT_MASK_ALL		GENMASK(12, 0)
+#define HZIP_AXI_ERROR_MASK		(BIT(2) | BIT(3))
 #define HZIP_SQE_SIZE			128
 #define HZIP_PF_DEF_Q_NUM		64
 #define HZIP_PF_DEF_Q_BASE		0
@@ -591,8 +592,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
 	if (enable) {
 		val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
-		val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-					   ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+		val2 = qm->err_info.dev_err.shutdown_mask;
 	} else {
 		val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
 		val2 = 0x0;
@@ -606,7 +606,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
 {
-	u32 nfe, ce;
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
 
 	if (qm->ver == QM_HW_V1) {
 		writel(HZIP_CORE_INT_MASK_ALL,
@@ -615,33 +616,29 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
 		return;
 	}
 
-	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
-	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-
 	/* clear ZIP hw error source if having */
-	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+	writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);
 
 	/* configure error type */
-	writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
-	writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
-	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+	writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+	writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+	writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
 
 	hisi_zip_master_ooo_ctrl(qm, true);
 
 	/* enable ZIP hw error interrupts */
-	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+	writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
 
 	hisi_dae_hw_error_enable(qm);
 }
 
 static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
 {
-	u32 nfe, ce;
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
 
 	/* disable ZIP hw error interrupts */
-	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
-	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
+	writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
 
 	hisi_zip_master_ooo_ctrl(qm, false);
 
@@ -1115,9 +1112,8 @@ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 
 static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
 {
-	u32 nfe_mask;
+	u32 nfe_mask = qm->err_info.dev_err.nfe;
 
-	nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
 	writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
 }
 
@@ -1159,14 +1155,14 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
 	/* Get device hardware new error status */
 	err_status = hisi_zip_get_hw_err_status(qm);
 	if (err_status) {
-		if (err_status & qm->err_info.ecc_2bits_mask)
+		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
 			qm->err_status.is_dev_ecc_mbit = true;
 		hisi_zip_log_hw_error(qm, err_status);
 
-		if (err_status & qm->err_info.dev_reset_mask) {
+		if (err_status & qm->err_info.dev_err.reset_mask) {
 			/* Disable the same error reporting until device is recovered. */
 			hisi_zip_disable_error_report(qm, err_status);
-			return ACC_ERR_NEED_RESET;
+			zip_result = ACC_ERR_NEED_RESET;
 		} else {
 			hisi_zip_clear_hw_err_status(qm, err_status);
 		}
@@ -1184,7 +1180,7 @@ static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
 	u32 err_status;
 
 	err_status = hisi_zip_get_hw_err_status(qm);
-	if (err_status & qm->err_info.dev_shutdown_mask)
+	if (err_status & qm->err_info.dev_err.shutdown_mask)
 		return true;
 
 	return hisi_dae_dev_is_abnormal(qm);
@@ -1195,23 +1191,59 @@ static int hisi_zip_set_priv_status(struct hisi_qm *qm)
 	return hisi_dae_close_axi_master_ooo(qm);
 }
 
+static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+	u32 val;
+
+	val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
+	writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
+		       qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
+static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+	/* clear axi error source */
+	writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+	writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+	if (qm->ver > QM_HW_V2)
+		writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
 static void hisi_zip_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
+	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+	qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+	qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+	qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+					  ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
+	qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+	qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						 ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						    ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+
+	dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+	dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+	dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+	dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						     ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						  ZIP_RESET_MASK_CAP, qm->cap_ver);
 
-	err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
-	err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
-	err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-					    ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
-	err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
-	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-							 ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-							  ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
-	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-						      ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
-	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
-						       ZIP_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = HZIP_WR_PORT;
 	err_info->acpi_rst = "ZRST";
 }
@@ -1231,6 +1263,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
 	.get_err_result		= hisi_zip_get_err_result,
 	.set_priv_status	= hisi_zip_set_priv_status,
 	.dev_is_abnormal	= hisi_zip_dev_is_abnormal,
+	.disable_axi_error	= hisi_zip_disable_axi_error,
+	.enable_axi_error	= hisi_zip_enable_axi_error,
 };
 
 static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 013be52cc950..3c89171514c2 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -105,6 +105,8 @@
 #define UACCE_MODE_SVA		1 /* use uacce sva mode */
 #define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"
 
+#define QM_ECC_MBIT		BIT(2)
+
 enum qm_stop_reason {
 	QM_NORMAL,
 	QM_SOFT_RESET,
@@ -240,19 +242,22 @@ enum acc_err_result {
 	ACC_ERR_RECOVERED,
 };
 
-struct hisi_qm_err_info {
-	char *acpi_rst;
-	u32 msi_wr_port;
+struct hisi_qm_err_mask {
 	u32 ecc_2bits_mask;
-	u32 qm_shutdown_mask;
-	u32 dev_shutdown_mask;
-	u32 qm_reset_mask;
-	u32 dev_reset_mask;
+	u32 shutdown_mask;
+	u32 reset_mask;
 	u32 ce;
 	u32 nfe;
 	u32 fe;
 };
 
+struct hisi_qm_err_info {
+	char *acpi_rst;
+	u32 msi_wr_port;
+	struct hisi_qm_err_mask qm_err;
+	struct hisi_qm_err_mask dev_err;
+};
+
 struct hisi_qm_err_status {
 	u32 is_qm_ecc_mbit;
 	u32 is_dev_ecc_mbit;
@@ -273,6 +278,8 @@ struct hisi_qm_err_ini {
 	enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
 	bool (*dev_is_abnormal)(struct hisi_qm *qm);
 	int (*set_priv_status)(struct hisi_qm *qm);
+	void (*disable_axi_error)(struct hisi_qm *qm);
+	void (*enable_axi_error)(struct hisi_qm *qm);
 };
 
 struct hisi_qm_cap_info {
--
2.43.0
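The bracket this patch builds around the reset path, reduced to a
sketch; every register offset and helper below is an assumption for
illustration only. The idea is symmetric: drop only the AXI error bits
from the mask while device memory is reinitialized, then clear whatever
latched and restore the full mask.

#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_AXI_ERROR	(BIT(0) | BIT(1))	/* assumed AXI error bits */
#define DEMO_INT_MASK	0x100			/* assumed mask register */
#define DEMO_INT_SOURCE	0x104			/* assumed source register */

struct demo_qm {
	void __iomem *io_base;
	u32 error_mask;	/* ce | nfe | fe */
};

static void demo_reinit_memory(struct demo_qm *qm) { /* device specific */ }

static void demo_reset_with_axi_masked(struct demo_qm *qm)
{
	/* Mask only the AXI error bits; keep the rest enabled (0 = enabled). */
	writel(~(qm->error_mask & ~DEMO_AXI_ERROR), qm->io_base + DEMO_INT_MASK);

	demo_reinit_memory(qm);	/* doorbells here cannot raise AXI errors */

	/* Clear any AXI error latched while masked, then restore the mask. */
	writel(DEMO_AXI_ERROR, qm->io_base + DEMO_INT_SOURCE);
	writel(~qm->error_mask, qm->io_base + DEMO_INT_MASK);
}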

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC CVE: NA ---------------------------------------------------------------------- The resources required by the interrupt processing like workqueue are applied for in function hisi_qm_memory_init(). Resources are requested regardless of whether interrupts are enabled. As a result, the applied resources may not be used. To avoid waste of resources, the resource application is moved to function qm_irqs_register(). When the interrupt type is not supported, resource is not applied. In addition, Interrupt registrations and interrupt resource application in the same interface are easy to maintain. Fixes: 3536cc55cada ("crypto: hisilicon/qm - support get device irq information from hardware registers") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_main.c | 3 - drivers/crypto/hisilicon/qm.c | 186 +++++++++++++--------- drivers/crypto/hisilicon/sec2/sec_main.c | 2 - drivers/crypto/hisilicon/zip/zip_main.c | 2 - include/linux/hisi_acc_qm.h | 2 - 5 files changed, 108 insertions(+), 87 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 471bdd856be8..7abc187033b9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1482,8 +1482,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) return ret; hpre_open_sva_prefetch(qm); - - hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) pci_err(qm->pdev, "Failed to init last word regs!\n"); @@ -1520,7 +1518,6 @@ static void hpre_probe_uninit(struct hisi_qm *qm) qm->debug.curr_qm_qp_num = 0; hpre_show_last_regs_uninit(qm); hpre_close_sva_prefetch(qm); - hisi_qm_dev_err_uninit(qm); } static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9e9fb2466537..3d2cc3f21282 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -473,6 +473,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { static void qm_irqs_unregister(struct hisi_qm *qm); static int qm_reset_device(struct hisi_qm *qm); +static void qm_dev_err_uninit(struct hisi_qm *qm); + int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, unsigned int device) { @@ -2971,10 +2973,8 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) for (i = num - 1; i >= 0; i--) { qdma = &qm->qp_array[i].qdma; dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - kfree(qm->poll_data[i].qp_finish_id); } - kfree(qm->poll_data); kfree(qm->qp_array); } @@ -2984,18 +2984,12 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, struct device *dev = &qm->pdev->dev; size_t off = qm->sqe_size * sq_depth; struct hisi_qp *qp; - int ret = -ENOMEM; - - qm->poll_data[id].qp_finish_id = kcalloc(qm->eq_depth, sizeof(u16), - GFP_KERNEL); - if (!qm->poll_data[id].qp_finish_id) - return -ENOMEM; qp = &qm->qp_array[id]; qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); if (!qp->qdma.va) - goto err_free_qp_finish_id; + return -ENOMEM; qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; @@ -3008,10 +3002,6 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, qp->qp_id = id; return 0; - -err_free_qp_finish_id: - kfree(qm->poll_data[id].qp_finish_id); - return ret; } static inline bool 
is_iommu_used(struct device *dev) @@ -3132,11 +3122,6 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) writel(state, qm->io_base + QM_VF_STATE); } -static void hisi_qm_unint_work(struct hisi_qm *qm) -{ - destroy_workqueue(qm->wq); -} - static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) { struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; @@ -3171,8 +3156,7 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm) */ void hisi_qm_uninit(struct hisi_qm *qm) { - qm_cmd_uninit(qm); - hisi_qm_unint_work(qm); + qm_dev_err_uninit(qm); down_write(&qm->qps_lock); hisi_qm_memory_uninit(qm); @@ -3547,12 +3531,12 @@ static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) } /** - * hisi_qm_dev_err_init() - Initialize device error configuration. + * qm_dev_err_init() - Initialize device error configuration. * @qm: The qm for which we want to do error initialization. * * Initialize QM and device error related configuration. */ -void hisi_qm_dev_err_init(struct hisi_qm *qm) +static void qm_dev_err_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; @@ -3565,15 +3549,14 @@ void hisi_qm_dev_err_init(struct hisi_qm *qm) } qm->err_ini->hw_err_enable(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); /** - * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. + * qm_dev_err_uninit() - Uninitialize device error configuration. * @qm: The qm for which we want to do error uninitialization. * * Uninitialize QM and device error related configuration. */ -void hisi_qm_dev_err_uninit(struct hisi_qm *qm) +static void qm_dev_err_uninit(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; @@ -3586,7 +3569,6 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm) } qm->err_ini->hw_err_disable(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); /** * hisi_qm_free_qps() - free multiple queue pairs. 
@@ -4690,7 +4672,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) } qm_restart_prepare(qm); - hisi_qm_dev_err_init(qm); + qm_dev_err_init(qm); qm_disable_axi_error(qm); if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm); @@ -4797,7 +4779,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) u32 delay = 0; int ret; - hisi_qm_dev_err_uninit(pf_qm); + qm_dev_err_uninit(pf_qm); /* * Check whether there is an ECC mbit error, If it occurs, need to @@ -4868,7 +4850,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev) } } - hisi_qm_dev_err_init(pf_qm); + qm_dev_err_init(pf_qm); ret = qm_restart(qm); if (ret) { @@ -5182,6 +5164,9 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return; + if (!qm->err_ini->err_info_init) + return; + val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return; @@ -5199,16 +5184,27 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return 0; + if (!qm->err_ini->err_info_init) { + dev_info(&qm->pdev->dev, "device doesnot support error init!\n"); + return 0; + } + val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return 0; + INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); + if (ret) { + dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret); + return ret; + } - return ret; + qm_dev_err_init(qm); + + return 0; } static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) @@ -5220,6 +5216,8 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; + qm_cmd_uninit(qm); + irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } @@ -5234,14 +5232,22 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; + INIT_WORK(&qm->cmd_process, qm_cmd_process); + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); + if (ret) { + dev_err(&pdev->dev, + "failed to request function communication irq, ret = %d!\n", ret); + return ret; + } - return ret; + qm_cmd_init(qm); + + return 0; } +/* Disable aeq interrupt by hisi_qm_stop(). */ static void qm_unregister_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5255,6 +5261,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm) free_irq(pci_irq_vector(pdev, irq_vector), qm); } +/* Enable aeq interrupt by hisi_qm_start(). 
*/ static int qm_register_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5269,11 +5276,62 @@ static int qm_register_aeq_irq(struct hisi_qm *qm) ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); if (ret) - dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + dev_err(&pdev->dev, "failed to request aeq irq, ret = %d!\n", ret); + + return ret; +} + +static void qm_uninit_eq_work(struct hisi_qm *qm) +{ + int i; + + destroy_workqueue(qm->wq); + for (i = qm->qp_num - 1; i >= 0; i--) + kfree(qm->poll_data[i].qp_finish_id); + + kfree(qm->poll_data); +} + +static int qm_init_eq_work(struct hisi_qm *qm) +{ + int ret = -ENOMEM; + u16 eq_depth; + int i; + + qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); + if (!qm->poll_data) + return ret; + + qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); + eq_depth = qm->eq_depth >> 1; + for (i = 0; i < qm->qp_num; i++) { + qm->poll_data[i].qp_finish_id = kcalloc(eq_depth, sizeof(u16), GFP_KERNEL); + if (!qm->poll_data[i].qp_finish_id) + goto free_qp_finish_id; + + INIT_WORK(&qm->poll_data[i].work, qm_work_process); + qm->poll_data[i].qm = qm; + } + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + dev_err(&qm->pdev->dev, "failed to alloc workqueue!\n"); + goto free_qp_finish_id; + } + + return 0; + +free_qp_finish_id: + for (i = i - 1; i >= 0; i--) + kfree(qm->poll_data[i].qp_finish_id); + + kfree(qm->poll_data); return ret; } +/* Disable eq interrupt by hisi_qm_stop(). */ static void qm_unregister_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5285,8 +5343,11 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm) irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); + + qm_uninit_eq_work(qm); } +/* Enable eq interrupt by hisi_qm_start(). 
*/ static int qm_register_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5297,10 +5358,18 @@ static int qm_register_eq_irq(struct hisi_qm *qm) if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; + ret = qm_init_eq_work(qm); + if (ret) { + dev_err(&pdev->dev, "failed to init eq work!\n"); + return ret; + } + irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); - if (ret) - dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + if (ret) { + dev_err(&pdev->dev, "failed to request eq irq, ret = %d!\n", ret); + qm_uninit_eq_work(qm); + } return ret; } @@ -5593,30 +5662,6 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) return ret; } -static int hisi_qm_init_work(struct hisi_qm *qm) -{ - int i; - - for (i = 0; i < qm->qp_num; i++) - INIT_WORK(&qm->poll_data[i].work, qm_work_process); - - if (qm->fun_type == QM_HW_PF) - INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); - - if (qm->ver > QM_HW_V2) - INIT_WORK(&qm->cmd_process, qm_cmd_process); - - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "failed to alloc workqueue!\n"); - return -ENOMEM; - } - - return 0; -} - static int hisi_qp_alloc_memory(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -5628,19 +5673,12 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) if (!qm->qp_array) return -ENOMEM; - qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); - if (!qm->poll_data) { - kfree(qm->qp_array); - return -ENOMEM; - } - qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); /* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { - qm->poll_data[i].qm = qm; ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); if (ret) goto err_init_qp_mem; @@ -5708,7 +5746,6 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) } while (0) idr_init(&qm->qp_idr); - qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + @@ -5789,17 +5826,10 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_alloc_uacce; - ret = hisi_qm_init_work(qm); - if (ret) - goto err_free_qm_memory; - - qm_cmd_init(qm); hisi_mig_region_enable(qm); return 0; -err_free_qm_memory: - hisi_qm_memory_uninit(qm); err_alloc_uacce: qm_remove_uacce(qm); err_irq_register: @@ -5933,7 +5963,7 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm) } qm_cmd_init(qm); - hisi_qm_dev_err_init(qm); + qm_dev_err_init(qm); /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. 
*/ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 50268a4f6fad..9af30716c49c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1187,7 +1187,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) return ret; sec_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); if (ret) @@ -1307,7 +1306,6 @@ static void sec_probe_uninit(struct hisi_qm *qm) sec_debug_regs_clear(qm); sec_show_last_regs_uninit(qm); sec_close_sva_prefetch(qm); - hisi_qm_dev_err_uninit(qm); } static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index fb197db8f8bc..10ddbaa02773 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1289,7 +1289,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return ret; hisi_zip_open_sva_prefetch(qm); - hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); ret = hisi_zip_show_last_regs_init(qm); @@ -1421,7 +1420,6 @@ static void hisi_zip_probe_uninit(struct hisi_qm *qm) hisi_zip_show_last_regs_uninit(qm); hisi_zip_close_sva_prefetch(qm); - hisi_qm_dev_err_uninit(qm); } static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 3c89171514c2..689582249a90 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -546,8 +546,6 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm); int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); -void hisi_qm_dev_err_init(struct hisi_qm *qm); -void hisi_qm_dev_err_uninit(struct hisi_qm *qm); int hisi_qm_regs_debugfs_init(struct hisi_qm *qm, struct dfx_diff_registers *dregs, u32 reg_len); void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len); -- 2.43.0
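
The error path of qm_init_eq_work() above follows the usual alloc-then-unwind idiom: on a partial failure, only the entries that were successfully allocated are freed, newest first. A minimal userspace sketch of the same idiom (hypothetical names, not driver code):

	#include <stdlib.h>

	struct poll_data {
		unsigned short *qp_finish_id;
	};

	static int init_poll_data(struct poll_data **out, int qp_num, int depth)
	{
		struct poll_data *pd;
		int i;

		pd = calloc(qp_num, sizeof(*pd));
		if (!pd)
			return -1;

		for (i = 0; i < qp_num; i++) {
			pd[i].qp_finish_id = calloc(depth, sizeof(*pd[i].qp_finish_id));
			if (!pd[i].qp_finish_id)
				goto unwind;
		}

		*out = pd;
		return 0;

	unwind:
		/* free only the entries that were allocated, newest first */
		while (--i >= 0)
			free(pd[i].qp_finish_id);
		free(pd);
		return -1;
	}

Tying the work-queue resources to the eq irq registration, as the patch does, keeps allocation and teardown in one place instead of splitting them between hisi_qm_init_work() and memory init.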

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC CVE: NA ---------------------------------------------------------------------- When functions communicate with each other and the mailbox operation fails, the function cannot obtain the message from the communication source. If the VF does not receive the stop message from the PF during reset, the VF becomes unavailable. For the global reset scenario: 1. When the VF fails to obtain the PF message because the mailbox fails, the failure is treated as a global reset and the function is stopped directly. When the PF sends the reset message to the VF and the mailbox fails, the PF still sends the interrupt event to the VF. 2. Increase the time the PF waits for the VF response. Fixes: 760fe22cf5e9 ("crypto: hisilicon/qm - update reset flow") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 28 +++++++++++++++++++++------- include/linux/hisi_acc_qm.h | 1 + 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 3d2cc3f21282..6abccf99c860 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -189,8 +189,8 @@ #define QM_IFC_INT_DISABLE BIT(0) #define QM_IFC_INT_STATUS_MASK BIT(0) #define QM_IFC_INT_SET_MASK BIT(0) -#define QM_WAIT_DST_ACK 10 -#define QM_MAX_PF_WAIT_COUNT 10 +#define QM_WAIT_DST_ACK 1000 +#define QM_MAX_PF_WAIT_COUNT 20 #define QM_MAX_VF_WAIT_COUNT 40 #define QM_VF_RESET_WAIT_US 20000 #define QM_VF_RESET_WAIT_CNT 3000 @@ -523,6 +523,10 @@ static bool qm_check_dev_error(struct hisi_qm *qm) struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); u32 err_status; + if (test_bit(QM_DEVICE_DOWN, &qm->misc_ctl)) + return true; + + /* VF cannot read status register, return false */ if (pf_qm->fun_type == QM_HW_VF) return false; @@ -1721,7 +1725,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) u32 i; ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS); - if (ret) { + if (ret && cmd != QM_PF_FLR_PREPARE && cmd != QM_PF_SRST_PREPARE) { dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd); qm->ops->set_ifc_end(qm); return ret; @@ -1759,7 +1763,7 @@ static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd) int ret; ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0); - if (ret) { + if (ret && (cmd > QM_VF_START_FAIL || cmd < QM_VF_PREPARE_DONE)) { dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd); goto unlock; } @@ -1769,8 +1773,10 @@ static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd) while (true) { msleep(QM_WAIT_DST_ACK); val = readl(qm->io_base + QM_IFC_INT_SET_V); - if (!(val & QM_IFC_INT_STATUS_MASK)) + if (!(val & QM_IFC_INT_STATUS_MASK)) { + ret = 0; break; + } if (++cnt > QM_MAX_VF_WAIT_COUNT) { ret = -ETIMEDOUT; @@ -5038,6 +5044,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, if (ret) goto err_get_status; + clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_pf_reset_vf_done(qm); dev_info(dev, "device reset done.\n"); @@ -5045,6 +5052,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, return; err_get_status: + clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_cmd_init(qm); qm_reset_bit_clear(qm); } @@ -5063,8 +5071,13 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num); qm_clear_cmd_interrupt(qm, BIT(fun_num)); if (ret) { - dev_err(dev, "failed to get command from source!\n"); - return; + if (!fun_num) { + cmd = QM_PF_SRST_PREPARE; + dev_err(dev, "failed to get response from PF, suppose it is a soft reset!\n"); + } else { + dev_err(dev, "failed to get command from source!\n"); + return; + } } switch (cmd) { @@ -5072,6 +5085,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) qm_pf_reset_vf_process(qm, QM_DOWN); break; case QM_PF_SRST_PREPARE: + set_bit(QM_DEVICE_DOWN, &qm->misc_ctl); qm_pf_reset_vf_process(qm, QM_SOFT_RESET); break; case QM_VF_GET_QOS: diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 689582249a90..53c2b7792c6a 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -152,6 +152,7 @@ enum qm_misc_ctl_bits { QM_RST_SCHED, QM_RESETTING, QM_MODULE_PARAM, + QM_DEVICE_DOWN, }; enum qm_cap_bits { -- 2.43.0
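
Two details of the qm_ping_pf() hunk above are easy to miss: the ack loop must reset ret to 0 when the destination clears the status bit (otherwise a stale error from a failed but tolerated send leaks out), and the wait is bounded by a counter rather than open-ended. A standalone userspace sketch of that bounded-ack pattern (stand-in names; the real code polls QM_IFC_INT_SET_V):

	#include <stdbool.h>
	#include <unistd.h>

	#define WAIT_STEP_US	1000	/* stand-in for QM_WAIT_DST_ACK */
	#define MAX_WAIT_CNT	40	/* stand-in for QM_MAX_VF_WAIT_COUNT */

	extern bool ack_bit_set(void);	/* stand-in for reading the ack register */

	static int wait_for_ack(int ret_from_send)
	{
		int ret = ret_from_send;	/* may be non-zero if the send failed */
		int cnt = 0;

		while (true) {
			usleep(WAIT_STEP_US);
			if (!ack_bit_set()) {
				ret = 0;	/* destination acked: clear any stale error */
				break;
			}
			if (++cnt > MAX_WAIT_CNT) {
				ret = -1;	/* timed out */
				break;
			}
		}

		return ret;
	}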

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC CVE: NA ---------------------------------------------------------------------- The mailbox needs to be triggered by a 128-bit atomic operation. The reason is that one QM hardware entity in one accelerator serves the QM mailbox MMIO interfaces of the related PF and VFs. A mutex cannot serialize mailbox accesses from different functions. When multiple functions access the mailbox simultaneously and the generic IO interface readq/writeq is used to access the mailbox, the data read from or written to the mailbox is unpredictable. Therefore, the generic IO interface is changed to a 128-bit atomic operation. Fixes: 263c9959c937 ("crypto: hisilicon - add queue management driver for HiSilicon QM module") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 177 +++++++++++++++++++++++----------- include/linux/hisi_acc_qm.h | 1 - 2 files changed, 120 insertions(+), 58 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 6abccf99c860..948c93c91bf7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -32,6 +32,10 @@ /* mailbox */ #define QM_MB_PING_ALL_VFS 0xffff #define QM_MB_STATUS_MASK GENMASK(12, 9) +#define QM_MB_BUSY_MASK BIT(13) +#define QM_MB_SIZE 16 +#define QM_MB_MAX_WAIT_CNT 6000 +#define QM_MB_WAIT_READY_CNT 10 /* sqc shift */ #define QM_SQ_HOP_NUM_SHIFT 0 @@ -202,9 +206,10 @@ #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 +#define QM_MB_MAX_STOP_CNT 20000 #define WAIT_PERIOD_US_MAX 200 #define WAIT_PERIOD_US_MIN 100 -#define MAX_WAIT_COUNTS 1000 +#define MAX_WAIT_COUNTS 10000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 #define QM_FUNC_CAPS_REG 0x3100 @@ -592,17 +597,6 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, mailbox->rsvd = 0; } -/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ -int hisi_qm_wait_mb_ready(struct hisi_qm *qm) -{ - u32 val; - - return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, - val, !((val >> QM_MB_BUSY_SHIFT) & - 0x1), POLL_PERIOD, POLL_TIMEOUT); -} -EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); - /* 128 bit should be written to hardware at one time to trigger a mailbox */ static void qm_mb_write(struct hisi_qm *qm, const void *src) { @@ -613,7 +607,7 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) #endif if (!IS_ENABLED(CONFIG_ARM64)) { - memcpy_toio(fun_base, src, 16); + memcpy_toio(fun_base, src, QM_MB_SIZE); dma_wmb(); return; } @@ -630,35 +624,95 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) #endif } -static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) +/* 128 bit should be read from hardware at one time */ +static void qm_mb_read(struct hisi_qm *qm, void *dst) { - int ret; - u32 val; + const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; + +#if IS_ENABLED(CONFIG_ARM64) + unsigned long tmp0 = 0, tmp1 = 0; +#endif - if (unlikely(hisi_qm_wait_mb_ready(qm))) { - dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); - ret = -EBUSY; - goto mb_busy; + if (!IS_ENABLED(CONFIG_ARM64)) { + memcpy_fromio(dst, fun_base, QM_MB_SIZE); + dma_wmb(); + return; } - qm_mb_write(qm, mailbox); + +#if IS_ENABLED(CONFIG_ARM64) + asm volatile("ldp %0, %1, %3\n" + "stp %0, %1, %2\n" + "dmb oshst\n" + : "=&r" (tmp0), + "=&r" (tmp1), + "+Q" (*((char *)dst)) + : "Q" (*((char __iomem *)fun_base)) + : "memory"); +#endif +} + +int hisi_qm_wait_mb_ready(struct hisi_qm *qm) +{ + struct qm_mailbox mailbox; + int i = 0; + + while (i++ < QM_MB_WAIT_READY_CNT) { + qm_mb_read(qm, &mailbox); + if (!(le16_to_cpu(mailbox.w0) & QM_MB_BUSY_MASK)) + return 0; - if (unlikely(hisi_qm_wait_mb_ready(qm))) { - dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); - ret = -ETIMEDOUT; - goto mb_busy; + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); } - val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); - if (val & QM_MB_STATUS_MASK) { - dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); - ret = -EIO; - goto mb_busy; + dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); + + return -EBUSY; +} +EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); + +static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_cnt) +{ + struct device *dev = &qm->pdev->dev; + u32 i = 0; + + while (++i) { + qm_mb_read(qm, mailbox); + if (!(le16_to_cpu(mailbox->w0) & QM_MB_BUSY_MASK)) + break; + + if (i == wait_cnt) { + dev_err(dev, "QM mailbox operation timeout!\n"); + return -ETIMEDOUT; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) { + dev_err(dev, "QM mailbox operation failed!\n"); + return -EIO; } return 0; +} + +static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_cnt) +{ + int ret; + + ret = hisi_qm_wait_mb_ready(qm); + if (ret) + goto mb_err_cnt_increase; + + qm_mb_write(qm, mailbox); + + ret = qm_wait_mb_finish(qm, mailbox, wait_cnt); + if (ret) + goto mb_err_cnt_increase; -mb_busy: + return 0; + +mb_err_cnt_increase: atomic64_inc(&qm->debug.dfx.mb_err_cnt); return ret; } @@ -667,18 +721,48 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op) { struct qm_mailbox mailbox; + u32 wait_cnt; int ret; + if (cmd == QM_MB_CMD_STOP_QP || cmd == QM_MB_CMD_FLUSH_QM) + wait_cnt = QM_MB_MAX_STOP_CNT; + else + wait_cnt = QM_MB_MAX_WAIT_CNT; + + /* No need to judge if master OOO is blocked. */ + if (qm_check_dev_error(qm)) { + dev_err(&qm->pdev->dev, "QM mailbox operation failed since qm is stopped!\n"); + return -EIO; + } + qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op); mutex_lock(&qm->mailbox_lock); - ret = qm_mb_nolock(qm, &mailbox); + ret = qm_mb_nolock(qm, &mailbox, wait_cnt); mutex_unlock(&qm->mailbox_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_mb); +static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +{ + struct qm_mailbox mailbox; + int ret; + + qm_mb_pre_init(&mailbox, cmd, 0, queue, 1); + mutex_lock(&qm->mailbox_lock); + ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_CNT); + mutex_unlock(&qm->mailbox_lock); + if (ret) + return ret; + + *base = le32_to_cpu(mailbox.base_l) | + ((u64)le32_to_cpu(mailbox.base_h) << 32); + + return 0; +} + /* op 0: set xqc information to hardware, 1: get xqc information from hardware.
*/ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) { @@ -725,7 +809,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op memcpy(tmp_xqc, xqc, size); qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op); - ret = qm_mb_nolock(qm, &mailbox); + ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_CNT); if (!ret && op) memcpy(xqc, tmp_xqc, size); @@ -1415,12 +1499,10 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) u64 sqc_vft; int ret; - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); if (ret) return ret; - sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); *number = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; @@ -1560,25 +1642,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) return ACC_ERR_RECOVERED; } -static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) -{ - struct qm_mailbox mailbox; - int ret; - - qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); - mutex_lock(&qm->mailbox_lock); - ret = qm_mb_nolock(qm, &mailbox); - if (ret) - goto err_unlock; - - *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); - -err_unlock: - mutex_unlock(&qm->mailbox_lock); - return ret; -} - static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) { u32 val; @@ -1896,7 +1959,7 @@ static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0); mutex_lock(&qm->mailbox_lock); - return qm_mb_nolock(qm, &mailbox); + return qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_CNT); } static void qm_set_ifc_end_v3(struct hisi_qm *qm) @@ -1909,7 +1972,7 @@ static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u3 u64 msg; int ret; - ret = qm_get_mb_cmd(qm, &msg, fun_num); + ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num); if (ret) return ret; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 53c2b7792c6a..6d1b434d157b 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -54,7 +54,6 @@ #define QM_MB_OP_SHIFT 14 #define QM_MB_CMD_DATA_ADDR_L 0x304 #define QM_MB_CMD_DATA_ADDR_H 0x308 -#define QM_MB_MAX_WAIT_CNT 6000 /* doorbell */ #define QM_DOORBELL_CMD_SQ 0 -- 2.43.0
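
The heart of the change is the paired-register copy: on arm64, ldp/stp moves all 16 bytes of a mailbox message in one access, so a concurrent mailbox user on another function can never observe or inject a torn half. Extracted as a standalone helper (a sketch mirroring the inline asm in the diff; arm64 only, hypothetical name):

	static inline void mb_copy128(void *dst, const void *src)
	{
		unsigned long tmp0, tmp1;

		/* load 16 bytes into a register pair, store them back as one unit */
		asm volatile("ldp %0, %1, %3\n"
			     "stp %0, %1, %2\n"
			     "dmb oshst\n"
			     : "=&r" (tmp0), "=&r" (tmp1),
			       "+Q" (*(char *)dst)
			     : "Q" (*(const char *)src)
			     : "memory");
	}

On non-arm64 builds the diff falls back to memcpy_toio()/memcpy_fromio(), which keeps the code functional but does not provide the same single-access guarantee.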

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC CVE: NA ---------------------------------------------------------------------- The mailbox configuration (128 bits) needs to be obtained from the hardware at one time. If the mailbox configuration is obtained in multiple reads, the read value may be incorrect. Use a 128-bit atomic instruction to read the mailbox data instead of readl(). Fixes: 263c9959c937 ("crypto: hisilicon - add queue management driver for HiSilicon QM module") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 15 ++++--- .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 45 +++---------------- include/linux/hisi_acc_qm.h | 8 ++-- 3 files changed, 17 insertions(+), 51 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 948c93c91bf7..db3e62b884df 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -717,7 +717,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait return ret; } -int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, +int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op) { struct qm_mailbox mailbox; @@ -743,9 +743,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, return ret; } -EXPORT_SYMBOL_GPL(hisi_qm_mb); +EXPORT_SYMBOL_GPL(hisi_qm_mb_write); -static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) +int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) { struct qm_mailbox mailbox; int ret; @@ -762,6 +762,7 @@ static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue) return 0; } +EXPORT_SYMBOL_GPL(hisi_qm_mb_read); /* op 0: set xqc information to hardware, 1: get xqc information from hardware.
*/ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) @@ -1855,12 +1856,12 @@ static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd) static int qm_drain_qm(struct hisi_qm *qm) { - return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); + return hisi_qm_mb_write(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); } static int qm_stop_qp(struct hisi_qp *qp) { - return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); + return hisi_qm_mb_write(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); } static int qm_set_msi(struct hisi_qm *qm, bool set) @@ -3371,11 +3372,11 @@ static int __hisi_qm_start(struct hisi_qm *qm) if (ret) return ret; - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) return ret; - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) return ret; diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 103d93f9602a..dabb4bacedc9 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -81,13 +81,10 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base) u32 qp_num; int ret; - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0); if (ret) return ret; - sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); qp_num = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; @@ -95,36 +92,6 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base) return qp_num; } -static int qm_get_sqc(struct hisi_qm *qm, u64 *addr) -{ - int ret; - - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1); - if (ret) - return ret; - - *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); - - return 0; -} - -static int qm_get_cqc(struct hisi_qm *qm, u64 *addr) -{ - int ret; - - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1); - if (ret) - return ret; - - *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | - ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << - QM_XQC_ADDR_OFFSET); - - return 0; -} - static int qm_get_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct acc_vf_data *vf_data) { @@ -385,7 +352,7 @@ static void vf_qm_fun_reset(struct hisi_qm *qm) static int vf_qm_func_stop(struct hisi_qm *qm) { - return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); + return hisi_qm_mb_write(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); } static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev) @@ -561,13 +528,13 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, if (ret) return ret; - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) { dev_err(dev, "set sqc failed\n"); return ret; } - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) { dev_err(dev, "set cqc failed\n"); return ret; @@ -595,13 +562,13 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data) vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW]; /* Through SQC_BT/CQC_BT to get sqc and cqc address */ - ret = 
qm_get_sqc(vf_qm, &vf_data->sqc_dma); + ret = hisi_qm_mb_read(vf_qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0); if (ret) { dev_err(dev, "failed to read SQC addr!\n"); return ret; } - ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma); + ret = hisi_qm_mb_read(vf_qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0); if (ret) { dev_err(dev, "failed to read CQC addr!\n"); return ret; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 6d1b434d157b..e114fcce8aef 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -52,8 +52,6 @@ #define QM_MB_EVENT_SHIFT 8 #define QM_MB_BUSY_SHIFT 13 #define QM_MB_OP_SHIFT 14 -#define QM_MB_CMD_DATA_ADDR_L 0x304 -#define QM_MB_CMD_DATA_ADDR_H 0x308 /* doorbell */ #define QM_DOORBELL_CMD_SQ 0 @@ -559,9 +557,9 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev); void hisi_qm_reset_done(struct pci_dev *pdev); int hisi_qm_wait_mb_ready(struct hisi_qm *qm); -int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, - bool op); - +int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, + u16 queue, bool op); +int hisi_qm_mb_read(struct hisi_qm *qm, u64 *msg, u8 cmd, u16 queue); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, -- 2.43.0
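
The race this patch closes is easiest to see side by side. A kernel-style sketch (names from the diff; not compilable on its own):

	static int get_sqc_base(struct hisi_qm *qm, u64 *addr)
	{
		/*
		 * Removed pattern: two independent 32-bit reads. A mailbox
		 * completion on another function can land between them, so
		 * the composed value may mix halves of two different replies:
		 *
		 *	*addr  = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L);
		 *	*addr |= (u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32;
		 */

		/* New pattern: both halves come from one 128-bit snapshot. */
		return hisi_qm_mb_read(qm, addr, QM_MB_CMD_SQC_BT, 0);
	}

Dropping qm_get_sqc()/qm_get_cqc() from the vfio driver in favor of the now-exported hisi_qm_mb_read() also removes the duplicated L/H composition code.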

driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC CVE: NA ---------------------------------------------------------------------- Before a function level reset, the driver first disables the device error report and then waits for the device reset to complete. However, when an error is recovered, the error report bits are enabled again, which invalidates the earlier disable. The flow is modified to ensure that no error is pending once the error report is disabled, and only then perform the FLR. Fixes: 7ce396fa12a9 ("crypto: hisilicon - add FLR support") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 48 ++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index db3e62b884df..36b23e3b6c46 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4849,22 +4849,30 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) u32 delay = 0; int ret; - qm_dev_err_uninit(pf_qm); - - /* - * Check whether there is an ECC mbit error, If it occurs, need to - * wait for soft reset to fix it. - */ - while (qm_check_dev_error(qm)) { - msleep(++delay); - if (delay > QM_RESET_WAIT_TIMEOUT) + while (true) { + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "FLR not ready!\n"); return; - } + } - ret = qm_reset_prepare_ready(qm); - if (ret) { - pci_err(pdev, "FLR not ready!\n"); - return; + qm_dev_err_uninit(pf_qm); + /* + * Check whether there is an ECC mbit error. + * If it occurs, we need to wait for the soft + * reset to fix it. + */ + if (qm_check_dev_error(qm)) { + qm_reset_bit_clear(qm); + if (delay > QM_RESET_WAIT_TIMEOUT) { + pci_err(pdev, "the hardware error was not recovered!\n"); + return; + } + + msleep(++delay); + } else { + break; + } } /* PF obtains the information of VF by querying the register. */ @@ -4878,9 +4886,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); - hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); - hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); - return; + goto err_prepare; } ret = qm_wait_vf_prepare_finish(qm); @@ -4888,6 +4894,14 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) pci_err(pdev, "failed to stop by vfs in FLR!\n"); pci_info(pdev, "FLR resetting...\n"); + return; + +err_prepare: + pci_info(pdev, "FLR reset prepare failed!\n"); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); + hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); + atomic_set(&qm->status.flags, QM_STOP); + hisi_qm_cache_wb(qm); } EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); -- 2.43.0
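
The new loop reduces to: disable error reporting, verify nothing is pending, and if an error is mid-recovery (which re-enables reporting), back off and redo the disable. A minimal userspace analogue (stand-in names; the reset-ready bookkeeping from the diff is dropped):

	#include <stdbool.h>
	#include <unistd.h>

	#define RESET_WAIT_TIMEOUT	400	/* stand-in for QM_RESET_WAIT_TIMEOUT */

	extern void disable_err_report(void);	/* stand-in for qm_dev_err_uninit() */
	extern bool dev_error_pending(void);	/* stand-in for qm_check_dev_error() */

	static int flr_prepare(void)
	{
		unsigned int delay = 0;

		for (;;) {
			disable_err_report();
			if (!dev_error_pending())
				return 0;	/* disable sticks: safe to start FLR */

			/* recovery re-enables reporting, so redo the disable next pass */
			if (delay > RESET_WAIT_TIMEOUT)
				return -1;	/* error never cleared */
			usleep(++delay * 1000);
		}
	}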