The mailbox of the Kunpeng accelerator is a special interface: 128 bits of data must be read from or written to the hardware in a single operation, and an operation already issued cannot be cancelled by software. The software flow therefore needs to be reworked to avoid mailbox operation errors. A condensed sketch of the access pattern follows.
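For illustration, here is a hedged, condensed sketch of the pattern this series adopts; it mirrors the qm_mb_read() helper added in patch 1, with the non-arm64 fallback and error handling omitted. The helper name mb_read_128() is illustrative only:

	/*
	 * Sketch only: two separate readl() calls can observe a torn value if
	 * the hardware updates the 128-bit mailbox between them; a single
	 * arm64 ldp performs one 128-bit access, so the snapshot is consistent.
	 */
	static void mb_read_128(const void __iomem *mb_base, void *dst)
	{
		unsigned long tmp0 = 0, tmp1 = 0;

		asm volatile("ldp %0, %1, %3\n"	/* one 128-bit load from MMIO */
			     "stp %0, %1, %2\n"	/* store the pair into dst */
			     "dmb oshst\n"	/* order the stores before later use */
			     : "=&r" (tmp0), "=&r" (tmp1),
			       "+Q" (*((char *)dst))
			     : "Q" (*((char __iomem *)mb_base))
			     : "memory");
	}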
Weili Qian (4):
  crypto: hisilicon/qm - obtain the mailbox configuration at one time
  vfio/migration: obtain the mailbox configuration at one time
  crypto: hisilicon/qm - fix the pf2vf timeout when device reset
  crypto: hisilicon/qm - alloc buffer to set and get xqc

 drivers/crypto/hisilicon/debugfs.c             |  75 +--
 .../hisilicon/migration/acc_vf_migration.c     | 176 +++---
 .../hisilicon/migration/acc_vf_migration.h     |   8 +-
 drivers/crypto/hisilicon/qm.c                  | 532 +++++++++---------
 drivers/crypto/hisilicon/qm_common.h           |   5 +-
 include/linux/hisi_acc_qm.h                    |  18 +-
 6 files changed, 429 insertions(+), 385 deletions(-)
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7BJW9
CVE: NA
----------------------------------------------------------------------
The mailbox configuration (128 bits) needs to be obtained from the hardware in a single access. If the configuration is read in multiple smaller accesses, the value may be torn and therefore incorrect. Use a single 128-bit load instruction (ldp on arm64) to read the mailbox data instead of separate readl() calls, as sketched below.
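Condensed from the qm_get_vft_v2() hunk below, the calling pattern changes roughly as follows (a sketch, not the full diff):

	/* Before: mailbox result read back as two 32-bit halves (can tear). */
	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

	/* After: the 64-bit result comes from the 128-bit mailbox snapshot
	 * that the new hisi_qm_mb_read() fetches atomically. */
	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);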
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/debugfs.c   |   6 +-
 drivers/crypto/hisilicon/qm.c        | 172 ++++++++++++++++-----------
 drivers/crypto/hisilicon/qm_common.h |   1 +
 include/linux/hisi_acc_qm.h          |   5 -
 4 files changed, 109 insertions(+), 75 deletions(-)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 318c8ac1ba5e..3875b5b06ec4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -155,7 +155,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
 	if (IS_ERR(sqc))
 		return PTR_ERR(sqc);
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
 	if (ret) {
 		down_read(&qm->qps_lock);
 		if (qm->sqc) {
@@ -196,7 +196,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
 	if (IS_ERR(cqc))
 		return PTR_ERR(cqc);
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
 	if (ret) {
 		down_read(&qm->qps_lock);
 		if (qm->cqc) {
@@ -242,7 +242,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
 	if (IS_ERR(xeqc))
 		return PTR_ERR(xeqc);
 
-	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+	ret = hisi_qm_mb_write(qm, cmd, xeqc_dma, 0, 1);
 	if (ret)
 		goto err_free_ctx;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 443d1edc0de6..c5eaa8877e74 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -33,6 +33,8 @@
 #define QM_MB_CMD_DATA_SHIFT	32
 #define QM_MB_CMD_DATA_MASK	GENMASK(31, 0)
 #define QM_MB_STATUS_MASK	GENMASK(12, 9)
+#define QM_MB_MAX_WAIT_CNT	6000
+#define QM_MB_WAIT_READY_CNT	10
 
 /* sqc shift */
 #define QM_SQ_HOP_NUM_SHIFT	0
@@ -594,17 +596,6 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
 	mailbox->rsvd = 0;
 }
 
-/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
-int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
-{
-	u32 val;
-
-	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
-					  val, !((val >> QM_MB_BUSY_SHIFT) &
-					  0x1), POLL_PERIOD, POLL_TIMEOUT);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
-
 /* 128 bit should be written to hardware at one time to trigger a mailbox */
 static void qm_mb_write(struct hisi_qm *qm, const void *src)
 {
@@ -627,57 +618,125 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
 		     : "memory");
 }
 
-static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+/* 128 bit should be read from hardware at one time */
+static void qm_mb_read(struct hisi_qm *qm, void *dst)
 {
-	int ret;
-	u32 val;
+	const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
+	unsigned long tmp0 = 0, tmp1 = 0;
 
-	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
-		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
-		ret = -EBUSY;
-		goto mb_busy;
+	if (!IS_ENABLED(CONFIG_ARM64)) {
+		memcpy_fromio(dst, fun_base, 16);
+		dma_wmb();
+		return;
 	}
 
-	qm_mb_write(qm, mailbox);
+	asm volatile("ldp %0, %1, %3\n"
+		     "stp %0, %1, %2\n"
+		     "dmb oshst\n"
+		     : "=&r" (tmp0),
+		       "=&r" (tmp1),
+		       "+Q" (*((char *)dst))
+		     : "Q" (*((char __iomem *)fun_base))
+		     : "memory");
+}
 
-	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
-		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
-		ret = -ETIMEDOUT;
-		goto mb_busy;
+static int qm_wait_mb_ready(struct hisi_qm *qm)
+{
+	struct qm_mailbox mailbox;
+	int i = 0;
+
+	while (i++ < QM_MB_WAIT_READY_CNT) {
+		qm_mb_read(qm, &mailbox);
+		if (!((le16_to_cpu(mailbox.w0) >> QM_MB_BUSY_SHIFT) & 0x1))
+			return 0;
+
+		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
 	}
-	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
-	if (val & QM_MB_STATUS_MASK) {
+	dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+
+	return -EBUSY;
+}
+
+static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+{
+	int i = 0;
+
+	while (++i) {
+		qm_mb_read(qm, mailbox);
+		if (!((le16_to_cpu(mailbox->w0) >> QM_MB_BUSY_SHIFT) & 0x1))
+			break;
+
+		if (i == QM_MB_MAX_WAIT_CNT) {
+			dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+			return -ETIMEDOUT;
+		}
+
+		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
+	}
+
+	if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
 		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
-		ret = -EIO;
-		goto mb_busy;
+		return -EIO;
 	}
 	return 0;
+}
 
-mb_busy:
+static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+{
+	int ret;
+
+	ret = qm_wait_mb_ready(qm);
+	if (ret)
+		goto mb_err_cnt_increase;
+
+	qm_mb_write(qm, mailbox);
+
+	ret = qm_wait_mb_finish(qm, mailbox);
+	if (ret)
+		goto mb_err_cnt_increase;
+
+	return 0;
+
+mb_err_cnt_increase:
 	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
 	return ret;
 }
 
-int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
-	       bool op)
+int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op)
 {
 	struct qm_mailbox mailbox;
 	int ret;
 
-	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
+	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
 		queue, cmd, (unsigned long long)dma_addr);
 
 	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
-
 	mutex_lock(&qm->mailbox_lock);
 	ret = qm_mb_nolock(qm, &mailbox);
 	mutex_unlock(&qm->mailbox_lock);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(hisi_qm_mb);
+
+static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
+{
+	struct qm_mailbox mailbox;
+	int ret;
+
+	qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
+	mutex_lock(&qm->mailbox_lock);
+	ret = qm_mb_nolock(qm, &mailbox);
+	mutex_unlock(&qm->mailbox_lock);
+	if (ret)
+		return ret;
+
+	*base = le32_to_cpu(mailbox.base_l) |
+		((u64)le32_to_cpu(mailbox.base_h) << 32);
+
+	return 0;
+}
 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
 {
@@ -1324,12 +1383,10 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
 	u64 sqc_vft;
 	int ret;
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
 	if (ret)
 		return ret;
 
-	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
 	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
 	*number = (QM_SQC_VFT_NUM_MASK_V2 &
 		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
@@ -1368,12 +1425,12 @@ void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
 
 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
 {
-	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
+	return hisi_qm_mb_write(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
 }
 
 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
 {
-	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
+	return hisi_qm_mb_write(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
 }
 
 static void qm_hw_error_init_v1(struct hisi_qm *qm)
@@ -1500,25 +1557,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 	return ACC_ERR_RECOVERED;
 }
 
-static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
-{
-	struct qm_mailbox mailbox;
-	int ret;
-
-	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
-	mutex_lock(&qm->mailbox_lock);
-	ret = qm_mb_nolock(qm, &mailbox);
-	if (ret)
-		goto err_unlock;
-
-	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
-
-err_unlock:
-	mutex_unlock(&qm->mailbox_lock);
-	return ret;
-}
-
 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
 {
 	u32 val;
@@ -1538,7 +1576,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
 	u64 msg;
 	int ret;
 
-	ret = qm_get_mb_cmd(qm, &msg, vf_id);
+	ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, vf_id);
 	if (ret) {
 		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
 		return;
@@ -1741,7 +1779,7 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
 
 static int qm_stop_qp(struct hisi_qp *qp)
 {
-	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+	return hisi_qm_mb_write(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
 }
 
 static int qm_set_msi(struct hisi_qm *qm, bool set)
@@ -2000,7 +2038,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 		return -ENOMEM;
 	}
-	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
 	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
 	kfree(sqc);
 
@@ -2041,7 +2079,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 		return -ENOMEM;
 	}
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
 	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
 	kfree(cqc);
 
@@ -3178,7 +3216,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 		return -ENOMEM;
 	}
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
 	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
 	kfree(eqc);
 
@@ -3207,7 +3245,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
 		return -ENOMEM;
 	}
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
 	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
 	kfree(aeqc);
 
@@ -3246,11 +3284,11 @@ static int __hisi_qm_start(struct hisi_qm *qm)
 	if (ret)
 		return ret;
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
 	if (ret)
 		return ret;
 
-	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
 	if (ret)
 		return ret;
@@ -4899,7 +4937,7 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
 	 * Whether message is got successfully,
 	 * VF needs to ack PF by clearing the interrupt.
 	 */
-	ret = qm_get_mb_cmd(qm, &msg, 0);
+	ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, 0);
 	qm_clear_cmd_interrupt(qm, 0);
 	if (ret) {
 		dev_err(dev, "failed to get msg from PF in reset done!\n");
@@ -4953,7 +4991,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
 	 * Get the msg from source by sending mailbox. Whether message is got
 	 * successfully, destination needs to ack source by clearing the interrupt.
 	 */
-	ret = qm_get_mb_cmd(qm, &msg, fun_num);
+	ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num);
 	qm_clear_cmd_interrupt(qm, BIT(fun_num));
 	if (ret) {
 		dev_err(dev, "failed to get msg from source!\n");
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
index 8e36aa9c681b..ab9d8150b3b6 100644
--- a/drivers/crypto/hisilicon/qm_common.h
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -82,5 +82,6 @@ void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
 			  const void *ctx_addr, dma_addr_t *dma_addr);
 void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
 void hisi_qm_set_algqos_init(struct hisi_qm *qm);
+int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op);
 
 #endif
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index b434e4f6111c..4a2fa879fa2c 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -53,7 +53,6 @@
 #define QM_MB_OP_SHIFT		14
 #define QM_MB_CMD_DATA_ADDR_L	0x304
 #define QM_MB_CMD_DATA_ADDR_H	0x308
-#define QM_MB_MAX_WAIT_CNT	6000
 
 /* doorbell */
 #define QM_DOORBELL_CMD_SQ	0
@@ -525,10 +524,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
 void hisi_qm_reset_prepare(struct pci_dev *pdev);
 void hisi_qm_reset_done(struct pci_dev *pdev);
 
-int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
-int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
-	       bool op);
-
 struct hisi_acc_sgl_pool;
 struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
 			struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7BJW9
CVE: NA
----------------------------------------------------------------------
The mailbox configuration (128 bits) needs to be obtained from the hardware in a single access. If the configuration is read in multiple smaller accesses, the value may be torn and therefore incorrect. Use a single 128-bit load instruction to read the mailbox data instead of separate readl() calls, this time in the VF live-migration driver. The portable fallback for non-arm64 builds is sketched below.
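One design note, excerpted from the qm_mb_read() hunk below: only arm64 provides a single 128-bit load here, so other architectures fall back to memcpy_fromio() plus a barrier and accept that the read is not atomic on those platforms:

	if (!IS_ENABLED(CONFIG_ARM64)) {
		/* No 128-bit load available: best-effort non-atomic copy. */
		memcpy_fromio(dst, fun_base, 16);
		dma_wmb();
		return;
	}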
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 .../hisilicon/migration/acc_vf_migration.c | 176 ++++++++++--------
 .../hisilicon/migration/acc_vf_migration.h |   8 +-
 2 files changed, 107 insertions(+), 77 deletions(-)
diff --git a/drivers/crypto/hisilicon/migration/acc_vf_migration.c b/drivers/crypto/hisilicon/migration/acc_vf_migration.c
index 7c5fc4eb02ac..54c02c44331f 100644
--- a/drivers/crypto/hisilicon/migration/acc_vf_migration.c
+++ b/drivers/crypto/hisilicon/migration/acc_vf_migration.c
@@ -18,16 +18,6 @@
 static struct dentry *mig_debugfs_root;
 static int mig_root_ref;
 
-/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
-static int qm_wait_mb_ready(struct hisi_qm *qm)
-{
-	u32 val;
-
-	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
-					  val, !((val >> QM_MB_BUSY_SHIFT) &
-					  0x1), POLL_PERIOD, POLL_TIMEOUT);
-}
-
 /* return 0 VM acc device ready, -ETIMEDOUT hardware timeout */
 static int qm_wait_dev_ready(struct hisi_qm *qm)
 {
@@ -37,7 +27,6 @@ static int qm_wait_dev_ready(struct hisi_qm *qm)
 				val, !(val & 0x1), POLL_PERIOD, POLL_TIMEOUT);
 }
 
-
 /* 128 bit should be written to hardware at one time to trigger a mailbox */
 static void qm_mb_write(struct hisi_qm *qm, const void *src)
 {
@@ -61,57 +50,129 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
 		     : "memory");
 }
 
+/* 128 bit should be read from hardware at one time */
+static void qm_mb_read(struct hisi_qm *qm, void *dst)
+{
+	const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
+	unsigned long tmp0 = 0, tmp1 = 0;
+
+	if (!IS_ENABLED(CONFIG_ARM64)) {
+		memcpy_fromio(dst, fun_base, 16);
+		dma_wmb();
+		return;
+	}
+
+	asm volatile("ldp %0, %1, %3\n"
+		     "stp %0, %1, %2\n"
+		     "dmb oshst\n"
+		     : "=&r" (tmp0),
+		       "=&r" (tmp1),
+		       "+Q" (*((char *)dst))
+		     : "Q" (*((char __iomem *)fun_base))
+		     : "memory");
+}
+
 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
-			   u16 queue, bool op)
+			   u64 base, u16 queue, bool op)
 {
-	mailbox->w0 = cpu_to_le16(cmd |
-		      (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
-		      (0x1 << QM_MB_BUSY_SHIFT));
+	mailbox->w0 = cpu_to_le16((cmd) |
+		      ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
+		      (0x1 << QM_MB_BUSY_SHIFT));
 	mailbox->queue_num = cpu_to_le16(queue);
+	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
+	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
 	mailbox->rsvd = 0;
 }
 
-static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+static int qm_wait_mb_ready(struct hisi_qm *qm)
 {
-	int cnt = 0;
+	struct qm_mailbox mailbox;
+	int i = 0;
 
-	if (unlikely(qm_wait_mb_ready(qm))) {
-		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
-		return -EBUSY;
+	while (i++ < QM_MB_WAIT_READY_CNT) {
+		qm_mb_read(qm, &mailbox);
+		if (!((le16_to_cpu(mailbox.w0) >> QM_MB_BUSY_SHIFT) & 0x1))
+			return 0;
+
+		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
 	}
-	qm_mb_write(qm, mailbox);
-	while (true) {
-		if (!qm_wait_mb_ready(qm))
+	return -EBUSY;
+}
+
+static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+{
+	int i = 0;
+
+	while (++i) {
+		qm_mb_read(qm, mailbox);
+		if (!((le16_to_cpu(mailbox->w0) >> QM_MB_BUSY_SHIFT) & 0x1))
 			break;
-		if (++cnt > QM_MB_MAX_WAIT_CNT) {
+
+		if (i == QM_MB_MAX_WAIT_CNT) {
 			dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
-			return -EBUSY;
+			return -ETIMEDOUT;
 		}
+
+		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
 	}
+
+	if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
+		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+		return -EIO;
+	}
+
 	return 0;
 }
-static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
-		 bool op)
+static int qm_mb(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 {
-	struct qm_mailbox mailbox;
 	int ret;
 
-	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
-		queue, cmd, (unsigned long long)dma_addr);
+	mutex_lock(&qm->mailbox_lock);
+	ret = qm_wait_mb_ready(qm);
+	if (ret)
+		goto unlock;
 
-	qm_mb_pre_init(&mailbox, cmd, queue, op);
-	mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
-	mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
+	qm_mb_write(qm, mailbox);
+	ret = qm_wait_mb_finish(qm, mailbox);
 
-	mutex_lock(&qm->mailbox_lock);
-	ret = qm_mb_nolock(qm, &mailbox);
+unlock:
 	mutex_unlock(&qm->mailbox_lock);
 
 	return ret;
 }
 
+static int qm_config_set(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr,
+			 u16 queue, bool op)
+{
+	struct qm_mailbox mailbox;
+
+	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
+		queue, cmd, (unsigned long long)dma_addr);
+
+	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
+
+	return qm_mb(qm, &mailbox);
+}
+
+static int qm_config_get(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
+{
+	struct qm_mailbox mailbox;
+	int ret;
+
+	qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
+
+	ret = qm_mb(qm, &mailbox);
+	if (ret)
+		return ret;
+
+	*base = le32_to_cpu(mailbox.base_l) |
+		((u64)le32_to_cpu(mailbox.base_h) << 32);
+
+	return 0;
+}
+
 /*
  * Each state Reg is checked 100 times,
  * with a delay of 100 microseconds after each check
@@ -230,13 +291,10 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
 	u64 sqc_vft;
 	int ret;
 
-	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+	ret = qm_config_get(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
 	if (ret)
 		return ret;
 
-	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-		  QM_XQC_ADDR_OFFSET);
 	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
 	*number = (QM_SQC_VFT_NUM_MASK_V2 &
 		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
@@ -244,36 +302,6 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
 	return 0;
 }
-static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
-{
-	int ret;
-
-	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
-	if (ret)
-		return ret;
-
-	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-		QM_XQC_ADDR_OFFSET);
-
-	return 0;
-}
-
-static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
-{
-	int ret;
-
-	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
-	if (ret)
-		return ret;
-
-	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-		QM_XQC_ADDR_OFFSET);
-
-	return 0;
-}
-
 static int qm_rw_regs_read(struct hisi_qm *qm, struct acc_vf_data *vf_data)
 {
 	struct device *dev = &qm->pdev->dev;
@@ -441,13 +469,13 @@ static int vf_migration_data_store(struct hisi_qm *qm,
 	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
 
 	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
-	ret = qm_get_sqc(qm, &vf_data->sqc_dma);
+	ret = qm_config_get(qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0);
 	if (ret) {
 		dev_err(dev, "failed to read SQC addr!\n");
 		return -EINVAL;
 	}
 
-	ret = qm_get_cqc(qm, &vf_data->cqc_dma);
+	ret = qm_config_get(qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0);
 	if (ret) {
 		dev_err(dev, "failed to read CQC addr!\n");
 		return -EINVAL;
 	}
@@ -569,13 +597,13 @@ static int vf_migration_data_recover(struct hisi_qm *qm,
 		return ret;
 	}
 
-	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+	ret = qm_config_set(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
 	if (ret) {
 		dev_err(dev, "Set sqc failed!\n");
 		return ret;
 	}
 
-	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+	ret = qm_config_set(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
 	if (ret) {
 		dev_err(dev, "Set cqc failed!\n");
 		return ret;
 	}
@@ -604,7 +632,7 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
 
 static int vf_qm_func_stop(struct hisi_qm *qm)
 {
-	return qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
+	return qm_config_set(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
 }
 
 static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id,
diff --git a/drivers/crypto/hisilicon/migration/acc_vf_migration.h b/drivers/crypto/hisilicon/migration/acc_vf_migration.h
index 8b38b9943d35..5b9c91b9687b 100644
--- a/drivers/crypto/hisilicon/migration/acc_vf_migration.h
+++ b/drivers/crypto/hisilicon/migration/acc_vf_migration.h
@@ -58,9 +58,11 @@
 #define QM_MB_CMD_SEND_BASE		0x300
 #define QM_MB_BUSY_SHIFT		13
 #define QM_MB_OP_SHIFT			14
-#define QM_MB_CMD_DATA_ADDR_L		0x304
-#define QM_MB_CMD_DATA_ADDR_H		0x308
-#define QM_MB_MAX_WAIT_CNT		6000
+#define QM_MB_WAIT_READY_CNT		10
+#define QM_MB_MAX_WAIT_CNT		3000
+#define WAIT_PERIOD_US_MIN		100
+#define WAIT_PERIOD_US_MAX		200
+#define QM_MB_STATUS_MASK		GENMASK(12, 9)
 
 /* doorbell */
 #define QM_DOORBELL_CMD_SQ		0
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7BJW9
CVE: NA
----------------------------------------------------------------------
When functions communicate with each other and the mailbox operation fails, the function cannot obtain the message from the communication source. If the VF misses the stop-function message sent by the PF during reset, the VF becomes unavailable.

For the device reset scenario:
1. Add the QM_DEVICE_DOWN state. Check it before I/O operations to avoid mailbox-busy errors during communication.
2. When the VF fails to obtain the PF's message from the mailbox, treat the failure as a device reset and stop the function directly. When the PF fails to send the reset message to a VF, still raise the interrupt event to the VF.
3. Increase the time the PF and VF wait for each other's response, as sketched below.
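For point 3, a rough sketch of the enlarged wait budget, adapted from the qm_ping_pf() polling loop in the diff below: with QM_WAIT_DST_ACK raised to 100 ms, the VF now waits up to 100 ms x QM_MAX_VF_WAIT_COUNT (100) = ~10 s for the PF's ack, and the PF up to 100 ms x QM_MAX_PF_WAIT_COUNT (50) = ~5 s, instead of the previous 10 ms x 40 = ~0.4 s and 10 ms x 10 = ~0.1 s:

	while (true) {
		msleep(QM_WAIT_DST_ACK);	/* now 100 ms per poll */
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;			/* destination has acked */

		if (++cnt > QM_MAX_VF_WAIT_COUNT)
			return -ETIMEDOUT;	/* ~10 s total */
	}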
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/qm.c | 40 ++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index c5eaa8877e74..5867c69a8ff8 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -176,9 +176,9 @@
 #define QM_IFC_INT_DISABLE		BIT(0)
 #define QM_IFC_INT_STATUS_MASK		BIT(0)
 #define QM_IFC_INT_SET_MASK		BIT(0)
-#define QM_WAIT_DST_ACK			10
-#define QM_MAX_PF_WAIT_COUNT		10
-#define QM_MAX_VF_WAIT_COUNT		40
+#define QM_WAIT_DST_ACK			100
+#define QM_MAX_PF_WAIT_COUNT		50
+#define QM_MAX_VF_WAIT_COUNT		100
 #define QM_VF_RESET_WAIT_US		20000
 #define QM_VF_RESET_WAIT_CNT		3000
 #define QM_VF_RESET_WAIT_TIMEOUT_US	\
@@ -535,6 +535,9 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
 {
 	u32 val, dev_val;
 
+	if (test_bit(QM_DEVICE_DOWN, &qm->misc_ctl))
+		return true;
+
 	if (qm->fun_type == QM_HW_VF)
 		return false;
 
@@ -706,12 +709,19 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 
 int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op)
 {
+	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
 	struct qm_mailbox mailbox;
 	int ret;
 
 	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
 		queue, cmd, (unsigned long long)dma_addr);
+	/* No need to judge if master OOO is blocked. */
+	if (qm_check_dev_error(pf_qm)) {
+		dev_err(&qm->pdev->dev, "QM mailbox operation failed since qm is stopped!\n");
+		return -EIO;
+	}
+
 	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
 	mutex_lock(&qm->mailbox_lock);
 	ret = qm_mb_nolock(qm, &mailbox);
@@ -1712,8 +1722,8 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
 	mutex_lock(&qm->mailbox_lock);
 	/* PF sends command to all VFs by mailbox */
 	ret = qm_mb_nolock(qm, &mailbox);
-	if (ret) {
-		dev_err(dev, "failed to send command to VFs!\n");
+	if (ret && cmd != QM_PF_FLR_PREPARE && cmd != QM_PF_SRST_PREPARE) {
+		dev_err(dev, "failed to send command to all vfs, cmd = %llu!\n", cmd);
 		mutex_unlock(&qm->mailbox_lock);
 		return ret;
 	}
@@ -1753,8 +1763,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
 	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
 	mutex_lock(&qm->mailbox_lock);
 	ret = qm_mb_nolock(qm, &mailbox);
-	if (ret) {
-		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+	if (ret && (cmd > QM_VF_START_FAIL || cmd < QM_VF_PREPARE_DONE)) {
+		dev_err(&qm->pdev->dev, "failed to send command to PF, cmd = %llu!\n", cmd);
 		goto unlock;
 	}
@@ -1763,8 +1773,10 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
 	while (true) {
 		msleep(QM_WAIT_DST_ACK);
 		val = readl(qm->io_base + QM_IFC_INT_SET_V);
-		if (!(val & QM_IFC_INT_STATUS_MASK))
+		if (!(val & QM_IFC_INT_STATUS_MASK)) {
+			ret = 0;
 			break;
+		}
 
 		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
 			ret = -ETIMEDOUT;
@@ -4969,6 +4981,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm,
 	if (ret)
 		goto err_get_status;
 
+	clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl);
 	qm_pf_reset_vf_done(qm);
 
 	dev_info(dev, "device reset done.\n");
@@ -4976,6 +4989,7 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm,
 	return;
 err_get_status:
+	clear_bit(QM_DEVICE_DOWN, &qm->misc_ctl);
 	qm_cmd_init(qm);
 	qm_reset_bit_clear(qm);
 }
@@ -4994,8 +5008,13 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
 	ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num);
 	qm_clear_cmd_interrupt(qm, BIT(fun_num));
 	if (ret) {
-		dev_err(dev, "failed to get msg from source!\n");
-		return;
+		if (!fun_num) {
+			msg = QM_PF_SRST_PREPARE;
+			dev_info(dev, "failed to get response from PF, suppose it is a soft reset!\n");
+		} else {
+			dev_err(dev, "failed to get msg from source!\n");
+			return;
+		}
 	}
 	cmd = msg & QM_MB_CMD_DATA_MASK;
@@ -5004,6 +5023,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
 		qm_pf_reset_vf_process(qm, QM_DOWN);
 		break;
 	case QM_PF_SRST_PREPARE:
+		set_bit(QM_DEVICE_DOWN, &qm->misc_ctl);
 		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
 		break;
 	case QM_VF_GET_QOS:
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7BJW9
CVE: NA
----------------------------------------------------------------------
If temporarily allocated memory is used to set or get the xqc information, the driver frees that memory once waiting for the mailbox times out. The hardware, however, may still complete the operation afterwards and write to the already-freed memory. Therefore, allocate a reserved buffer for the xqc configuration when the driver is loaded and reuse it for every xqc mailbox operation, as in the usage sketch below.
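Usage sketch, mirroring the new debugfs path in this patch: callers now work on a stack copy, and qm_set_and_get_xqc() stages it through the long-lived reserved DMA buffer under qm->mailbox_lock, so even a timed-out mailbox only ever leaves the hardware writing memory that is never freed early:

	struct qm_sqc sqc;
	int ret;

	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); /* op=1: get */
	if (!ret)
		dump_show(qm, &sqc, sizeof(struct qm_sqc), name);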
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/debugfs.c   |  75 +++---
 drivers/crypto/hisilicon/qm.c        | 336 ++++++++++++---------------
 drivers/crypto/hisilicon/qm_common.h |   6 +-
 include/linux/hisi_acc_qm.h          |  13 ++
 4 files changed, 195 insertions(+), 235 deletions(-)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 3875b5b06ec4..88885268851b 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -137,8 +137,8 @@ static void dump_show(struct hisi_qm *qm, void *info,
 static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
 	struct device *dev = &qm->pdev->dev;
-	struct qm_sqc *sqc, *sqc_curr;
-	dma_addr_t sqc_dma;
+	struct qm_sqc *sqc_curr;
+	struct qm_sqc sqc;
 	u32 qp_id;
 	int ret;
 
@@ -151,35 +151,29 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
 		return -EINVAL;
 	}
 
-	sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
-	if (IS_ERR(sqc))
-		return PTR_ERR(sqc);
+	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
+	if (!ret) {
+		dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
 
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
-	if (ret) {
-		down_read(&qm->qps_lock);
-		if (qm->sqc) {
-			sqc_curr = qm->sqc + qp_id;
+		return 0;
+	}
 
-			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
-		}
-		up_read(&qm->qps_lock);
+	down_read(&qm->qps_lock);
+	if (qm->sqc) {
+		sqc_curr = qm->sqc + qp_id;
 
-		goto free_ctx;
+		dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
 	}
+	up_read(&qm->qps_lock);
 
-	dump_show(qm, sqc, sizeof(*sqc), name);
-
-free_ctx:
-	hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
 	return 0;
 }
 static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
 	struct device *dev = &qm->pdev->dev;
-	struct qm_cqc *cqc, *cqc_curr;
-	dma_addr_t cqc_dma;
+	struct qm_cqc *cqc_curr;
+	struct qm_cqc cqc;
 	u32 qp_id;
 	int ret;
 
@@ -192,34 +186,29 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
 		return -EINVAL;
 	}
 
-	cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
-	if (IS_ERR(cqc))
-		return PTR_ERR(cqc);
+	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
+	if (!ret) {
+		dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
 
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
-	if (ret) {
-		down_read(&qm->qps_lock);
-		if (qm->cqc) {
-			cqc_curr = qm->cqc + qp_id;
+		return 0;
+	}
 
-			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
-		}
-		up_read(&qm->qps_lock);
+	down_read(&qm->qps_lock);
+	if (qm->cqc) {
+		cqc_curr = qm->cqc + qp_id;
 
-		goto free_ctx;
+		dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
 	}
+	up_read(&qm->qps_lock);
 
-	dump_show(qm, cqc, sizeof(*cqc), name);
-
-free_ctx:
-	hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
 	return 0;
 }
 
 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
 	struct device *dev = &qm->pdev->dev;
-	dma_addr_t xeqc_dma;
+	struct qm_aeqc aeqc;
+	struct qm_eqc eqc;
 	size_t size;
 	void *xeqc;
 	int ret;
@@ -233,23 +222,19 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
 	if (!strcmp(name, "EQC")) {
 		cmd = QM_MB_CMD_EQC;
 		size = sizeof(struct qm_eqc);
+		xeqc = &eqc;
 	} else {
 		cmd = QM_MB_CMD_AEQC;
 		size = sizeof(struct qm_aeqc);
+		xeqc = &aeqc;
 	}
 
-	xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
-	if (IS_ERR(xeqc))
-		return PTR_ERR(xeqc);
-
-	ret = hisi_qm_mb_write(qm, cmd, xeqc_dma, 0, 1);
+	ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1);
 	if (ret)
-		goto err_free_ctx;
+		return ret;
 
 	dump_show(qm, xeqc, size, name);
 
-err_free_ctx:
-	hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
 	return ret;
 }
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 5867c69a8ff8..ac3ac03294d0 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -48,7 +48,7 @@
 #define QM_QC_PASID_ENABLE_SHIFT	7
 
 #define QM_SQ_TYPE_MASK			GENMASK(3, 0)
-#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
+#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc).w11) >> 6) & 0x1)
 
 /* cqc shift */
 #define QM_CQ_HOP_NUM_SHIFT	0
@@ -60,7 +60,7 @@
 
 #define QM_CQE_PHASE(cqe)	(le16_to_cpu((cqe)->w7) & 0x1)
 #define QM_QC_CQE_SIZE		4
-#define QM_CQ_TAIL_IDX(cqc)	((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
+#define QM_CQ_TAIL_IDX(cqc)	((le16_to_cpu((cqc).w11) >> 6) & 0x1)
 
 /* eqc shift */
 #define QM_EQE_AEQE_SIZE	(2UL << 12)
@@ -252,19 +252,6 @@
 #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
 	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
 
-#define INIT_QC_COMMON(qc, base, pasid) do {			\
-	(qc)->head = 0;						\
-	(qc)->tail = 0;						\
-	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
-	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
-	(qc)->dw3 = 0;						\
-	(qc)->w8 = 0;						\
-	(qc)->rsvd0 = 0;					\
-	(qc)->pasid = cpu_to_le16(pasid);			\
-	(qc)->w11 = 0;						\
-	(qc)->rsvd1 = 0;					\
-} while (0)
-
 enum vft_type {
 	SQC_VFT = 0,
 	CQC_VFT,
@@ -707,7 +694,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 	return ret;
 }
 
-int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op)
+static int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op)
 {
 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
 	struct qm_mailbox mailbox;
@@ -748,6 +735,59 @@ static int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
 	return 0;
 }
+/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
+int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
+{
+	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+	struct qm_mailbox mailbox;
+	dma_addr_t xqc_dma;
+	void *tmp_xqc;
+	size_t size;
+	int ret;
+
+	switch (cmd) {
+	case QM_MB_CMD_SQC:
+		size = sizeof(struct qm_sqc);
+		tmp_xqc = qm->xqc_buf.sqc;
+		xqc_dma = qm->xqc_buf.sqc_dma;
+		break;
+	case QM_MB_CMD_CQC:
+		size = sizeof(struct qm_cqc);
+		tmp_xqc = qm->xqc_buf.cqc;
+		xqc_dma = qm->xqc_buf.cqc_dma;
+		break;
+	case QM_MB_CMD_EQC:
+		size = sizeof(struct qm_eqc);
+		tmp_xqc = qm->xqc_buf.eqc;
+		xqc_dma = qm->xqc_buf.eqc_dma;
+		break;
+	case QM_MB_CMD_AEQC:
+		size = sizeof(struct qm_aeqc);
+		tmp_xqc = qm->xqc_buf.aeqc;
+		xqc_dma = qm->xqc_buf.aeqc_dma;
+		break;
+	default:
+		dev_err(&qm->pdev->dev, "unknown mailbox cmd %u!\n", cmd);
+		return -EINVAL;
+	}
+
+	/* No need to judge if master OOO is blocked. */
+	if (qm_check_dev_error(pf_qm)) {
+		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
+		return -EIO;
+	}
+
+	mutex_lock(&qm->mailbox_lock);
+	if (!op)
+		memcpy(tmp_xqc, xqc, size);
+
+	qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
+	ret = qm_mb_nolock(qm, &mailbox);
+	if (!ret && op)
+		memcpy(xqc, tmp_xqc, size);
+
+	mutex_unlock(&qm->mailbox_lock);
+
+	return ret;
+}
+
 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
 {
 	u64 doorbell;
@@ -1404,45 +1444,6 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
 	return 0;
 }
-void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
-			dma_addr_t *dma_addr)
-{
-	struct device *dev = &qm->pdev->dev;
-	void *ctx_addr;
-
-	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
-	if (!ctx_addr)
-		return ERR_PTR(-ENOMEM);
-
-	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, *dma_addr)) {
-		dev_err(dev, "DMA mapping error!\n");
-		kfree(ctx_addr);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return ctx_addr;
-}
-
-void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
-		      const void *ctx_addr, dma_addr_t *dma_addr)
-{
-	struct device *dev = &qm->pdev->dev;
-
-	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
-	kfree(ctx_addr);
-}
-
-static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
-{
-	return hisi_qm_mb_write(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
-}
-
-static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
-{
-	return hisi_qm_mb_write(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
-}
-
 static void qm_hw_error_init_v1(struct hisi_qm *qm)
 {
 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -2018,84 +2019,55 @@ static void hisi_qm_release_qp(struct hisi_qp *qp)
 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 {
 	struct hisi_qm *qm = qp->qm;
-	struct device *dev = &qm->pdev->dev;
 	enum qm_hw_ver ver = qm->ver;
-	struct qm_sqc *sqc;
-	dma_addr_t sqc_dma;
-	int ret;
+	struct qm_sqc sqc = {0};
 
-	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
-	if (!sqc)
-		return -ENOMEM;
-
-	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
 	if (ver == QM_HW_V1) {
-		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
-		sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
+		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
+		sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
 	} else {
-		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
-		sqc->w8 = 0; /* rand_qc */
+		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
+		sqc.w8 = 0; /* rand_qc */
 	}
-	sqc->cq_num = cpu_to_le16(qp_id);
-	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+	sqc.cq_num = cpu_to_le16(qp_id);
+	sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+	sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
+	sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
+	sqc.pasid = cpu_to_le16(pasid);
 
 	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
-		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
-				       QM_QC_PASID_ENABLE_SHIFT);
+		sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+				      QM_QC_PASID_ENABLE_SHIFT);
 
-	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, sqc_dma)) {
-		kfree(sqc);
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
-	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
-	kfree(sqc);
-
-	return ret;
+	return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
 }
 
 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 {
 	struct hisi_qm *qm = qp->qm;
-	struct device *dev = &qm->pdev->dev;
 	enum qm_hw_ver ver = qm->ver;
-	struct qm_cqc *cqc;
-	dma_addr_t cqc_dma;
-	int ret;
-
-	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
-	if (!cqc)
-		return -ENOMEM;
+	struct qm_cqc cqc = {0};
-	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
 	if (ver == QM_HW_V1) {
-		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
-							QM_QC_CQE_SIZE));
-		cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
+		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
+						       QM_QC_CQE_SIZE));
+		cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
 	} else {
-		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
-		cqc->w8 = 0; /* rand_qc */
+		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
+		cqc.w8 = 0; /* rand_qc */
 	}
-	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+	cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+	cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
+	cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
+	cqc.pasid = cpu_to_le16(pasid);
 	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
-		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
+		cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
 
-	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, cqc_dma)) {
-		kfree(cqc);
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
-	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
-	kfree(cqc);
-
-	return ret;
+	return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
 }
 
 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
@@ -2178,57 +2150,41 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
 
 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
 {
-	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
 	struct device *dev = &qm->pdev->dev;
-	struct qm_sqc *sqc;
-	struct qm_cqc *cqc;
-	dma_addr_t dma_addr;
-	void *addr;
-
+	struct qm_sqc sqc;
+	struct qm_cqc cqc;
 	int ret = 0;
 	int i = 0;
 
-	addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
-	if (IS_ERR(addr)) {
-		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
-		*state = ALLOC_CTX_FAIL;
-		return -ENOMEM;
-	}
-
 	while (++i) {
-		ret = qm_dump_sqc_raw(qm, dma_addr, qp_id);
+		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
 		if (ret) {
 			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
 			*state = DUMP_SQC_FAIL;
-			break;
+			return ret;
 		}
-		sqc = addr;
 
-		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), qp_id);
+		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
 		if (ret) {
 			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
 			*state = DUMP_CQC_FAIL;
-			break;
+			return ret;
 		}
-		cqc = addr + sizeof(struct qm_sqc);
 
-		if ((sqc->tail == cqc->tail) &&
+		if ((sqc.tail == cqc.tail) &&
 		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
-			break;
+			return 0;
 
 		if (i == MAX_WAIT_COUNTS) {
 			dev_err(dev, "Fail to empty queue %u!\n", qp_id);
 			*state = STOP_QUEUE_FAIL;
-			ret = -EBUSY;
-			break;
+			return -ETIMEDOUT;
 		}
 
 		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
 	}
 
-	hisi_qm_ctx_free(qm, size, addr, &dma_addr);
-
-	return ret;
+	return 0;
 }
 /**
@@ -3080,11 +3036,20 @@ static void hisi_qm_unint_work(struct hisi_qm *qm)
 	destroy_workqueue(qm->wq);
 }
 
+static void hisi_qm_free_rsv_buf(struct hisi_qm *qm)
+{
+	struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma;
+	struct device *dev = &qm->pdev->dev;
+
+	dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma);
+}
+
 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
 	hisi_qp_memory_uninit(qm, qm->qp_num);
+	hisi_qm_free_rsv_buf(qm);
 	if (qm->qdma.va) {
 		hisi_qm_cache_wb(qm);
 		dma_free_coherent(dev, qm->qdma.size,
@@ -3206,62 +3171,26 @@ static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
 
 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 {
-	struct device *dev = &qm->pdev->dev;
-	struct qm_eqc *eqc;
-	dma_addr_t eqc_dma;
-	int ret;
-
-	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
-	if (!eqc)
-		return -ENOMEM;
+	struct qm_eqc eqc = {0};
 
-	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
-	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
+	eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
+	eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
 	if (qm->ver == QM_HW_V1)
-		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
-	eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
+		eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
+	eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
-	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, eqc_dma)) {
-		kfree(eqc);
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
-	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
-	kfree(eqc);
-
-	return ret;
+	return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
 }
 
 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
 {
-	struct device *dev = &qm->pdev->dev;
-	struct qm_aeqc *aeqc;
-	dma_addr_t aeqc_dma;
-	int ret;
+	struct qm_aeqc aeqc = {0};
 
-	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
-	if (!aeqc)
-		return -ENOMEM;
+	aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+	aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+	aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
-	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
-	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
-	aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
-	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
-				  DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, aeqc_dma)) {
-		kfree(aeqc);
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_mb_write(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
-	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
-	kfree(aeqc);
-
-	return ret;
+	return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
 }
 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
@@ -5515,6 +5444,37 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 	return ret;
 }
 
+static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
+{
+	struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
+	struct qm_dma *xqc_dma = &xqc_buf->qcdma;
+	struct device *dev = &qm->pdev->dev;
+	size_t off = 0;
+
+#define QM_XQC_BUF_INIT(xqc_buf, type) do { \
+	(xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \
+	(xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \
+	off += QMC_ALIGN(sizeof(struct qm_##type)); \
+} while (0)
+
+	xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) +
+			QMC_ALIGN(sizeof(struct qm_aeqc)) +
+			QMC_ALIGN(sizeof(struct qm_sqc)) +
+			QMC_ALIGN(sizeof(struct qm_cqc));
+	xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
+					 &xqc_dma->dma, GFP_ATOMIC);
+	if (!xqc_dma->va)
+		return -ENOMEM;
+
+	QM_XQC_BUF_INIT(xqc_buf, eqc);
+	QM_XQC_BUF_INIT(xqc_buf, aeqc);
+	QM_XQC_BUF_INIT(xqc_buf, sqc);
+	QM_XQC_BUF_INIT(xqc_buf, cqc);
+
+	return 0;
+}
+
 static int hisi_qm_memory_init(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
@@ -5556,13 +5516,19 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
 	QM_INIT_BUF(qm, sqc, qm->qp_num);
 	QM_INIT_BUF(qm, cqc, qm->qp_num);
 
+	ret = hisi_qm_alloc_rsv_buf(qm);
+	if (ret)
+		goto err_free_qdma;
+
 	ret = hisi_qp_alloc_memory(qm);
 	if (ret)
-		goto err_alloc_qp_array;
+		goto err_free_reserve_buf;
 
 	return 0;
 
-err_alloc_qp_array:
+err_free_reserve_buf:
+	hisi_qm_free_rsv_buf(qm);
+err_free_qdma:
 	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
 err_destroy_idr:
 	idr_destroy(&qm->qp_idr);
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
index ab9d8150b3b6..7b0b15c83ec1 100644
--- a/drivers/crypto/hisilicon/qm_common.h
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -76,12 +76,8 @@ static const char * const qm_s[] = {
 	"init", "start", "close", "stop",
 };
 
-void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
-			dma_addr_t *dma_addr);
-void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
-		      const void *ctx_addr, dma_addr_t *dma_addr);
+int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
 void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
 void hisi_qm_set_algqos_init(struct hisi_qm *qm);
-int hisi_qm_mb_write(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op);
 
 #endif
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 4a2fa879fa2c..829a1be28c87 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -308,6 +308,18 @@ struct qm_err_isolate {
 	struct list_head qm_hw_errs;
 };
 
+struct qm_rsv_buf {
+	struct qm_sqc *sqc;
+	struct qm_cqc *cqc;
+	struct qm_eqc *eqc;
+	struct qm_aeqc *aeqc;
+	dma_addr_t sqc_dma;
+	dma_addr_t cqc_dma;
+	dma_addr_t eqc_dma;
+	dma_addr_t aeqc_dma;
+	struct qm_dma qcdma;
+};
+
 struct hisi_qm {
 	enum qm_hw_ver ver;
 	enum qm_fun_type fun_type;
@@ -340,6 +352,7 @@ struct hisi_qm {
 	dma_addr_t cqc_dma;
 	dma_addr_t eqe_dma;
 	dma_addr_t aeqe_dma;
+	struct qm_rsv_buf xqc_buf;
 
 	struct hisi_qm_status status;
 	const struct hisi_qm_err_ini *err_ini;