From: JiangShui Yang <yangjiangshui@h-partners.com>
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I94L2V
CVE: NA
----------------------------------------------------------------------
Support registering the HiSilicon Kunpeng sec/zip/hpre accelerators with the UACCE subsystem in UACCE_MODE_NOIOMMU mode, so that users can perform encryption, decryption, compression and decompression directly from user space.
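A minimal user-space sketch of how such a queue is consumed through the generic
UACCE char-device interface (illustrative only and not part of this patch: the
device node name, region sizes and error handling are assumptions; region sizes
are normally read from the device's uacce sysfs attributes):

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>
  #include <misc/uacce/uacce.h>	/* UACCE_CMD_START_Q, UACCE_QFRT_* (installed uapi header) */

  int main(void)
  {
  	long pg = sysconf(_SC_PAGESIZE);
  	size_t mmio_size = 4 * pg;	/* example sizes only */
  	size_t dus_size = 16 * pg;
  	int fd = open("/dev/hisi_sec2-0", O_RDWR);	/* example node created by uacce_register() */

  	if (fd < 0)
  		return 1;

  	/* The mmap offset, in pages, selects the queue file region type. */
  	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE, MAP_SHARED,
  			  fd, UACCE_QFRT_MMIO * pg);
  	void *dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE, MAP_SHARED,
  			 fd, UACCE_QFRT_DUS * pg);
  	if (mmio == MAP_FAILED || dus == MAP_FAILED)
  		return 1;

  	/*
  	 * Start the queue; SQEs are then written into the DUS region and
  	 * doorbells are rung through the MMIO region.
  	 */
  	ioctl(fd, UACCE_CMD_START_Q);

  	close(fd);
  	return 0;
  }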
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
Signed-off-by: Weili Qian <qianweili@huawei.com>
---
 drivers/crypto/hisilicon/hpre/hpre_main.c |   1 -
 drivers/crypto/hisilicon/qm.c             | 191 ++++++++++++++--------
 drivers/crypto/hisilicon/sec2/sec_main.c  |  11 +-
 drivers/crypto/hisilicon/zip/zip_main.c   |  11 +-
 include/linux/hisi_acc_qm.h               |   5 +-
 5 files changed, 136 insertions(+), 83 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index d93aa6630a57..e970dd629234 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -10,7 +10,6 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/topology.h>
-#include <linux/uacce.h>
 #include "hpre.h"
 
 #define HPRE_QM_ABNML_INT_MASK		0x100004
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 543074742db5..d9caff12cd4d 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -850,6 +850,33 @@ static void qm_cq_head_update(struct hisi_qp *qp)
 	}
 }
 
+static void qm_poll_user_event_cb(struct hisi_qp *qp)
+{
+	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+	struct uacce_queue *q = qp->uacce_q;
+	bool updated = false;
+
+	/*
+	 * If multiple threads poll one queue, each thread produces one
+	 * event, so query a single cqe and break out of the loop. If
+	 * only one thread polls the queue, query all cqes to ensure
+	 * that the queue is clean on the next poll.
+	 */
+	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
+		dma_rmb();
+		qm_cq_head_update(qp);
+		cqe = qp->cqe + qp->qp_status.cq_head;
+		updated = true;
+		if (!wq_has_single_sleeper(&q->wait))
+			break;
+	}
+
+	if (updated) {
+		atomic_inc(&qp->qp_status.complete_task);
+		qp->event_cb(qp);
+	}
+}
+
 static void qm_poll_req_cb(struct hisi_qp *qp)
 {
 	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
@@ -887,7 +914,7 @@ static void qm_work_process(struct work_struct *work)
 			continue;
 
 		if (qp->event_cb) {
-			qp->event_cb(qp);
+			qm_poll_user_event_cb(qp);
 			continue;
 		}
 
@@ -1074,6 +1101,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
 	qp_status->cq_head = 0;
 	qp_status->cqc_phase = true;
 	atomic_set(&qp_status->used, 0);
+	atomic_set(&qp_status->complete_task, 0);
 }
 
 static void qm_init_prefetch(struct hisi_qm *qm)
@@ -2230,7 +2258,7 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
 
 static void qm_qp_event_notifier(struct hisi_qp *qp)
 {
-	wake_up_interruptible(&qp->uacce_q->wait);
+	uacce_wake_up(qp->uacce_q);
 }
 
 /* This function returns free number of qp in qm. */
@@ -2376,18 +2404,8 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
 static int hisi_qm_is_q_updated(struct uacce_queue *q)
 {
 	struct hisi_qp *qp = q->priv;
-	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
-	int updated = 0;
-
-	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
-		/* make sure to read data from memory */
-		dma_rmb();
-		qm_cq_head_update(qp);
-		cqe = qp->cqe + qp->qp_status.cq_head;
-		updated = 1;
-	}
 
-	return updated;
+	return atomic_add_unless(&qp->qp_status.complete_task, -1, 0);
 }
 
 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
@@ -2418,7 +2436,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 		qm_set_sqctype(q, qp_ctx.qc_type);
 		qp_ctx.id = qp->qp_id;
 
-		if (copy_to_user((void __user *)arg, &qp_ctx,
+		if (copy_to_user((void __user *)(uintptr_t)arg, &qp_ctx,
 				 sizeof(struct hisi_qp_ctx)))
 			return -EFAULT;
@@ -2442,6 +2460,66 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 	return -EINVAL;
 }
 
+static void qm_uacce_api_ver_init(struct hisi_qm *qm)
+{
+	struct uacce_device *uacce = qm->uacce;
+
+	if (uacce->flags & UACCE_DEV_IOMMU) {
+		qm->use_sva = uacce->flags & UACCE_DEV_SVA ? true : false;
+
+		if (qm->ver == QM_HW_V1)
+			uacce->api_ver = HISI_QM_API_VER_BASE;
+		else if (qm->ver == QM_HW_V2)
+			uacce->api_ver = HISI_QM_API_VER2_BASE;
+		else
+			uacce->api_ver = HISI_QM_API_VER3_BASE;
+	} else {
+		qm->use_sva = false;
+
+		if (qm->ver == QM_HW_V1)
+			uacce->api_ver = HISI_QM_API_VER_BASE
+					 UACCE_API_VER_NOIOMMU_SUBFIX;
+		else if (qm->ver == QM_HW_V2)
+			uacce->api_ver = HISI_QM_API_VER2_BASE
+					 UACCE_API_VER_NOIOMMU_SUBFIX;
+		else
+			uacce->api_ver = HISI_QM_API_VER3_BASE
+					 UACCE_API_VER_NOIOMMU_SUBFIX;
+	}
+}
+
+static void qm_uacce_base_init(struct hisi_qm *qm)
+{
+	unsigned long dus_page_nr, mmio_page_nr;
+	struct uacce_device *uacce = qm->uacce;
+	struct pci_dev *pdev = qm->pdev;
+	u16 sq_depth, cq_depth;
+
+	qm_uacce_api_ver_init(qm);
+
+	if (qm->ver == QM_HW_V1)
+		mmio_page_nr = QM_DOORBELL_PAGE_NR;
+	else if (qm->ver == QM_HW_V2 ||
+		 !test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
+		mmio_page_nr = QM_DOORBELL_PAGE_NR +
+			       QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
+	else
+		mmio_page_nr = QM_QP_DB_INTERVAL / PAGE_SIZE;
+
+	uacce->is_vf = pdev->is_virtfn;
+	uacce->priv = qm;
+	uacce->parent = &pdev->dev;
+	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
+	/* Add one more page for device or qp status */
+	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
+		      PAGE_SHIFT;
+
+	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
+	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
+}
+
 /**
  * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
  * according to user's configuration of error threshold.
@@ -2567,7 +2645,7 @@ static void qm_remove_uacce(struct hisi_qm *qm)
 {
 	struct uacce_device *uacce = qm->uacce;
 
-	if (qm->use_sva) {
+	if (uacce) {
 		qm_hw_err_destroy(qm);
 		uacce_remove(uacce);
 		qm->uacce = NULL;
@@ -2576,15 +2654,9 @@ static void qm_remove_uacce(struct hisi_qm *qm)
 
 static int qm_alloc_uacce(struct hisi_qm *qm)
 {
+	struct uacce_interface interface = {0};
 	struct pci_dev *pdev = qm->pdev;
 	struct uacce_device *uacce;
-	unsigned long mmio_page_nr;
-	unsigned long dus_page_nr;
-	u16 sq_depth, cq_depth;
-	struct uacce_interface interface = {
-		.flags = UACCE_DEV_SVA,
-		.ops = &uacce_qm_ops,
-	};
 	int ret;
 
 	ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
@@ -2592,53 +2664,41 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 	if (ret < 0)
 		return -ENAMETOOLONG;
 
-	uacce = uacce_alloc(&pdev->dev, &interface);
-	if (IS_ERR(uacce))
-		return PTR_ERR(uacce);
+	interface.flags = qm->use_iommu ? UACCE_DEV_IOMMU : UACCE_DEV_NOIOMMU;
+	if (qm->mode == UACCE_MODE_SVA) {
+		if (!qm->use_iommu) {
+			pci_err(pdev, "iommu does not support sva!\n");
+			return -EINVAL;
+		}
 
-	if (uacce->flags & UACCE_DEV_SVA) {
-		qm->use_sva = true;
-	} else {
-		/* only consider sva case */
-		qm_remove_uacce(qm);
-		return -EINVAL;
+		interface.flags |= UACCE_DEV_SVA;
 	}
 
-	uacce->is_vf = pdev->is_virtfn;
-	uacce->priv = qm;
-
-	if (qm->ver == QM_HW_V1)
-		uacce->api_ver = HISI_QM_API_VER_BASE;
-	else if (qm->ver == QM_HW_V2)
-		uacce->api_ver = HISI_QM_API_VER2_BASE;
-	else
-		uacce->api_ver = HISI_QM_API_VER3_BASE;
-
-	if (qm->ver == QM_HW_V1)
-		mmio_page_nr = QM_DOORBELL_PAGE_NR;
-	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
-		mmio_page_nr = QM_DOORBELL_PAGE_NR +
-				QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
-	else
-		mmio_page_nr = qm->db_interval / PAGE_SIZE;
-
-	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
-
-	/* Add one more page for device or qp status */
-	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
-		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
-		      PAGE_SHIFT;
-
-	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
-	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
-
+	interface.ops = &uacce_qm_ops;
+	uacce = uacce_alloc(&pdev->dev, &interface);
+	if (IS_ERR(uacce)) {
+		pci_err(pdev, "failed to alloc uacce device!\n");
+		return PTR_ERR(uacce);
+	}
 	qm->uacce = uacce;
+
+	qm_uacce_base_init(qm);
 	INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
 	mutex_init(&qm->isolate_data.isolate_lock);
 
 	return 0;
 }
 
+int qm_register_uacce(struct hisi_qm *qm)
+{
+	if (!qm->uacce)
+		return 0;
+
+	dev_info(&qm->pdev->dev, "qm register to uacce\n");
+	return uacce_register(qm->uacce);
+}
+EXPORT_SYMBOL_GPL(qm_register_uacce);
+
 /**
  * qm_frozen() - Try to froze QM to cut continuous queue request. If
  * there is user on the QM, return failure without doing anything.
@@ -2744,7 +2804,7 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
 	struct hisi_qp *qp;
 	int ret = -ENOMEM;
 
-	qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
+	qm->poll_data[id].qp_finish_id = kcalloc(qm->eq_depth, sizeof(u16),
 						 GFP_KERNEL);
 	if (!qm->poll_data[id].qp_finish_id)
 		return -ENOMEM;
@@ -2909,12 +2969,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 	hisi_qm_set_state(qm, QM_NOT_READY);
 	up_write(&qm->qps_lock);
 
+	qm_remove_uacce(qm);
 	qm_irqs_unregister(qm);
 	hisi_qm_pci_uninit(qm);
-	if (qm->use_sva) {
-		uacce_remove(qm->uacce);
-		qm->uacce = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -4114,7 +4171,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
 		return ret;
 	}
 
-	if (qm->use_sva) {
+	if (qm->uacce) {
 		ret = qm_hw_err_isolate(qm);
 		if (ret)
 			pci_err(pdev, "failed to isolate hw err!\n");
@@ -4440,7 +4497,7 @@ static int qm_controller_reset(struct hisi_qm *qm)
 	qm_reset_bit_clear(qm);
 
 	/* if resetting fails, isolate the device */
-	if (qm->use_sva)
+	if (qm->uacce)
 		qm->isolate_data.is_isolate = true;
 	return ret;
 }
@@ -5402,7 +5459,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 		}
 	}
 
-	if (qm->mode == UACCE_MODE_SVA) {
+	if (qm->mode != UACCE_MODE_NOUACCE) {
 		ret = qm_alloc_uacce(qm);
 		if (ret < 0)
 			dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 377c58bef466..d7047200ee7a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -13,7 +13,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
-#include <linux/uacce.h>
#include "sec.h"
@@ -1232,12 +1231,10 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_qm_del_list;
 	}
 
-	if (qm->uacce) {
-		ret = uacce_register(qm->uacce);
-		if (ret) {
-			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
-			goto err_alg_unregister;
-		}
+	ret = qm_register_uacce(qm);
+	if (ret) {
+		pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+		goto err_alg_unregister;
 	}
 
 	if (qm->fun_type == QM_HW_PF && vfs_num) {
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c065fd867161..6239dc47d8a0 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -11,7 +11,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
-#include <linux/uacce.h>
 #include "zip.h"
 
 #define PCI_DEVICE_ID_HUAWEI_ZIP_PF	0xa250
@@ -1318,12 +1317,10 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_qm_del_list;
 	}
 
-	if (qm->uacce) {
-		ret = uacce_register(qm->uacce);
-		if (ret) {
-			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
-			goto err_qm_alg_unregister;
-		}
+	ret = qm_register_uacce(qm);
+	if (ret) {
+		pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+		goto err_qm_alg_unregister;
 	}
 
 	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 773e0f1c67dc..f6383689974f 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -8,6 +8,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/uacce.h>
 
 #define QM_QNUM_V1		4096
 #define QM_QNUM_V2		1024
@@ -405,6 +406,7 @@ struct hisi_qp_status {
 	u16 cq_head;
 	bool cqc_phase;
 	atomic_t flags;
+	atomic_t complete_task;
 };
 
 struct hisi_qp_ops {
@@ -495,7 +497,7 @@ static inline int mode_set(const char *val, const struct kernel_param *kp)
 		return -EINVAL;
 
 	ret = kstrtou32(val, 10, &n);
-	if (ret != 0 || (n != UACCE_MODE_SVA &&
+	if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_SVA &&
 			 n != UACCE_MODE_NOUACCE))
 		return -EINVAL;
@@ -527,6 +529,7 @@ static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
 	mutex_unlock(&qm_list->lock);
 }
 
+int qm_register_uacce(struct hisi_qm *qm);
 int hisi_qm_init(struct hisi_qm *qm);
 void hisi_qm_uninit(struct hisi_qm *qm);
 int hisi_qm_start(struct hisi_qm *qm);