From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we try to move accelerator drivers into qm module
to simplify code, including RAS/FLR/SRIOV and uacce_mode/pf_q_num/
vfs_num setting.
In qm.h we add mode_set/q_num_set/vf_num_set so that accelerator drivers
can set the module params uacce_mode/pf_q_num/vfs_num.
In qm.c hisi_qm_add_to_list and hisi_qm_del_from_list can be called
to manage accelerators through hisi_qm_list. We additionally implement
hisi_qm_alloc_qps_node to fix the problem that a device is found but
the queue request fails. Because the RAS process flow, FLR process flow
and SRIOV config flow are consistent across the different accelerator
drivers, we add corresponding common interfaces.
Meanwhile, the zip/hpre/sec/rde accelerator drivers are adapted to match
these qm changes, including RAS/FLR/SRIOV processing, module param
setting and queue allocation.
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Wei Zhang <zhangwei375(a)huawei.com>
Reviewed-by: Guangwei Zhang <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Reviewed-by: Shukun Tan <tanshukun1(a)huawei.com>
Reviewed-by: Hao Fang <fanghao11(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 9 +-
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 20 +-
drivers/crypto/hisilicon/hpre/hpre_main.c | 944 +++---------------
drivers/crypto/hisilicon/qm.c | 1093 +++++++++++++++++----
drivers/crypto/hisilicon/qm.h | 209 +++-
drivers/crypto/hisilicon/rde/rde.h | 11 +-
drivers/crypto/hisilicon/rde/rde_api.c | 29 +-
drivers/crypto/hisilicon/rde/rde_api.h | 2 +-
drivers/crypto/hisilicon/rde/rde_main.c | 717 ++++----------
drivers/crypto/hisilicon/sec2/sec.h | 13 +-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 83 +-
drivers/crypto/hisilicon/sec2/sec_main.c | 1364 +++++++--------------------
drivers/crypto/hisilicon/zip/zip.h | 9 +-
drivers/crypto/hisilicon/zip/zip_crypto.c | 30 +-
drivers/crypto/hisilicon/zip/zip_main.c | 1152 +++++-----------------
15 files changed, 2053 insertions(+), 3632 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ba7c88e..3ac02ef 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -35,25 +35,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
-#define HPRE_RESET 0
-#define HPRE_WAIT_DELAY 1000
-
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
- struct dentry *debug_root;
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
- struct list_head list;
struct hpre_debug debug;
- u32 num_vfs;
- unsigned long status;
};
enum hpre_alg_type {
@@ -80,7 +73,7 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hpre *hpre_find_device(int node);
+struct hisi_qp *hpre_create_qp(void);
int hpre_algs_register(void);
void hpre_algs_unregister(void);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index aadc975..7610e13 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
struct hisi_qp *qp;
- struct hpre *hpre;
int ret;
- /* find the proper hpre device, which is near the current CPU core */
- hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
- if (!hpre) {
- pr_err("Can not find proper hpre device!\n");
- return ERR_PTR(-ENODEV);
- }
-
- qp = hisi_qm_create_qp(&hpre->qm, 0);
- if (IS_ERR(qp)) {
- pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ qp = hpre_create_qp();
+ if (!qp) {
+ pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- hisi_qm_release_qp(qp);
- pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ hisi_qm_free_qps(&qp, 1);
+ pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
@@ -337,7 +329,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6a3bce2..4dc0d3e 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,9 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_ENABLE 1
-#define HPRE_DISABLE 0
-#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QUEUE_NUM_V1 4096
#define HPRE_QM_ABNML_INT_MASK 0x100004
@@ -63,10 +60,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -83,24 +76,18 @@
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
-#define HPRE_RESET_WAIT_TIMEOUT 400
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE
-#define HPRE_WR_MSI_PORT 0xFFFB
+#define HPRE_WR_MSI_PORT BIT(2)
-#define HPRE_HW_ERROR_IRQ_ENABLE 1
-#define HPRE_HW_ERROR_IRQ_DISABLE 0
-#define HPRE_PCI_COMMAND_INVALID 0xFFFFFFFF
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
-#define HPRE_QM_BME_FLR BIT(7)
-#define HPRE_QM_PM_FLR BIT(11)
-#define HPRE_QM_SRIOV_FLR BIT(12)
-
-#define HPRE_USLEEP 10
+#define HPRE_QM_BME_FLR BIT(7)
+#define HPRE_QM_PM_FLR BIT(11)
+#define HPRE_QM_SRIOV_FLR BIT(12)
/* function index:
* 1 for hpre bypass mode,
@@ -108,8 +95,7 @@
*/
#define HPRE_VIA_MSI_DSM 1
-static LIST_HEAD(hpre_list);
-static DEFINE_MUTEX(hpre_list_lock);
+static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -183,59 +169,29 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 q_num;
- u32 n = 0;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
@@ -243,46 +199,31 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-static inline void hpre_add_to_list(struct hpre *hpre)
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
{
- mutex_lock(&hpre_list_lock);
- list_add_tail(&hpre->list, &hpre_list);
- mutex_unlock(&hpre_list_lock);
+ return vf_num_set(val, kp);
}
-static inline void hpre_remove_from_list(struct hpre *hpre)
-{
- mutex_lock(&hpre_list_lock);
- list_del(&hpre->list);
- mutex_unlock(&hpre_list_lock);
-}
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
-struct hpre *hpre_find_device(int node)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+struct hisi_qp *hpre_create_qp(void)
{
- struct hpre *hpre, *ret = NULL;
- int min_distance = INT_MAX;
- struct device *dev;
- int dev_node = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = &hpre->qm.pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- ret = hpre;
- min_distance = node_distance(dev_node, node);
- }
- }
- mutex_unlock(&hpre_list_lock);
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp = NULL;
+ int ret;
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
+
+ return NULL;
}
static void hpre_pasid_enable(struct hisi_qm *qm)
@@ -351,9 +292,8 @@ static int hpre_set_cluster(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev;
u32 val;
int ret;
@@ -403,7 +343,7 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre)
pci_err(pdev, "acpi_evaluate_dsm err.\n");
/* disable FLR triggered by BME(bus master enable) */
- val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG);
+ val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
val |= HPRE_QM_PM_FLR;
writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
@@ -433,23 +373,21 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hpre *hpre)
+static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val &= AM_OOO_SHUTDOWN_DISABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
-static void hpre_hw_error_enable(struct hpre *hpre)
+static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* clear HPRE hw error source if having */
@@ -462,9 +400,9 @@ static void hpre_hw_error_enable(struct hpre *hpre)
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val |= AM_OOO_SHUTDOWN_ENABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -484,9 +422,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
if (val > num_vfs)
@@ -657,11 +593,14 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
struct dentry *tmp, *file_dir;
+ struct hpre *hpre;
- if (dir)
+ if (dir) {
file_dir = dir;
- else
- file_dir = dbg->debug_root;
+ } else {
+ hpre = container_of(dbg, struct hpre, debug);
+ file_dir = hpre->qm.debug.debug_root;
+ }
if (type >= HPRE_DEBUG_FILE_NUM)
return -EINVAL;
@@ -694,7 +633,8 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
+ regset);
if (!tmp)
return -ENOENT;
@@ -716,7 +656,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -761,9 +701,9 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
-static int hpre_debugfs_init(struct hpre *hpre)
+static int hpre_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
struct dentry *dir;
int ret;
@@ -779,7 +719,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
goto failed_to_create;
if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
- hpre->debug.debug_root = dir;
ret = hpre_ctrl_debug_init(&hpre->debug);
if (ret)
goto failed_to_create;
@@ -791,69 +730,41 @@ static int hpre_debugfs_init(struct hpre *hpre)
return ret;
}
-static void hpre_debugfs_exit(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- debugfs_remove_recursive(qm->debug.debug_root);
-}
-
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
+ int ret;
- if (rev_id == QM_HW_V1) {
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "rsa\ndh\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HPRE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ if (qm->ver == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->qm_list = &hpre_devices;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
- qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "rsa\ndh\n";
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
- if (pdev->is_physfn) {
- qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- }
return 0;
}
-static void hpre_hw_err_init(struct hpre *hpre)
-{
- hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
-}
-
-static void hpre_open_master_ooo(struct hisi_qm *qm)
+static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- u32 val;
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &qm->pdev->dev;
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- writel(val & AM_OOO_SHUTDOWN_DISABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
- writel(val | AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -866,41 +777,47 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
-static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hpre_hw_error *err = hpre_hw_errors;
- struct device *dev = &qm->pdev->dev;
+ u32 value;
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & AM_OOO_SHUTDOWN_DISABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
-static int hpre_pf_probe_init(struct hpre *hpre)
+static int hpre_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
int ret;
if (qm->ver != QM_HW_V2)
return -EINVAL;
qm->ctrl_q_num = HPRE_QUEUE_NUM_V2;
- qm->err_ini.qm_wr_port = HPRE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = (HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR);
- qm->err_ini.open_axi_master_ooo = hpre_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "HRST";
+
+ qm->err_ini.hw_err_disable = hpre_hw_error_disable;
+ qm->err_ini.hw_err_enable = hpre_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hpre_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hpre_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = hpre_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HPRE_WR_MSI_PORT;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = qm->err_ini.set_usr_domain_cache(qm);
if (ret)
return ret;
- hpre_hw_err_init(hpre);
+ hisi_qm_dev_err_init(qm);
return 0;
}
@@ -914,10 +831,9 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
if (!hpre)
return -ENOMEM;
-
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
+
ret = hpre_qm_pre_init(qm, pdev);
if (ret)
return ret;
@@ -929,7 +845,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
+ ret = hpre_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_with_qm_init;
@@ -947,26 +863,35 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_err_init;
}
- ret = hpre_debugfs_init(hpre);
+ ret = hpre_debugfs_init(qm);
if (ret)
pci_warn(pdev, "init debugfs fail!\n");
- hpre_add_to_list(hpre);
+ hisi_qm_add_to_list(qm, &hpre_devices);
ret = hpre_algs_register();
if (ret < 0) {
- hpre_remove_from_list(hpre);
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
+ hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
- if (pdev->is_physfn)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -974,627 +899,51 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
- return num_vfs;
-}
-
-static int hpre_try_frozen_vfs(struct pci_dev *pdev)
-{
- int ret = 0;
- struct hpre *hpre, *vf_hpre;
- struct pci_dev *dev;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_hpre = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_hpre->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* While VF is in used, SRIOV cannot be disabled.
- * However, there is a risk that the behavior is uncertain if the
- * device is in hardware resetting.
- */
- if (hpre_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev,
- "Uacce user space task is using its VF!\n");
- return -EBUSY;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hpre_clear_vft_config(hpre);
-}
-
static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
else
- return hpre_sriov_disable(pdev);
-}
-
-static void hpre_remove_wait_delay(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- while (hisi_qm_frozen(&hpre->qm) ||
- ((qm->fun_type == QM_HW_PF) &&
- hpre_try_frozen_vfs(hpre->qm.pdev)))
- usleep_range(HPRE_USLEEP, HPRE_USLEEP);
- udelay(HPRE_WAIT_DELAY);
+ return hisi_qm_sriov_disable(pdev, &hpre_devices);
}
static void hpre_remove(struct pci_dev *pdev)
{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
- hpre_remove_wait_delay(hpre);
-
+ hisi_qm_remove_wait_delay(qm, &hpre_devices);
+#endif
hpre_algs_unregister();
- hpre_remove_from_list(hpre);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0)
- hpre_sriov_disable(pdev);
-
+ hisi_qm_del_from_list(qm, &hpre_devices);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev, NULL);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
if (qm->fun_type == QM_HW_PF) {
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
-
- hpre_debugfs_exit(hpre);
+ debugfs_remove_recursive(qm->debug.debug_root);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
-static void hpre_shutdown(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hpre->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hpre_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_reset_prepare_rdy(struct hpre *hpre)
-{
- struct pci_dev *pdev = hpre->qm.pdev;
- struct hpre *hisi_hpre = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HPRE_RESET, &hisi_hpre->status)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hpre_controller_reset_prepare(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hpre_soft_reset(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value = 0;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hpre->qm.io_base + HPRE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hpre->qm.io_base +
- HPRE_MASTER_TRANS_RETURN, val,
- (val == MASTER_TRANS_RETURN_RW),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "HRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hpre_vf_reset_done(struct pci_dev *pdev)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_controller_reset_done(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret)
- return ret;
-
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start QM!\n");
- return ret;
- }
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hpre_hw_err_init(hpre);
-
- return 0;
-}
-
-static int hpre_controller_reset(struct hpre *hpre)
-{
- struct device *dev = &hpre->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hpre_controller_reset_prepare(hpre);
- if (ret)
- return ret;
-
- ret = hpre_soft_reset(hpre);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hpre_controller_reset_done(hpre);
- if (ret)
- return ret;
-
- clear_bit(HPRE_RESET, &hpre->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hpre_slot_reset(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset hpre controller */
- ret = hpre_controller_reset(hpre);
- if (ret) {
- dev_err(&pdev->dev, "hpre controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hpre_set_hw_error(struct hpre *hisi_hpre, bool enable)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (enable) {
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
- } else {
- hisi_qm_hw_error_uninit(qm);
- hpre_hw_error_disable(hpre);
- }
-}
-
-static int hpre_get_hw_error_status(struct hpre *hpre)
-{
- u32 err_sts;
-
- err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS) &
- (HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR);
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-/* check the interrupt is ecc-zbit error or not */
-static int hpre_check_hw_error(struct hpre *hisi_hpre)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- /* Now the ecc-2bit is ce_err, so this func is always return 0 */
- return hpre_get_hw_error_status(hpre);
-}
-
-static void hpre_reset_prepare(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_DISABLE);
-
- while (hpre_check_hw_error(hpre)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static bool hpre_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hpre *hpre = pci_get_drvdata(pf_pdev);
- struct device *dev = &hpre->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hpre->qm.pdev, PCI_COMMAND, &id);
- if (id == HPRE_PCI_COMMAND_INVALID) {
- dev_err(dev, "Device HPRE can not be used!\n");
- return false;
- }
-
- clear_bit(HPRE_RESET, &hpre->status);
- return true;
-}
-
-static void hpre_reset_done(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return;
- }
-
- if (pdev->is_physfn) {
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- hpre_hw_err_init(hpre);
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return;
- }
- }
-
-flr_done:
- if (hpre_flr_reset_complete(pdev))
- dev_info(dev, "FLR reset complete\n");
-}
-
static const struct pci_error_handlers hpre_err_handler = {
- .error_detected = hpre_error_detected,
- .slot_reset = hpre_slot_reset,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
#ifdef CONFIG_CRYPTO_QM_UACCE
- .reset_prepare = hpre_reset_prepare,
- .reset_done = hpre_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
#endif
};
@@ -1605,7 +954,7 @@ static void hpre_reset_done(struct pci_dev *pdev)
.remove = hpre_remove,
.sriov_configure = hpre_sriov_configure,
.err_handler = &hpre_err_handler,
- .shutdown = hpre_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hpre_register_debugfs(void)
@@ -1618,20 +967,19 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
-static void hpre_unregister_debugfs(void)
-{
- debugfs_remove_recursive(hpre_debugfs_root);
-}
-
static int __init hpre_init(void)
{
int ret;
+ INIT_LIST_HEAD(&hpre_devices.list);
+ mutex_init(&hpre_devices.lock);
+ hpre_devices.check = NULL;
+
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -1641,7 +989,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4bd7739..e89a770 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
@@ -117,7 +119,7 @@
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0)
#define QM_ABNORMAL_INT_STATUS 0x100008
-#define QM_ABNORMAL_INT_SET 0x10000c
+#define QM_PF_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_VF 0x3f
@@ -167,17 +169,30 @@
#define TASK_TIMEOUT 10000
#define WAIT_PERIOD 20
-#define MAX_WAIT_COUNTS 1000
#define WAIT_PERIOD_US_MAX 200
#define WAIT_PERIOD_US_MIN 100
-#define MAX_WAIT_TASK_COUNTS 10
-
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define REMOVE_WAIT_DELAY 10
+#define MAX_WAIT_COUNTS 1000
+#define DELAY_PERIOD_MS 100
+#define QM_DEV_RESET_STATUS 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PCI_COMMAND_INVALID 0xFFFFFFFF
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define MASTER_TRANS_RETURN_RW 3
+#define MASTER_TRANS_RETURN 0x300150
+#define MASTER_GLOBAL_CTRL 0x300000
+#define QM_REG_RD_INTVRL_US 10
+#define QM_REG_RD_TMOUT_US 1000
+#define AM_CFG_PORT_RD_EN 0x300018
#define AM_CFG_PORT_WR_EN 0x30001C
-#define AM_CFG_PORT_WR_EN_VALUE 0xFFFF
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define AM_ROB_ECC_INT_STS 0x300104
#define ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
+#define QM_DBG_SHOW_SHIFT 16
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -219,6 +234,12 @@ enum vft_type {
CQC_VFT,
};
+struct hisi_qm_resource {
+ struct hisi_qm *qm;
+ int distance;
+ struct list_head list;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -237,11 +258,6 @@ struct hisi_qm_hw_ops {
[QM_STATE] = "qm_state",
};
-struct hisi_qm_hw_error {
- u32 int_msk;
- const char *msg;
-};
-
static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
@@ -1115,13 +1131,20 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
{
u32 irq_enable = ce | nfe | fe | msi;
u32 irq_unmask = ~irq_enable;
+ u32 error_status;
qm->error_mask = ce | nfe | fe;
qm->msi_mask = msi;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
+ error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+ if (!(qm->hw_status & BIT(QM_DEV_RESET_STATUS))
+ || !error_status)
+ error_status = QM_ABNORMAL_INT_SOURCE_CLR;
+ else
+ error_status &= qm->error_mask;
+
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1190,9 +1213,7 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
if (error_status & QM_ECC_MBIT)
- qm->err_ini.is_qm_ecc_mbit = 1;
- else
- qm->err_ini.is_qm_ecc_mbit = 0;
+ qm->err_ini.err_info.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
return PCI_ERS_RESULT_NEED_RESET;
@@ -1513,7 +1534,8 @@ static void qm_qp_has_no_task(struct hisi_qp *qp)
int i = 0;
int ret;
- if (qp->qm->err_ini.is_qm_ecc_mbit || qp->qm->err_ini.is_dev_ecc_mbit)
+ if (qp->qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qp->qm->err_ini.err_info.is_dev_ecc_mbit)
return;
addr = qm_ctx_alloc(qp->qm, size, &dma_addr);
@@ -1967,6 +1989,74 @@ static int qm_unregister_uacce(struct hisi_qm *qm)
#endif
/**
+ * hisi_qm_frozen() - Try to freeze QM to cut off continuous queue requests.
+ * If there is a user on the QM, return failure without doing anything.
+ * @qm: The qm to be frozen.
+ *
+ * This function freezes QM, then we can do SRIOV disabling.
+ */
+static int hisi_qm_frozen(struct hisi_qm *qm)
+{
+ int count, i;
+
+ down_write(&qm->qps_lock);
+ for (i = 0, count = 0; i < qm->qp_num; i++)
+ if (!qm->qp_array[i])
+ count++;
+
+ if (count == qm->qp_num) {
+ bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
+ } else {
+ up_write(&qm->qps_lock);
+ return -EBUSY;
+ }
+ up_write(&qm->qps_lock);
+
+ return 0;
+}
+
+static int qm_try_frozen_vfs(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm, *vf_qm;
+ struct pci_dev *dev;
+ int ret = 0;
+
+ if (!qm_list || !pdev)
+ return -EINVAL;
+
+ /* Try to frozen all the VFs as disable SRIOV */
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+ if (pci_physfn(dev) == pdev) {
+ vf_qm = pci_get_drvdata(dev);
+ ret = hisi_qm_frozen(vf_qm);
+ if (ret)
+ goto frozen_fail;
+ }
+ }
+
+frozen_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ while (hisi_qm_frozen(qm) ||
+ ((qm->fun_type == QM_HW_PF) &&
+ qm_try_frozen_vfs(qm->pdev, qm_list))) {
+ msleep(WAIT_PERIOD);
+ }
+ udelay(REMOVE_WAIT_DELAY);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_remove_wait_delay);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needed init.
*
@@ -2107,32 +2197,21 @@ void hisi_qm_uninit(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
/**
- * hisi_qm_frozen() - Try to froze QM to cut continuous queue request. If
- * there is user on the QM, return failure without doing anything.
- * @qm: The qm needed to be fronzen.
+ * hisi_qm_dev_shutdown() - shutdown device.
+ * @pdev: The device will be shutdown.
*
- * This function frozes QM, then we can do SRIOV disabling.
+ * This function will stop qm when the OS shuts down or reboots.
*/
-int hisi_qm_frozen(struct hisi_qm *qm)
+void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
- int count, i;
-
- down_write(&qm->qps_lock);
- for (i = 0, count = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- count++;
-
- if (count == qm->qp_num) {
- bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
- } else {
- up_write(&qm->qps_lock);
- return -EBUSY;
- }
- up_write(&qm->qps_lock);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
- return 0;
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret)
+ dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_frozen);
+EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
/**
* hisi_qm_get_vft() - Get vft from a qm.
@@ -2174,7 +2253,7 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_q_num;
@@ -2185,7 +2264,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -2483,6 +2561,28 @@ static int qm_stop_started_qp(struct hisi_qm *qm)
}
/**
+ * qm_clear_queues() - Clear memory of queues in a qm.
+ * @qm: The qm which memory needs clear.
+ *
+ * This function clears the memory of all queues in a qm. An accelerator
+ * reset can use this to clear the queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = qm->qp_array[i];
+ if (qp)
+ /* device state use the last page */
+ memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
+}
+
+/**
* hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped.
* @r: The reason to stop qm.
@@ -2528,7 +2628,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- hisi_qm_clear_queues(qm);
+ qm_clear_queues(qm);
atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
@@ -2589,7 +2689,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
+ qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
&qm_regs_fops);
if (IS_ERR(qm_regs)) {
ret = -ENOENT;
@@ -2605,7 +2705,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
/**
- * hisi_qm_hw_error_init() - Configure qm hardware error report method.
+ * qm_hw_error_init() - Configure qm hardware error report method.
* @qm: The qm which we want to configure.
* @ce: Correctable error configure.
* @nfe: Non-fatal error configure.
@@ -2622,9 +2722,13 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
* related report methods. Error report will be masked if related error bit
* does not configure.
*/
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init(struct hisi_qm *qm)
{
+ u32 nfe = qm->err_ini.err_info.nfe;
+ u32 msi = qm->err_ini.err_info.msi;
+ u32 ce = qm->err_ini.err_info.ce;
+ u32 fe = qm->err_ini.err_info.fe;
+
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev,
"QM version %d doesn't support hw error handling!\n",
@@ -2634,9 +2738,8 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
+static void qm_hw_error_uninit(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_uninit) {
dev_err(&qm->pdev->dev,
@@ -2647,15 +2750,14 @@ void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_uninit);
/**
- * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
+ * qm_hw_error_handle() - Handle qm non-fatal hardware errors.
* @qm: The qm which has non-fatal hardware errors.
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
+static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev,
@@ -2666,104 +2768,19 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
return qm->ops->hw_error_handle(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
-
-/**
- * hisi_qm_clear_queues() - Clear memory of queues in a qm.
- * @qm: The qm which memory needs clear.
- *
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
- */
-void hisi_qm_clear_queues(struct hisi_qm *qm)
-{
- struct hisi_qp *qp;
- int i;
-
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp)
- /* device state use the last page */
- memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
- }
-
- memset(qm->qdma.va, 0, qm->qdma.size);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_clear_queues);
-
-/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm)
+static int qm_get_hw_error_status(struct hisi_qm *qm)
{
u32 err_sts;
- err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) &
- QM_ECC_MBIT;
+ err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) & QM_ECC_MBIT;
if (err_sts)
return err_sts;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_error_status);
-
-static pci_ers_result_t hisi_qm_dev_err_handle(struct hisi_qm *qm)
-{
- u32 err_sts;
-
- if (!qm->err_ini.get_dev_hw_err_status ||
- !qm->err_ini.log_dev_hw_err)
- return PCI_ERS_RESULT_RECOVERED;
-
- /* read err sts */
- err_sts = qm->err_ini.get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_ini.ecc_2bits_mask)
- qm->err_ini.is_dev_ecc_mbit = 1;
- else
- qm->err_ini.is_dev_ecc_mbit = 0;
-
- qm->err_ini.log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
-{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(qm);
-
- /* log device error */
- dev_ret = hisi_qm_dev_err_handle(qm);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
-int hisi_qm_reg_test(struct hisi_qm *qm)
+static int qm_reg_test(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -2782,16 +2799,13 @@ int hisi_qm_reg_test(struct hisi_qm *qm)
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
(val == PCI_VENDOR_ID_HUAWEI),
POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
- return ret;
- }
return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_reg_test);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 cmd;
@@ -2814,9 +2828,8 @@ int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_pf_mse);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 sriov_ctrl;
@@ -2833,8 +2846,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
- if (set == ((sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
- PEH_SRIOV_CTRL_VF_MSE_SHIFT))
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ PEH_SRIOV_CTRL_VF_MSE_SHIFT)
return 0;
udelay(1);
@@ -2842,9 +2855,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vf_mse);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
+static int qm_set_msi(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
@@ -2854,7 +2866,8 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
} else {
pci_write_config_dword(pdev, pdev->msi_cap +
PCI_MSI_MASK_64, PEH_MSI_DISABLE);
- if (qm->err_ini.is_qm_ecc_mbit || qm->err_ini.is_dev_ecc_mbit)
+ if (qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qm->err_ini.err_info.is_dev_ecc_mbit)
return 0;
mdelay(1);
@@ -2864,64 +2877,768 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_msi);
-void hisi_qm_set_ecc(struct hisi_qm *qm)
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
- u32 nfe_enb;
+ int i;
- if ((!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) ||
- (qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.inject_dev_hw_err) ||
- (qm->err_ini.is_dev_ecc_mbit && qm->err_ini.inject_dev_hw_err))
+ if (!qps || qp_num < 0)
return;
- if (qm->err_ini.inject_dev_hw_err)
- qm->err_ini.inject_dev_hw_err(qm);
- else {
- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
- qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
- qm->err_ini.is_qm_ecc_mbit = 1;
+ for (i = qp_num - 1; i >= 0; i--)
+ hisi_qm_release_qp(qps[i]);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+
+static void free_list(struct list_head *head)
+{
+ struct hisi_qm_resource *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_ecc);
-void hisi_qm_restart_prepare(struct hisi_qm *qm)
+static int hisi_qm_sort_devices(int node, struct list_head *head,
+ struct hisi_qm_list *qm_list)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm *qm;
+ struct list_head *n;
+ struct device *dev;
+ int dev_node = 0;
+
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = &qm->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+ }
- /* close AM wr msi port */
- writel(qm->err_ini.qm_wr_port, qm->io_base + AM_CFG_PORT_WR_EN);
+ if (qm_list->check && !qm_list->check(qm))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
- /* clear dev ecc 2bit error source */
- if (qm->err_ini.clear_dev_hw_err_status) {
- qm->err_ini.clear_dev_hw_err_status(qm,
- qm->err_ini.ecc_2bits_mask);
+ res->qm = qm;
+ res->distance = node_distance(dev_node, node);
+ n = head;
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
}
- /* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ return 0;
+}
- /* clear AM Reorder Buffer ecc mbit source */
- writel(ROB_ECC_ERR_MULTPL, qm->io_base + AM_ROB_ECC_INT_STS);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type)
+{
+ struct hisi_qm_resource *tmp;
+ int ret = -ENODEV;
+ LIST_HEAD(head);
+ int i;
- if (qm->err_ini.open_axi_master_ooo)
- qm->err_ini.open_axi_master_ooo(qm);
+ if (!qps || !qm_list || qp_num <= 0)
+ return -EINVAL;
+
+ mutex_lock(&qm_list->lock);
+ if (hisi_qm_sort_devices(node, &head, qm_list)) {
+ mutex_unlock(&qm_list->lock);
+ goto err;
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
+ if (IS_ERR(qps[i])) {
+ hisi_qm_free_qps(qps, i);
+ break;
+ }
+ }
+
+ if (i == qp_num) {
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&qm_list->lock);
+ if (ret)
+ pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ node, alg_type, qp_num);
+
+err:
+ free_list(&head);
+ return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_prepare);
+EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
-void hisi_qm_restart_done(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ u32 q_num, i, remain_q_num;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_q_num - qm->qp_num;
+
+ /* If remain queues not enough, return error. */
+ if (qm->ctrl_q_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* While VF is in use, SRIOV cannot be disabled.
+ * However, there is a risk that the behavior is uncertain if the
+ * device is in hardware resetting.
+ */
+ if (qm_list && qm_try_frozen_vfs(pdev, qm_list)) {
+ pci_err(pdev, "Uacce user space task is using its VF!\n");
+ return -EBUSY;
+ }
+
+ /* remove of the accelerator's pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+void hisi_qm_dev_err_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_init(pf_qm);
+ pf_qm->err_ini.hw_err_enable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
+
+/**
+ * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
+ * @qm: The qm for which we want to do error uninitialization.
+ *
+ * Uninitialize QM and device error related configuration. It may be called
+ * by PF/VF; the caller should ensure the scene explicitly.
+ */
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_uninit(pf_qm);
+ pf_qm->err_ini.hw_err_disable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+
+static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm);
+ if (err_sts) {
+ if (err_sts & qm->err_ini.err_info.ecc_2bits_mask)
+ qm->err_ini.err_info.is_dev_ecc_mbit = true;
+
+ qm->err_ini.log_dev_hw_err(qm, err_sts);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, dev_ret;
+
+ /* log qm error */
+ qm_ret = qm_hw_error_handle(qm);
+
+ /* log device error */
+ dev_ret = qm_dev_err_handle(qm);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hisi_qm_process_dev_error(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+
+static int qm_vf_reset_prepare(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list,
+ enum qm_stop_reason stop_reason)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* save VFs PCIE BAR configuration */
+ pci_save_state(dev);
+
+ ret = hisi_qm_stop(qm, stop_reason);
+ if (ret)
+ goto prepare_fail;
+ }
+ }
+
+prepare_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ while (test_and_set_bit(QM_DEV_RESET_STATUS, &pf_qm->hw_status)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ if (qm->use_uacce) {
+ ret = uacce_hw_err_isolate(&qm->uacce);
+ if (ret) {
+ pci_err(pdev, "Fails to isolate hw err!\n");
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ if (!qm->err_ini.err_info.is_dev_ecc_mbit &&
+ qm->err_ini.err_info.is_qm_ecc_mbit &&
+ qm->err_ini.close_axi_master_ooo) {
+
+ qm->err_ini.close_axi_master_ooo(qm);
+
+ } else if (qm->err_ini.err_info.is_dev_ecc_mbit &&
+ !qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_PF_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ ret = qm_reg_test(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable vf mse bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable peh msi bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ mdelay(DELAY_PERIOD_MS);
+
+ /* OOO register set and check */
+ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL);
+
+ /* If bus lock, reset chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + MASTER_TRANS_RETURN,
+ val, (val == MASTER_TRANS_RETURN_RW),
+ QM_REG_RD_INTVRL_US,
+ QM_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable pf mse bit.\n");
+ return ret;
+ }
+
+ /* The reset related sub-control registers are not in PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini.err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* enable VFs PCIE BAR configuration */
+ pci_restore_state(dev);
+
+ ret = hisi_qm_restart(qm);
+ if (ret)
+ goto reset_fail;
+ }
+ }
+
+reset_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm) &
+ qm->err_ini.err_info.ecc_2bits_mask;
+ if (err_sts)
+ return err_sts;
+
+ return 0;
+}
+
+static void hisi_qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini.err_info.msi_wr_port,
+ qm->io_base + AM_CFG_PORT_WR_EN);
+
+ /* clear dev ecc 2bit error source if having */
+ value = qm_get_dev_err_status(qm);
+ if (value && qm->err_ini.clear_dev_hw_err_status)
+ qm->err_ini.clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ROB_ECC_ERR_MULTPL, qm->io_base +
+ AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini.open_axi_master_ooo)
+ qm->err_ini.open_axi_master_ooo(qm);
+}
+
+static void hisi_qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini.err_info.msi_wr_port;
+
+ writel(value, qm->io_base + AM_CFG_PORT_WR_EN);
+ qm->err_ini.err_info.is_qm_ecc_mbit = false;
+ qm->err_ini.err_info.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable peh msi bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable pf mse bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable vf mse bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_restart_prepare(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_qm_restart_done(qm);
+
+ return 0;
+}
+
+int hisi_qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_controller_reset);
+
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_info(pdev, "Requesting reset due to PCI error\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* reset pcie device controller */
+ ret = hisi_qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
+
+/* check whether the interrupt is an ecc-mbit error or not */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int ret;
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(pf_qm);
+ if (ret)
+ return ret;
+
+ return qm_get_dev_err_status(pf_qm);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(qm);
+
+ while (qm_check_dev_error(qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to prepare reset!\n");
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->vfs_num)
+ qm_vf_q_assign(qm, qm->vfs_num);
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ goto flr_done;
+ }
+ }
- writel(AM_CFG_PORT_WR_EN_VALUE, qm->io_base + AM_CFG_PORT_WR_EN);
- qm->err_ini.is_qm_ecc_mbit = 0;
- qm->err_ini.is_dev_ecc_mbit = 0;
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_done);
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1(a)hisilicon.com>");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 24b3609..36e888f 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -17,6 +17,9 @@
#include "qm_usr_if.h"
+#define QNUM_V1 4096
+#define QNUM_V2 1024
+#define QM_MAX_VFS_NUM 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -49,6 +52,7 @@
#define QM_AXI_M_CFG 0x1000ac
#define AXI_M_CFG 0xffff
#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
#define AXI_M_CFG_ENABLE 0xffffffff
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
@@ -235,19 +239,41 @@ struct hisi_qm_status {
int stop_reason;
};
+struct hisi_qm_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
struct hisi_qm;
-struct hisi_qm_err_ini {
- u32 qm_wr_port;
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 is_qm_ecc_mbit;
u32 is_dev_ecc_mbit;
- u32 ecc_2bits_mask;
- void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+ u32 msi;
+};
+
+struct hisi_qm_err_ini {
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ int (*set_usr_domain_cache)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- /* design for module can not hold on ooo through qm, such as zip */
- void (*inject_dev_hw_err)(struct hisi_qm *qm);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ struct hisi_qm_err_info err_info;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ bool (*check)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -260,7 +286,9 @@ struct hisi_qm {
u32 qp_base;
u32 qp_num;
u32 ctrl_q_num;
-
+ u32 vfs_num;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
struct qm_cqc *cqc;
@@ -285,8 +313,7 @@ struct hisi_qm {
u32 error_mask;
u32 msi_mask;
-
- const char *algs;
+ unsigned long hw_status;
bool use_uacce; /* register to uacce */
bool use_sva;
@@ -294,7 +321,9 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t size;
struct uacce uacce;
+ const char *algs;
void *reserve;
+ int uacce_mode;
dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
@@ -345,9 +374,144 @@ struct hisi_qp {
#endif
};
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QNUM_V1, QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = QNUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = QNUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int vf_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM)
+ return -ERANGE;
+
+ return param_set_int(val, kp);
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_NOIOMMU &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+#endif
+
+static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline int hisi_qm_pre_init(struct hisi_qm *qm,
+ u32 pf_q_num, u32 def_q_num)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ switch (pdev->revision) {
+ case QM_HW_V1:
+ case QM_HW_V2:
+ qm->ver = pdev->revision;
+ break;
+ default:
+ pci_err(pdev, "hardware version err!\n");
+ return -ENODEV;
+ }
+
+ pci_set_drvdata(pdev, qm);
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ switch (qm->uacce_mode) {
+ case UACCE_MODE_NOUACCE:
+ qm->use_uacce = false;
+ break;
+ case UACCE_MODE_NOIOMMU:
+ qm->use_uacce = true;
+ break;
+ default:
+ pci_err(pdev, "uacce mode error!\n");
+ return -EINVAL;
+ }
+#else
+ qm->use_uacce = false;
+#endif
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = def_q_num;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ }
+
+ return 0;
+}
+
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type);
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
-int hisi_qm_frozen(struct hisi_qm *qm);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
@@ -358,25 +522,20 @@ struct hisi_qp {
int hisi_qp_wait(struct hisi_qp *qp);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_debug_init(struct hisi_qm *qm);
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm);
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
-void hisi_qm_clear_queues(struct hisi_qm *qm);
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
int hisi_qm_restart(struct hisi_qm *qm);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev);
-int hisi_qm_reg_test(struct hisi_qm *qm);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set);
-void hisi_qm_set_ecc(struct hisi_qm *qm);
-void hisi_qm_restart_prepare(struct hisi_qm *qm);
-void hisi_qm_restart_done(struct hisi_qm *qm);
+int hisi_qm_controller_reset(struct hisi_qm *qm);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h
index aa7887a..e06efc7 100644
--- a/drivers/crypto/hisilicon/rde/rde.h
+++ b/drivers/crypto/hisilicon/rde/rde.h
@@ -22,19 +22,11 @@
struct hisi_rde_ctrl;
-enum hisi_rde_status {
- HISI_RDE_RESET,
-};
-
struct hisi_rde {
struct hisi_qm qm;
- struct list_head list;
struct hisi_rde_ctrl *ctrl;
struct work_struct reset_work;
- struct mutex *rde_list_lock;
- unsigned long status;
u32 smmu_state;
- int q_ref;
};
#define RDE_CM_LOAD_ENABLE 1
@@ -134,7 +126,6 @@ struct hisi_rde_msg {
struct hisi_rde_ctx {
struct device *dev;
struct hisi_qp *qp;
- struct hisi_rde *rde_dev;
struct hisi_rde_msg *req_list;
unsigned long *req_bitmap;
spinlock_t req_lock;
@@ -323,7 +314,7 @@ static inline void rde_table_dump(const struct hisi_rde_msg *req)
}
}
-struct hisi_rde *find_rde_device(int node);
+struct hisi_qp *rde_create_qp(void);
int hisi_rde_abnormal_fix(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c
index 1be468a..f1330f1 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.c
+++ b/drivers/crypto/hisilicon/rde/rde_api.c
@@ -835,17 +835,12 @@ int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl,
return ret;
}
-static int hisi_rde_create_qp(struct hisi_qm *qm, struct acc_ctx *ctx,
- int alg_type, int req_type)
+static int hisi_rde_start_qp(struct hisi_qp *qp, struct acc_ctx *ctx,
+ int req_type)
{
- struct hisi_qp *qp;
struct hisi_rde_ctx *rde_ctx;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp->req_type = req_type;
qp->qp_ctx = ctx;
@@ -994,9 +989,10 @@ static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen)
int acc_init(struct acc_ctx *ctx)
{
+ struct hisi_rde_ctx *rde_ctx;
struct hisi_rde *hisi_rde;
+ struct hisi_qp *qp;
struct hisi_qm *qm;
- struct hisi_rde_ctx *rde_ctx;
int ret;
if (unlikely(!ctx)) {
@@ -1004,9 +1000,9 @@ int acc_init(struct acc_ctx *ctx)
return -EINVAL;
}
- hisi_rde = find_rde_device(cpu_to_node(smp_processor_id()));
- if (unlikely(!hisi_rde)) {
- pr_err("[%s]Can not find proper RDE device.\n", __func__);
+ qp = rde_create_qp();
+ if (unlikely(!qp)) {
+		pr_err("[%s] Can not create RDE qp.\n", __func__);
return -ENODEV;
}
/* alloc inner private struct */
@@ -1017,20 +1013,20 @@ int acc_init(struct acc_ctx *ctx)
}
ctx->inner = (void *)rde_ctx;
- qm = &hisi_rde->qm;
+ qm = qp->qm;
if (unlikely(!qm->pdev)) {
pr_err("[%s] Pdev is NULL.\n", __func__);
return -ENODEV;
}
rde_ctx->dev = &qm->pdev->dev;
- ret = hisi_rde_create_qp(qm, ctx, 0, 0);
+ ret = hisi_rde_start_qp(qp, ctx, 0);
if (ret) {
- dev_err(rde_ctx->dev, "[%s] Create qp failed.\n", __func__);
+		dev_err(rde_ctx->dev, "[%s] Start qp failed.\n", __func__);
goto qp_err;
}
- rde_ctx->rde_dev = hisi_rde;
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
rde_ctx->smmu_state = hisi_rde->smmu_state;
rde_ctx->addr_type = ctx->addr_type;
hisi_rde_session_init(rde_ctx);
@@ -1081,9 +1077,6 @@ int acc_clear(struct acc_ctx *ctx)
rde_ctx->req_list = NULL;
hisi_rde_release_qp(rde_ctx);
- mutex_lock(rde_ctx->rde_dev->rde_list_lock);
- rde_ctx->rde_dev->q_ref = rde_ctx->rde_dev->q_ref - 1;
- mutex_unlock(rde_ctx->rde_dev->rde_list_lock);
kfree(rde_ctx);
ctx->inner = NULL;
diff --git a/drivers/crypto/hisilicon/rde/rde_api.h b/drivers/crypto/hisilicon/rde/rde_api.h
index 0f9021b..167607e 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.h
+++ b/drivers/crypto/hisilicon/rde/rde_api.h
@@ -308,7 +308,7 @@ struct acc_dif {
* @input_block: number of sector
* @data_len: data len of per disk, block_size (with dif)* input_block
* @buf_type: denoted by ACC_BUF_TYPE_E
- * @src_dif��dif information of source disks
+ * @src_dif: dif information of source disks
* @dst_dif: dif information of dest disks
* @cm_load: coe_matrix reload control, 0: do not load, 1: load
* @cm_len: length of loaded coe_matrix, equal to src_num
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 453657a..318d4a0 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -22,7 +22,6 @@
#include <linux/uacce.h>
#include "rde.h"
-#define HRDE_VF_NUM 63
#define HRDE_QUEUE_NUM_V1 4096
#define HRDE_QUEUE_NUM_V2 1024
#define HRDE_PCI_DEVICE_ID 0xa25a
@@ -32,7 +31,6 @@
#define HRDE_PF_DEF_Q_BASE 0
#define HRDE_RD_INTVRL_US 10
#define HRDE_RD_TMOUT_US 1000
-#define FORMAT_DECIMAL 10
#define HRDE_RST_TMOUT_MS 400
#define HRDE_ENABLE 1
#define HRDE_DISABLE 0
@@ -68,7 +66,7 @@
#define CHN_CFG 0x5010101
#define HRDE_AXI_SHUTDOWN_EN BIT(26)
#define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF
-#define HRDE_WR_MSI_PORT 0xFFFE
+#define HRDE_WR_MSI_PORT BIT(0)
#define HRDE_AWUSER_BD_1 0x310104
#define HRDE_ARUSER_BD_1 0x310114
#define HRDE_ARUSER_SGL_1 0x310124
@@ -87,9 +85,6 @@
#define HRDE_QM_IDEL_STATUS 0x1040e4
#define HRDE_QM_PEH_DFX_INFO0 0x1000fc
#define PEH_MSI_MASK_SHIFT 0x90
-#define HRDE_MASTER_GLOBAL_CTRL 0x300000
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define MASTER_TRANS_RETURN_RW 0x3
#define CACHE_CTL 0x1833
#define HRDE_DBGFS_VAL_MAX_LEN 20
#define HRDE_PROBE_ADDR 0x31025c
@@ -100,16 +95,9 @@
static const char hisi_rde_name[] = "hisi_rde";
static struct dentry *hrde_debugfs_root;
-LIST_HEAD(hisi_rde_list);
-DEFINE_MUTEX(hisi_rde_list_lock);
+static struct hisi_qm_list rde_devices;
static void hisi_rde_ras_proc(struct work_struct *work);
-struct hisi_rde_resource {
- struct hisi_rde *hrde;
- int distance;
- struct list_head list;
-};
-
static const struct hisi_rde_hw_error rde_hw_error[] = {
{.int_msk = BIT(0), .msg = "Rde_ecc_1bitt_err"},
{.int_msk = BIT(1), .msg = "Rde_ecc_2bit_err"},
@@ -157,7 +145,6 @@ struct ctrl_debug_file {
*/
struct hisi_rde_ctrl {
struct hisi_rde *hisi_rde;
- struct dentry *debug_root;
struct ctrl_debug_file files[HRDE_DEBUG_FILE_NUM];
};
@@ -199,78 +186,36 @@ struct hisi_rde_ctrl {
{"HRDE_AM_CURR_WR_TXID_STS_2", 0x300178ull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n;
- u32 q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID, NULL);
- if (unlikely(!pdev)) {
- q_num = min_t(u32, HRDE_QUEUE_NUM_V1, HRDE_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d.\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HRDE_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HRDE_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HRDE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
-
static u32 pf_q_num = HRDE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
static const struct pci_device_id hisi_rde_dev_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)},
{0,}
@@ -278,125 +223,59 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
MODULE_DEVICE_TABLE(pci, hisi_rde_dev_ids);
-static void free_list(struct list_head *head)
-{
- struct hisi_rde_resource *res;
- struct hisi_rde_resource *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_rde *find_rde_device(int node)
+struct hisi_qp *rde_create_qp(void)
{
- struct hisi_rde *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_rde_resource *res, *tmp;
- struct hisi_rde *hisi_rde;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_rde_list_lock);
-
- list_for_each_entry(hisi_rde, &hisi_rde_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_rde->qm.pdev->dev;
- res->hrde = hisi_rde;
- res->distance = node_distance(dev->numa_node, node);
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->hrde->q_ref + 1 <= pf_q_num) {
- tmp->hrde->q_ref = tmp->hrde->q_ref + 1;
- ret = tmp->hrde;
- break;
- }
- }
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp;
+ int ret;
- free_list(&head);
-#else
- mutex_lock(&hisi_rde_list_lock);
- ret = list_first_entry(&hisi_rde_list, struct hisi_rde, list);
-#endif
- mutex_unlock(&hisi_rde_list_lock);
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &rde_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
-err:
- free_list(&head);
- mutex_unlock(&hisi_rde_list_lock);
return NULL;
}
-static inline void hisi_rde_add_to_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_add_tail(&hisi_rde->list, &hisi_rde_list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static inline void hisi_rde_remove_from_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_del(&hisi_rde->list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static void hisi_rde_engine_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_engine_init(struct hisi_qm *qm)
{
- writel(DFX_CTRL0, hisi_rde->qm.io_base + HRDE_DFX_CTRL_0);
+ writel(DFX_CTRL0, qm->io_base + HRDE_DFX_CTRL_0);
/* usr domain */
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_SGL_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_SGL_1);
/* rde cache */
- writel(AWCACHE, hisi_rde->qm.io_base + HRDE_AWCACHE);
- writel(ARCACHE, hisi_rde->qm.io_base + HRDE_ARCACHE);
+ writel(AWCACHE, qm->io_base + HRDE_AWCACHE);
+ writel(ARCACHE, qm->io_base + HRDE_ARCACHE);
/* rde chn enable + outstangding config */
- writel(CHN_CFG, hisi_rde->qm.io_base + HRDE_CFG);
+ writel(CHN_CFG, qm->io_base + HRDE_CFG);
+
+ return 0;
}
-static void hisi_rde_set_user_domain_and_cache(struct hisi_rde *hisi_rde)
+static int hisi_rde_set_user_domain_and_cache(struct hisi_qm *qm)
{
/* qm user domain */
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_rde->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_rde->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
/* disable BME/PM/SRIOV FLR*/
- writel(PEH_AXUSER_CFG, hisi_rde->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
- writel(CACHE_CTL, hisi_rde->qm.io_base + QM_CACHE_CTL);
+ writel(CACHE_CTL, qm->io_base + QM_CACHE_CTL);
- hisi_rde_engine_init(hisi_rde);
+ return hisi_rde_engine_init(qm);
}
static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
@@ -418,30 +297,38 @@ static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_rde_hw_error_set_state(struct hisi_rde *hisi_rde, bool state)
+static void hisi_rde_hw_error_enable(struct hisi_qm *qm)
{
- u32 ras_msk = (HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK);
u32 val;
- val = readl(hisi_rde->qm.io_base + HRDE_CFG);
- if (state) {
- writel(HRDE_INT_SOURCE_CLEAR,
- hisi_rde->qm.io_base + HRDE_INT_SOURCE);
- writel(HRDE_RAS_ENABLE,
- hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- /* bd prefetch should bd masked to prevent misreport */
- writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
- hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo close, when m-bits error happens*/
- val = val | HRDE_AXI_SHUTDOWN_EN;
- } else {
- writel(ras_msk, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- writel(HRDE_INT_DISABLE, hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo open, when m-bits error happens*/
- val = val & HRDE_AXI_SHUTDOWN_DIS;
- }
+ val = readl(qm->io_base + HRDE_CFG);
+
+	/* clear any pending RDE hw error source */
+ writel(HRDE_INT_SOURCE_CLEAR, qm->io_base + HRDE_INT_SOURCE);
+ writel(HRDE_RAS_ENABLE, qm->io_base + HRDE_RAS_INT_MSK);
+
+ /* bd prefetch should bd masked to prevent misreport */
+ writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
+ qm->io_base + HRDE_INT_MSK);
- writel(val, hisi_rde->qm.io_base + HRDE_CFG);
+	/* when an m-bit error occurs, master ooo will be closed */
+ val = val | HRDE_AXI_SHUTDOWN_EN;
+ writel(val, qm->io_base + HRDE_CFG);
+}
+
+static void hisi_rde_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 ras_msk = HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK;
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+
+ writel(ras_msk, qm->io_base + HRDE_RAS_INT_MSK);
+ writel(HRDE_INT_DISABLE, qm->io_base + HRDE_INT_MSK);
+
+	/* when an m-bit error occurs, master ooo will not be closed */
+ val = val & HRDE_AXI_SHUTDOWN_DIS;
+ writel(val, qm->io_base + HRDE_CFG);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -587,10 +474,8 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_chn_debug_init(struct hisi_qm *qm)
{
- struct hisi_rde *hisi_rde = ctrl->hisi_rde;
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset, *regset_ooo;
struct dentry *tmp_d, *tmp;
@@ -601,7 +486,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -628,29 +513,30 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
return 0;
}
-static int hisi_rde_ctrl_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct dentry *tmp;
int i;
for (i = HRDE_CURRENT_FUNCTION; i < HRDE_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&hisi_rde->ctrl->files[i].lock);
+ hisi_rde->ctrl->files[i].ctrl = hisi_rde->ctrl;
+ hisi_rde->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ hisi_rde->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_rde_chn_debug_init(ctrl);
+ return hisi_rde_chn_debug_init(qm);
}
-static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -665,8 +551,7 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
goto failed_to_create;
if (qm->pdev->device == HRDE_PCI_DEVICE_ID) {
- hisi_rde->ctrl->debug_root = dev_d;
- ret = hisi_rde_ctrl_debug_init(hisi_rde->ctrl);
+ ret = hisi_rde_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -678,49 +563,17 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
return ret;
}
-static void hisi_rde_debugfs_exit(struct hisi_rde *hisi_rde)
+static void hisi_rde_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
hisi_rde_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_rde_set_hw_error(struct hisi_rde *hisi_rde, bool state)
-{
- if (state)
- hisi_qm_hw_error_init(&hisi_rde->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, 0);
- else
- hisi_qm_hw_error_uninit(&hisi_rde->qm);
-
- hisi_rde_hw_error_set_state(hisi_rde, state);
-}
-
-static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
-
- val = readl(qm->io_base + HRDE_CFG);
- writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
- writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
-}
-
-static u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HRDE_INT_STATUS);
-}
-
-static void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
-}
-
-static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
+void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_rde_hw_error *err = rde_hw_error;
struct device *dev = &qm->pdev->dev;
@@ -751,10 +604,30 @@ static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
+u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HRDE_INT_STATUS);
+}
+
+void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- struct hisi_qm *qm = &hisi_rde->qm;
+ writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
+}
+
+static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+ writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
+ writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
+}
+
+static int hisi_rde_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct hisi_rde_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -776,14 +649,26 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HRDE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
- qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = 0;
+ qm->err_ini.err_info.acpi_rst = "RRST";
+ qm->err_ini.hw_err_disable = hisi_rde_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_rde_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_rde_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log;
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_rde_set_hw_error(hisi_rde, true);
+ qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HRDE_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
hisi_rde_debug_regs_clear(qm);
@@ -792,33 +677,21 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
+ int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "ec\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &rde_devices;
qm->sqe_size = HRDE_SQE_SIZE;
qm->dev_name = hisi_rde_name;
- qm->fun_type = QM_HW_PF;
- qm->algs = "ec\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- qm->qp_base = HRDE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
qm->abnormal_fix = hisi_rde_abnormal_fix;
return 0;
@@ -849,11 +722,12 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_rde)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_rde);
INIT_WORK(&hisi_rde->reset_work, hisi_rde_ras_proc);
hisi_rde->smmu_state = hisi_rde_smmu_state(&pdev->dev);
qm = &hisi_rde->qm;
+ qm->fun_type = QM_HW_PF;
+
ret = hisi_rde_qm_pre_init(qm, pdev);
if (ret) {
pci_err(pdev, "Pre init qm failed!\n");
@@ -866,7 +740,7 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = hisi_rde_pf_probe_init(hisi_rde);
+ ret = hisi_rde_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Init pf failed!\n");
goto err_qm_uninit;
@@ -878,16 +752,15 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_rde_debugfs_init(hisi_rde);
+ ret = hisi_rde_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Init debugfs failed!\n");
- hisi_rde_add_to_list(hisi_rde);
- hisi_rde->rde_list_lock = &hisi_rde_list_lock;
+ hisi_qm_add_to_list(qm, &rde_devices);
return 0;
- err_qm_uninit:
+err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
@@ -895,198 +768,20 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void hisi_rde_remove(struct pci_dev *pdev)
{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
+
+ hisi_qm_remove_wait_delay(qm, &rde_devices);
qm->abnormal_fix = NULL;
- hisi_rde_hw_error_set_state(hisi_rde, false);
+ hisi_qm_dev_err_uninit(qm);
cancel_work_sync(&hisi_rde->reset_work);
- hisi_rde_remove_from_list(hisi_rde);
- hisi_rde_debugfs_exit(hisi_rde);
+ hisi_qm_del_from_list(qm, &rde_devices);
+ hisi_rde_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_uninit(qm);
}
-static void hisi_rde_shutdown(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_rde->qm, QM_NORMAL);
-}
-
-static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde)
-{
- int delay = 0;
-
- while (test_and_set_bit(HISI_RDE_RESET, &hisi_rde->status)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_prepare(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Isolate hw err failed!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- /* Check PF stream stop */
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- /* Disable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable peh msi bit failed.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_rde->qm.io_base + HRDE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_rde->qm.io_base +
- HRDE_MASTER_TRANS_RET, val,
- (val == MASTER_TRANS_RETURN_RW),
- HRDE_RD_INTVRL_US, HRDE_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- /* Disable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable pf mse bit failed.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "No controller reset method.\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed.\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- /* Enable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable peh msi bit failed!\n");
- return ret;
- }
-
- /* Enable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable pf mse bit failed!\n");
- return ret;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_rde_set_hw_error(hisi_rde, true);
-
- return 0;
-}
-
-static int hisi_rde_controller_reset(struct hisi_rde *hisi_rde)
-{
- struct device *dev = &hisi_rde->qm.pdev->dev;
- int ret;
-
- dev_info_ratelimited(dev, "Controller resetting...\n");
-
- ret = hisi_rde_controller_reset_prepare(hisi_rde);
- if (ret)
- return ret;
-
- ret = hisi_rde_soft_reset(hisi_rde);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d).\n", ret);
- return ret;
- }
-
- ret = hisi_rde_controller_reset_done(hisi_rde);
- if (ret)
- return ret;
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
- dev_info_ratelimited(dev, "Controller reset complete.\n");
-
- return 0;
-}
-
static void hisi_rde_ras_proc(struct work_struct *work)
{
struct pci_dev *pdev;
@@ -1100,121 +795,26 @@ static void hisi_rde_ras_proc(struct work_struct *work)
ret = hisi_qm_process_dev_error(pdev);
if (ret == PCI_ERS_RESULT_NEED_RESET)
- if (hisi_rde_controller_reset(hisi_rde))
+ if (hisi_qm_controller_reset(&hisi_rde->qm))
dev_err(&pdev->dev, "Hisi_rde reset fail.\n");
}
int hisi_rde_abnormal_fix(struct hisi_qm *qm)
{
- struct pci_dev *pdev;
struct hisi_rde *hisi_rde;
if (!qm)
return -EINVAL;
- pdev = qm->pdev;
- if (!pdev)
- return -EINVAL;
-
- hisi_rde = pci_get_drvdata(pdev);
- if (!hisi_rde) {
- dev_err(&pdev->dev, "Hisi_rde is NULL.\n");
- return -EINVAL;
- }
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
return schedule_work(&hisi_rde->reset_work);
}
-static int hisi_rde_get_hw_error_status(struct hisi_rde *hisi_rde)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS) &
- HRDE_ECC_2BIT_ERR;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_rde_check_hw_error(struct hisi_rde *hisi_rde)
-{
- int ret;
-
- ret = hisi_qm_get_hw_error_status(&hisi_rde->qm);
- if (ret)
- return ret;
-
- return hisi_rde_get_hw_error_status(hisi_rde);
-}
-
-static void hisi_rde_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- u32 delay = 0;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, false);
-
- while (hisi_rde_check_hw_error(hisi_rde)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return;
- }
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return;
- }
-
- dev_info(&pdev->dev, "FLR resetting...\n");
-}
-
-static void hisi_rde_flr_reset_complete(struct pci_dev *pdev,
- struct hisi_rde *hisi_rde)
-{
- u32 id;
-
- pci_read_config_dword(pdev, PCI_COMMAND, &id);
- if (id == HRDE_PCI_COMMAND_INVALID)
- dev_err(&pdev->dev, "Device can not be used!\n");
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
-}
-
-static void hisi_rde_reset_done(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, true);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- goto flr_done;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
-
-flr_done:
- hisi_rde_flr_reset_complete(pdev, hisi_rde);
- dev_info(&pdev->dev, "FLR reset complete.\n");
-}
-
static const struct pci_error_handlers hisi_rde_err_handler = {
- .reset_prepare = hisi_rde_reset_prepare,
- .reset_done = hisi_rde_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_rde_pci_driver = {
@@ -1223,7 +823,7 @@ static void hisi_rde_reset_done(struct pci_dev *pdev)
.probe = hisi_rde_probe,
.remove = hisi_rde_remove,
.err_handler = &hisi_rde_err_handler,
- .shutdown = hisi_rde_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_rde_register_debugfs(void)
@@ -1245,6 +845,9 @@ static int __init hisi_rde_init(void)
{
int ret;
+ INIT_LIST_HEAD(&rde_devices.list);
+ mutex_init(&rde_devices.lock);
+ rde_devices.check = NULL;
hisi_rde_register_debugfs();
ret = pci_register_driver(&hisi_rde_pci_driver);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 0e16452..f85dd06 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,7 +11,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "hisi_sec: " fmt
-#define CTX_Q_NUM_DEF 24
#define FUSION_LIMIT_DEF 1
#define FUSION_LIMIT_MAX 64
#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
@@ -24,10 +23,6 @@ enum sec_endian {
struct hisi_sec_ctrl;
-enum hisi_sec_status {
- HISI_SEC_RESET,
-};
-
struct hisi_sec_dfx {
u64 send_cnt;
u64 send_by_tmout;
@@ -39,21 +34,19 @@ struct hisi_sec_dfx {
u64 thread_cnt;
u64 fake_busy_cnt;
u64 busy_comp_cnt;
- u64 sec_ctrl;
};
struct hisi_sec {
struct hisi_qm qm;
- struct list_head list;
struct hisi_sec_dfx sec_dfx;
struct hisi_sec_ctrl *ctrl;
- struct mutex *hisi_sec_list_lock;
- int q_ref;
int ctx_q_num;
int fusion_limit;
int fusion_tmout_nsec;
- unsigned long status;
};
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+struct hisi_qp **sec_create_qps(void);
struct hisi_sec *find_sec_device(int node);
+
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 3a362ce..0643955 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -16,6 +16,8 @@
#include "sec.h"
#include "sec_crypto.h"
+static atomic_t sec_active_devs;
+
#define SEC_ASYNC
#define SEC_INVLD_REQ_ID (-1)
@@ -179,6 +181,7 @@ struct hisi_sec_ctx {
struct hisi_sec *sec;
struct device *dev;
struct hisi_sec_req_op *req_op;
+ struct hisi_qp **qps;
struct hrtimer timer;
struct work_struct work;
atomic_t thread_cnt;
@@ -200,11 +203,6 @@ struct hisi_sec_ctx {
u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
-static void sec_update_iv(struct hisi_sec_req *req, u8 *iv)
-{
- // todo: update iv by cbc/ctr mode
-}
-
static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
@@ -324,19 +322,16 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int alg_type, int req_type)
+static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
+ int qp_ctx_id, int req_type)
{
- struct hisi_qp *qp;
struct hisi_sec_qp_ctx *qp_ctx;
struct device *dev = ctx->dev;
+ struct hisi_qp *qp;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
qp->req_type = req_type;
qp->qp_ctx = qp_ctx;
#ifdef SEC_ASYNC
@@ -353,10 +348,8 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
GFP_ATOMIC);
- if (!qp_ctx->req_bitmap) {
- ret = -ENOMEM;
- goto err_qm_release_qp;
- }
+ if (!qp_ctx->req_bitmap)
+ return -ENOMEM;
qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
if (!qp_ctx->req_list) {
@@ -407,8 +400,7 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_list);
err_free_req_bitmap:
kfree(qp_ctx->req_bitmap);
-err_qm_release_qp:
- hisi_qm_release_qp(qp);
+
return ret;
}
@@ -424,7 +416,6 @@ static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_bitmap);
kfree(qp_ctx->req_list);
kfree(qp_ctx->sqe_list);
- hisi_qm_release_qp(qp_ctx->qp);
}
static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
@@ -465,22 +456,22 @@ static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_qm *qm;
struct hisi_sec_cipher_ctx *c_ctx;
struct hisi_sec *sec;
int i, ret;
crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
- sec = find_sec_device(cpu_to_node(smp_processor_id()));
- if (!sec) {
- pr_err("failed to find a proper sec device!\n");
+ ctx->qps = sec_create_qps();
+ if (!ctx->qps) {
+ pr_err("Can not create sec qps!\n");
return -ENODEV;
}
+
+ sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
ctx->sec = sec;
- qm = &sec->qm;
- ctx->dev = &qm->pdev->dev;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->q_num = sec->ctx_q_num;
@@ -495,7 +486,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
hisi_sec_get_fusion_param(ctx, sec);
for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(qm, ctx, i, 0, 0);
+ ret = hisi_sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
@@ -515,6 +506,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
for (i = i - 1; i >= 0; i--)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
return ret;
}
@@ -540,11 +532,8 @@ static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
for (i = 0; i < ctx->q_num; i++)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, ctx->q_num);
kfree(ctx->qp_ctx);
-
- mutex_lock(ctx->sec->hisi_sec_list_lock);
- ctx->sec->q_ref -= ctx->sec->ctx_q_num;
- mutex_unlock(ctx->sec->hisi_sec_list_lock);
}
static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
@@ -658,8 +647,6 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
dfx = &req->ctx->sec->sec_dfx;
- sec_update_iv(req, req->c_req.sk_req->iv);
-
req->ctx->req_op->buf_unmap(req->ctx, req);
req->ctx->req_op->callback(req->ctx, req);
@@ -1497,20 +1484,28 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
int hisi_sec_register_to_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+	/* Avoid repeated algorithm registration when multiple SEC devices probe */
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ if (fusion_limit == 1)
+ return crypto_register_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ return crypto_register_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
+
+ return 0;
}
void hisi_sec_unregister_from_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ if (fusion_limit == 1)
+ crypto_unregister_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ crypto_unregister_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
}
+
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index ba5c478..b4e5d57f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -23,21 +23,24 @@
#include "sec.h"
#include "sec_crypto.h"
-#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
-#define SEC_PCI_DEVICE_ID_PF 0xa255
-#define SEC_PCI_DEVICE_ID_VF 0xa256
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
-#define SEC_COMMON_REG_OFF 0x1000
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_MASTER_GLOBAL_CTRL 0x300000
-#define SEC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define SEC_MASTER_TRANS_RETURN 0x300150
-#define SEC_MASTER_TRANS_RETURN_RW 0x3
#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
@@ -45,41 +48,17 @@
#define SEC_CORE_ECC_INFO 0x301C14
#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
-
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_HW_ERROR_IRQ_ENABLE 1
-#define SEC_HW_ERROR_IRQ_DISABLE 0
-
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-
-#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
-#define SEC_PF_DEF_Q_NUM 64
-#define SEC_PF_DEF_Q_BASE 0
-
-#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_CLEAR 0x1ff
-#define SEC_RAS_CE_REG 0x50
-#define SEC_RAS_FE_REG 0x54
-#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_REG 0x301050
+#define SEC_RAS_FE_REG 0x301054
+#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
-
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
-
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
@@ -88,114 +67,39 @@
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_WR_MSI_PORT 0xFFFE
+#define SEC_WR_MSI_PORT BIT(0)
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_SAA_EN_REG 0x270
+#define SEC_SAA_EN 0x17F
+#define SEC_BD_ERR_CHK_EN_REG0 0x0380
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
+#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
-#define SEC_WAIT_DELAY 1000
-
#define SEC_DBGFS_VAL_MAX_LEN 20
-#define SEC_CHAIN_ABN_LEN 128UL
-#define SEC_ENABLE 1
-#define SEC_DISABLE 0
-#define SEC_RESET_WAIT_TIMEOUT 400
-#define SEC_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FORMAT_DECIMAL 10
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
-
-static const char sec_name[] = "hisi_sec2";
-static struct dentry *sec_debugfs_root;
-static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
-static struct workqueue_struct *sec_wq;
-
-LIST_HEAD(hisi_sec_list);
-DEFINE_MUTEX(hisi_sec_list_lock);
-
-struct hisi_sec_resource {
- struct hisi_sec *sec;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_sec_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_sec *find_sec_device(int node)
-{
- struct hisi_sec *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_sec_resource *res, *tmp;
- struct hisi_sec *hisi_sec;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_sec_list_lock);
-
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_sec->qm.pdev->dev;
- res->sec = hisi_sec;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->sec->q_ref + tmp->sec->ctx_q_num <= pf_q_num) {
- tmp->sec->q_ref += tmp->sec->ctx_q_num;
- ret = tmp->sec;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_sec_list_lock);
-
- ret = list_first_entry(&hisi_sec_list, struct hisi_sec, list);
-#endif
- mutex_unlock(&hisi_sec_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_sec_list_lock);
- return NULL;
-}
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
struct hisi_sec_hw_error {
u32 int_msk;
const char *msg;
};
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static struct hisi_qm_list sec_devices;
+static struct workqueue_struct *sec_wq;
+
static const struct hisi_sec_hw_error sec_hw_error[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
@@ -233,9 +137,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_sec_ctrl {
- u32 num_vfs;
struct hisi_sec *hisi_sec;
- struct dentry *debug_root;
struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
};
@@ -263,94 +165,104 @@ struct hisi_sec_ctrl {
{"SEC_BD_SAA8 ", 0x301C40},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PCI_DEVICE_ID_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
+ u32 ctx_q_num;
int ret;
if (!val)
return -EINVAL;
- if (unlikely(!pdev)) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
+ }
return param_set_int(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
.get = param_get_int,
};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
+{
+ hisi_qm_free_qps(qps, qp_num);
+ kfree(qps);
+}
+
+struct hisi_qp **sec_create_qps(void)
{
- u32 n;
+ int node = cpu_to_node(smp_processor_id());
+ u32 ctx_num = ctx_q_num;
+ struct hisi_qp **qps;
int ret;
- if (!val)
- return -EINVAL;
+ qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
+ if (!qps)
+ return NULL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+ ret = hisi_qm_alloc_qps_node(node, &sec_devices, qps, ctx_num, 0);
+ if (!ret)
+ return qps;
- return param_set_int(val, kp);
+ kfree(qps);
+ return NULL;
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
+static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
};
-static int ctx_q_num_set(const char *val, const struct kernel_param *kp)
-{
- u32 ctx_q_num;
- int ret;
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- if (!val)
- return -EINVAL;
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
+}
- ret = kstrtou32(val, FORMAT_DECIMAL, &ctx_q_num);
- if (ret)
- return -EINVAL;
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = pf_q_num_set,
+ .get = param_get_int,
+};
- if (ctx_q_num == 0 || ctx_q_num > QM_Q_DEPTH || ctx_q_num % 2 == 1) {
- pr_err("ctx_q_num[%u] is invalid\n", ctx_q_num);
- return -EINVAL;
- }
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops ctx_q_num_ops = {
- .set = ctx_q_num_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static int fusion_limit_set(const char *val, const struct kernel_param *kp)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
{
u32 fusion_limit;
int ret;
@@ -358,11 +270,11 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_limit);
+ ret = kstrtou32(val, 10, &fusion_limit);
if (ret)
return ret;
- if (fusion_limit == 0 || fusion_limit > FUSION_LIMIT_MAX) {
+ if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
FUSION_LIMIT_MAX);
return -EINVAL;
@@ -371,12 +283,17 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_limit_ops = {
- .set = fusion_limit_set,
+static const struct kernel_param_ops sec_fusion_limit_ops = {
+ .set = sec_fusion_limit_set,
.get = param_get_int,
};
+static u32 fusion_limit = FUSION_LIMIT_DEF;
-static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
+module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
+MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
+
+static int sec_fusion_tmout_ns_set(const char *val,
+ const struct kernel_param *kp)
{
u32 fusion_tmout_nsec;
int ret;
@@ -384,7 +301,7 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_tmout_nsec);
+ ret = kstrtou32(val, 10, &fusion_tmout_nsec);
if (ret)
return ret;
@@ -396,53 +313,22 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_tmout_nsec_ops = {
- .set = fusion_tmout_nsec_set,
+static const struct kernel_param_ops sec_fusion_time_ops = {
+ .set = sec_fusion_tmout_ns_set,
.get = param_get_int,
};
-
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
-static int ctx_q_num = CTX_Q_NUM_DEF;
-module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
-
-static int fusion_limit = FUSION_LIMIT_DEF;
-module_param_cb(fusion_limit, &fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr)");
-
-static int fusion_tmout_nsec = FUSION_TMOUT_NSEC_DEF;
-module_param_cb(fusion_tmout_nsec, &fusion_tmout_nsec_ops, &fusion_tmout_nsec,
- 0444);
-MODULE_PARM_DESC(fusion_tmout_nsec, "(0, NSEC_PER_SEC)");
+static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
+module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
+MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
static const struct pci_device_id hisi_sec_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_PF) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
-static inline void hisi_sec_add_to_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_add_tail(&hisi_sec->list, &hisi_sec_list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-static inline void hisi_sec_remove_from_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_del(&hisi_sec->list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-u8 sec_get_endian(struct hisi_sec *hisi_sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
u32 reg;
@@ -450,83 +336,83 @@ u8 sec_get_endian(struct hisi_sec *hisi_sec)
* As for VF, it is a wrong way to get endian setting by
* reading a register of the engine
*/
- if (hisi_sec->qm.pdev->is_virtfn) {
- dev_err_ratelimited(&hisi_sec->qm.pdev->dev,
- "error! shouldn't access a register in VF\n");
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(hisi_sec->qm.io_base + SEC_ENGINE_PF_CFG_OFF +
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
+
/* BD 32-bits big endian mode */
else if (!(reg & BIT(1)))
return SEC_32BE;
+
/* BD 64-bits big endian mode */
else
return SEC_64BE;
}
-static int sec_engine_init(struct hisi_sec *hisi_sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
u32 reg;
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
-
- /* config sec single port max outstanding */
- writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
-
- /* config sec saa enable */
- writel(SEC_SAA_EN, base + SEC_SAA_EN_REG);
/* disable clock gate control */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg &= SEC_CLK_GATE_DISABLE;
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(0x1, base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(base +
- SEC_MEM_INIT_DONE_REG, reg, reg & 0x1,
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
if (ret) {
- dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ pci_err(qm->pdev, "fail to init sec mem\n");
return ret;
}
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg |= SEC_USER0_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL0_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL1_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
reg |= SEC_USER1_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel(SEC_SINGLE_PORT_MAX_TRANS,
+ qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+
+ writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
- writel(SEC_BD_ERR_CHK_EN0, base + SEC_BD_ERR_CHK_EN_REG0);
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
/* Enable sm4 xts mode multiple iv */
- writel(SEC_BD_ERR_CHK_EN1, base + SEC_BD_ERR_CHK_EN_REG1);
- writel(SEC_BD_ERR_CHK_EN3, base + SEC_BD_ERR_CHK_EN_REG3);
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
/* config endian */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
- reg |= sec_get_endian(hisi_sec);
- writel(reg, base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(qm);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
return 0;
}
-static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -540,22 +426,18 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
/* disable FLR triggered by BME(bus master enable) */
writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, qm->io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- if (sec_engine_init(hisi_sec))
- dev_err(&qm->pdev->dev, "sec_engine_init failed");
+ return sec_engine_init(qm);
}
-static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
+static void sec_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -566,50 +448,53 @@ static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
+static void sec_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
u32 val;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "v%d don't support hw error handle\n",
- qm->ver);
+		pci_info(qm->pdev, "V1 does not support hw error handling\n");
return;
}
- val = readl(base + SEC_CONTROL_REG);
- if (state) {
- /* clear SEC hw error source if having */
- writel(SEC_CORE_INT_ENABLE,
- hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- /* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+	/* clear SEC hw error source if any */
+ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
- /* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, base + SEC_RAS_NFE_REG);
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
- /* enable SEC block master OOO when m-bit error occur */
- val = val | SEC_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable RAS int */
- writel(SEC_RAS_DISABLE, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_NFE_REG);
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
- /* disable SEC hw error interrupts */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+	/* enable SEC block master OOO when an m-bit error occurs */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
- /* disable SEC block master OOO when m-bit error occur */
- val = val & SEC_AXI_SHUTDOWN_DISABLE;
- }
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
- writel(val, base + SEC_CONTROL_REG);
+static void sec_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+	/* disable SEC block master OOO when an m-bit error occurs */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -629,21 +514,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_sec_ctrl *ctrl = file->ctrl;
- u32 tmp, vfq_num;
+ u32 vfq_num;
+ u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -668,7 +553,7 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- SEC_CTRL_CNT_CLR_CE_BIT;
+ SEC_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
@@ -676,11 +561,11 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
- if (val != 1 && val != 0)
+ if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
@@ -695,6 +580,7 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
int ret;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
val = current_qm_read(file);
@@ -706,8 +592,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
spin_unlock_irq(&file->lock);
return -EINVAL;
}
+
spin_unlock_irq(&file->lock);
ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -726,7 +614,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -ENOSPC;
len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
- pos, buf, count);
+ pos, buf, count);
if (len < 0)
return len;
@@ -735,6 +623,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -EFAULT;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
ret = current_qm_write(file, val);
@@ -750,6 +639,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
ret = -EINVAL;
goto err_input;
}
+
spin_unlock_irq(&file->lock);
return count;
@@ -766,12 +656,11 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = ctrl->hisi_sec;
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &hisi_sec->sec_dfx;
+ struct hisi_sec_dfx *dfx = &sec->sec_dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
char buf[SEC_DBGFS_VAL_MAX_LEN];
@@ -781,7 +670,7 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -847,29 +736,30 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct dentry *tmp;
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&sec->ctrl->files[i].lock);
+ sec->ctrl->files[i].ctrl = sec->ctrl;
+ sec->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ sec->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_sec_core_debug_init(ctrl);
+ return hisi_sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -883,9 +773,8 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
if (ret)
goto failed_to_create;
- if (qm->pdev->device == SEC_PCI_DEVICE_ID_PF) {
- hisi_sec->ctrl->debug_root = dev_d;
- ret = hisi_sec_ctrl_debug_init(hisi_sec->ctrl);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = hisi_sec_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -897,71 +786,62 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_sec *hisi_sec)
+static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_sec_hw_error_init(struct hisi_sec *hisi_sec)
+static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- hisi_qm_hw_error_init(&hisi_sec->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
- | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_sec_hw_error_set_state(hisi_sec, true);
-}
+ const struct hisi_sec_hw_error *errs = sec_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
-static void hisi_sec_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
- val = readl(base + SEC_CONTROL_REG);
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, base + SEC_CONTROL_REG);
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, base + SEC_CONTROL_REG);
+ if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
+ err_val = readl(qm->io_base +
+ SEC_CORE_ECC_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ }
+ }
+ errs++;
+ }
}
-static u32 hisi_sec_get_hw_err_status(struct hisi_qm *qm)
+static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
-static void hisi_sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
-static void hisi_sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hisi_sec_hw_error *err = sec_hw_error;
- struct device *dev = &qm->pdev->dev;
- u32 err_val;
-
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ u32 val;
- if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
- err_val = readl(qm->io_base + SEC_CORE_ECC_INFO);
- dev_err(dev, "hisi-sec multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
- }
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
struct hisi_sec_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -983,59 +863,57 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = SEC_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- qm->err_ini.open_axi_master_ooo = hisi_sec_open_master_ooo;
- qm->err_ini.get_dev_hw_err_status = hisi_sec_get_hw_err_status;
- qm->err_ini.clear_dev_hw_err_status = hisi_sec_clear_hw_err_status;
- qm->err_ini.log_dev_hw_err = hisi_sec_log_hw_error;
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_sec_hw_error_init(hisi_sec);
+ qm->err_ini.get_dev_hw_err_status = sec_get_hw_err_status;
+ qm->err_ini.clear_dev_hw_err_status = sec_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "SRST";
+ qm->err_ini.hw_err_disable = sec_hw_error_disable;
+ qm->err_ini.hw_err_enable = sec_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = sec_set_user_domain_and_cache;
+ qm->err_ini.log_dev_hw_err = sec_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = sec_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = SEC_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
- qm->fun_type = (pdev->device == SEC_PCI_DEVICE_ID_PF) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "sec\ncipher\ndigest\n";
+ qm->qm_list = &sec_devices;
qm->wq = sec_wq;
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- return hisi_qm_init(qm);
+ return 0;
}
-static int hisi_sec_probe_init(struct hisi_qm *qm, struct hisi_sec *hisi_sec)
+static int hisi_sec_probe_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- return hisi_sec_pf_probe_init(hisi_sec);
+ return hisi_sec_pf_probe_init(qm);
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -1066,660 +944,104 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_sec)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_sec);
-
- hisi_sec_add_to_list(hisi_sec);
+ qm = &hisi_sec->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_sec->hisi_sec_list_lock = &hisi_sec_list_lock;
+ ret = hisi_sec_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
hisi_sec->ctx_q_num = ctx_q_num;
hisi_sec->fusion_limit = fusion_limit;
+ hisi_sec->fusion_tmout_nsec = fusion_time;
- hisi_sec->fusion_tmout_nsec = fusion_tmout_nsec;
-
- qm = &hisi_sec->qm;
-
- ret = hisi_sec_qm_init(qm, pdev);
+ ret = hisi_qm_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to pre init qm!\n");
- goto err_remove_from_list;
+ pci_err(pdev, "Failed to init qm (%d)!\n", ret);
+ return ret;
}
- ret = hisi_sec_probe_init(qm, hisi_sec);
+ ret = hisi_sec_probe_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to probe!\n");
+ pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
- if (ret)
+ if (ret) {
+ pci_err(pdev, "Failed to start qm (%d)!\n", ret);
goto err_qm_uninit;
+ }
- ret = hisi_sec_debugfs_init(hisi_sec);
+ ret = hisi_sec_debugfs_init(qm);
if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- return 0;
-
- err_qm_uninit:
- hisi_qm_uninit(qm);
- err_remove_from_list:
- hisi_sec_remove_from_list(hisi_sec);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_sec_vf_q_assign(struct hisi_sec *hisi_sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
+ pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- q_num = remain_q_num / num_vfs;
+ hisi_qm_add_to_list(qm, &sec_devices);
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ ret = hisi_sec_register_to_crypto(fusion_limit);
+ if (ret < 0) {
+ pci_err(pdev, "Failed to register driver to crypto!\n");
+ goto err_remove_from_list;
}
- return 0;
-}
-
-static int hisi_sec_clear_vft_config(struct hisi_sec *hisi_sec)
-{
- struct hisi_sec_ctrl *ctrl = hisi_sec->ctrl;
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 num_vfs = ctrl->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
}
- ctrl->num_vfs = 0;
-
return 0;
-}
-
-static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- u32 num_vfs;
- int pre_existing_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = hisi_sec_vf_q_assign(hisi_sec, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
- hisi_sec->ctrl->num_vfs = num_vfs;
+err_crypto_unregister:
+ hisi_sec_unregister_from_crypto(fusion_limit);
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_sec_clear_vft_config(hisi_sec);
- return ret;
- }
+err_remove_from_list:
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_sec_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_sec *sec, *vf_sec;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(sec, &hisi_sec_list, list) {
- dev = sec->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_sec = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_sec->qm);
- if (ret)
- goto frozen_fail;
- }
- }
+err_qm_uninit:
+ hisi_qm_uninit(qm);
-frozen_fail:
- mutex_unlock(&hisi_sec_list_lock);
return ret;
}
-static int hisi_sec_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_sec_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hisi_sec_clear_vft_config(hisi_sec);
-}
-
static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_sec_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
else
- return hisi_sec_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_sec_remove_wait_delay(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_sec_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(SEC_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_sec_remove(struct pci_dev *pdev)
{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_sec_remove_wait_delay(hisi_sec);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ (void)hisi_qm_sriov_disable(pdev, NULL);
- if (qm->fun_type == QM_HW_PF && hisi_sec->ctrl->num_vfs != 0)
- (void)hisi_sec_sriov_disable(pdev);
+ hisi_sec_unregister_from_crypto(fusion_limit);
- hisi_sec_debugfs_exit(hisi_sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_sec_hw_error_set_state(hisi_sec, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_sec_remove_from_list(hisi_sec);
-}
-
-static void hisi_sec_shutdown(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_sec->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_sec_reset_prepare_ready(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_SEC_RESET, &sec->status)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_prepare(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(SEC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_sec->qm.io_base + SEC_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_sec->qm.io_base +
- SEC_MASTER_TRANS_RETURN,
- val,
- (val == SEC_MASTER_TRANS_RETURN_RW),
- SEC_DELAY_10_US,
- SEC_POLL_TIMEOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "SRST",
- NULL, &value);
- if (ACPI_FAILURE(s) || value) {
- dev_err(dev, "Controller reset fails %lld\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec, hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_sec_hw_error_init(hisi_sec);
-
- return 0;
-}
-
-static int hisi_sec_controller_reset(struct hisi_sec *hisi_sec)
-{
- struct device *dev = &hisi_sec->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_sec_controller_reset_prepare(hisi_sec);
- if (ret)
- return ret;
-
- ret = hisi_sec_soft_reset(hisi_sec);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_sec_controller_reset_done(hisi_sec);
- if (ret)
- return ret;
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hisi_sec_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset sec controller */
- ret = hisi_sec_controller_reset(hisi_sec);
- if (ret) {
- dev_warn(&pdev->dev, "hisi_sec controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_sec_set_hw_error(struct hisi_sec *hisi_sec, bool state)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_sec_hw_error_set_state(sec, state);
-}
-
-static int hisi_sec_get_hw_error_status(struct hisi_sec *hisi_sec)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_sec->qm.io_base + SEC_CORE_INT_STATUS) &
- SEC_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_sec_check_hw_error(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_sec_get_hw_error_status(sec);
-}
-
-static void hisi_sec_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_sec_check_hw_error(hisi_sec)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_sec_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hisi_sec *hisi_sec = pci_get_drvdata(pf_pdev);
- struct device *dev = &hisi_sec->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hisi_sec->qm.pdev, PCI_COMMAND, &id);
- if (id == SEC_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
-}
-
-static void hisi_sec_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec,
- hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queue\n");
- goto flr_done;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to reset vf\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_sec_flr_reset_complete(pdev);
-
- dev_info(dev, "FLR reset complete\n");
}
static const struct pci_error_handlers hisi_sec_err_handler = {
- .error_detected = hisi_sec_error_detected,
- .slot_reset = hisi_sec_slot_reset,
- .reset_prepare = hisi_sec_reset_prepare,
- .reset_done = hisi_sec_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_sec_pci_driver = {
@@ -1729,7 +1051,7 @@ static void hisi_sec_reset_done(struct pci_dev *pdev)
.remove = hisi_sec_remove,
.sriov_configure = hisi_sec_sriov_configure,
.err_handler = &hisi_sec_err_handler,
- .shutdown = hisi_sec_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_sec_register_debugfs(void)
@@ -1759,35 +1081,25 @@ static int __init hisi_sec_init(void)
return -ENOMEM;
}
+ INIT_LIST_HEAD(&sec_devices.list);
+ mutex_init(&sec_devices.lock);
+ sec_devices.check = NULL;
+
hisi_sec_register_debugfs();
ret = pci_register_driver(&hisi_sec_pci_driver);
if (ret < 0) {
+ hisi_sec_unregister_debugfs();
+ if (sec_wq)
+ destroy_workqueue(sec_wq);
pr_err("Failed to register pci driver.\n");
- goto err_pci;
- }
-
- pr_info("hisi_sec: register to crypto\n");
- ret = hisi_sec_register_to_crypto(fusion_limit);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_probe_device;
}
- return 0;
-
- err_probe_device:
- pci_unregister_driver(&hisi_sec_pci_driver);
- err_pci:
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
return ret;
}
static void __exit hisi_sec_exit(void)
{
- hisi_sec_unregister_from_crypto(fusion_limit);
pci_unregister_driver(&hisi_sec_pci_driver);
hisi_sec_unregister_debugfs();
if (sec_wq)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 560751a..ddd5924 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -18,19 +18,12 @@ enum hisi_zip_error_type {
};
struct hisi_zip_ctrl;
-
-enum hisi_zip_status {
- HISI_ZIP_RESET,
-};
-
struct hisi_zip {
struct hisi_qm qm;
- struct list_head list;
struct hisi_zip_ctrl *ctrl;
- unsigned long status;
};
-struct hisi_zip *find_zip_device(int node);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index b2965ba..b247021 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -153,26 +153,19 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe->dest_addr_h = upper_32_bits(d_addr);
}
-static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
int alg_type, int req_type)
{
- struct device *dev = &qm->pdev->dev;
- struct hisi_qp *qp;
+ struct device *dev = &qp->qm->pdev->dev;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp)) {
- dev_err(dev, "create qp failed!\n");
- return PTR_ERR(qp);
- }
-
qp->req_type = req_type;
+ qp->alg_type = alg_type;
qp->qp_ctx = ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "start qp failed!\n");
- hisi_qm_release_qp(qp);
return ret;
}
@@ -188,26 +181,27 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
- struct hisi_qm *qm;
int ret, i, j;
- /* find the proper zip device */
- hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
- if (!hisi_zip) {
- pr_err("Failed to find a proper ZIP device!\n");
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ if (ret) {
+ pr_err("Can not create zip qps!\n");
return -ENODEV;
}
- qm = &hisi_zip->qm;
+
+ hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
+ ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
+ hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
return ret;
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 5e40fbf..54681dc 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -15,7 +15,6 @@
#include <linux/uacce.h>
#include "zip.h"
-#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
@@ -75,7 +74,6 @@
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
-#define HZIP_RAS_NFE_MBIT_DISABLE ~HZIP_CORE_INT_STATUS_M_ECC
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
@@ -95,95 +93,14 @@
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF
-#define HZIP_WR_MSI_PORT 0xF7FF
+#define HZIP_WR_PORT BIT(11)
-#define HZIP_ENABLE 1
-#define HZIP_DISABLE 0
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
#define FORMAT_DECIMAL 10
-#define HZIP_REG_RD_INTVRL_US 10
-#define HZIP_REG_RD_TMOUT_US 1000
-#define HZIP_RESET_WAIT_TIMEOUT 400
-#define HZIP_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static LIST_HEAD(hisi_zip_list);
-static DEFINE_MUTEX(hisi_zip_list_lock);
-
-struct hisi_zip_resource {
- struct hisi_zip *hzip;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_zip_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_zip *find_zip_device(int node)
-{
- struct hisi_zip *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_zip_resource *res, *tmp;
- struct hisi_zip *hisi_zip;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_zip_list_lock);
-
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_zip->qm.pdev->dev;
- res->hzip = hisi_zip;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
- ret = tmp->hzip;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_zip_list_lock);
-
- ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
- mutex_unlock(&hisi_zip_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_zip_list_lock);
- return NULL;
-}
+static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -229,9 +146,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
- struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
@@ -282,73 +197,49 @@ enum {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
+ return mode_set(val, kp);
+}
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
.set = pf_q_num_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -357,81 +248,67 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
+int zip_create_qps(struct hisi_qp **qps, int ctx_num)
{
- mutex_lock(&hisi_zip_list_lock);
- list_add_tail(&hisi_zip->list, &hisi_zip_list);
- mutex_unlock(&hisi_zip_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
-static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
-{
- mutex_lock(&hisi_zip_list_lock);
- list_del(&hisi_zip->list);
- mutex_unlock(&hisi_zip_list_lock);
+ return hisi_qm_alloc_qps_node(node, &zip_devices,
+ qps, ctx_num, 0);
}
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ void __iomem *base = qm->io_base;
/* qm user domain */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_zip->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_zip->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
+
/* disable FLR triggered by BME(bus master enable) */
- writel(PEH_AXUSER_CFG, hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_1);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_SGL_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
if (qm->use_sva) {
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
}
/* let's open all compression/decompression cores */
writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- hisi_zip->qm.io_base + HZIP_CLOCK_GATE_CTRL);
+ base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
- FIELD_PREP(CQC_CACHE_WB_THRD, 1),
- hisi_zip->qm.io_base + QM_CACHE_CTL);
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
-static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
+static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -442,52 +319,70 @@ static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
hisi_qm_debug_regs_clear(qm);
}
-
-static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
+static int hisi_zip_hw_err_pre_set(struct hisi_qm *qm, u32 *val)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 val;
-
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
pci_info(qm->pdev, "ZIP v%d cannot support hw error handle!\n",
qm->ver);
- return;
+ return -EINVAL;
}
/* configure error type */
- writel(0x1, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_NFE_ENB);
-
- val = readl(hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- if (state) {
- /* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
- HZIP_CORE_INT_SOURCE);
- /* enable ZIP hw error interrupts */
- writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
-
- /* enable ZIP block master OOO when m-bit error occur */
- val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_DISABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* disable ZIP block master OOO when m-bit error occur */
- val = val & HZIP_AXI_SHUTDOWN_DISABLE;
- }
+ *val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ return 0;
+}
+
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* clear ZIP hw error source if having */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ /* enable ZIP hw error interrupts */
+ writel(0, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* enable ZIP block master OOO when m-bit error occur */
+ val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* disable ZIP hw error interrupts */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* disable ZIP block master OOO when m-bit error occur */
+ val = val & HZIP_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
- struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
+ struct hisi_zip *zip = file->ctrl->hisi_zip;
- return &hisi_zip->qm;
+ return &zip->qm;
}
static u32 current_qm_read(struct ctrl_debug_file *file)
@@ -500,22 +395,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -638,10 +532,8 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
.write = hisi_zip_ctrl_debug_write,
};
-static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_zip *hisi_zip = ctrl->hisi_zip;
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
@@ -657,7 +549,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -677,29 +569,29 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
-static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&zip->ctrl->files[i].lock);
+ zip->ctrl->files[i].ctrl = zip->ctrl;
+ zip->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root, zip->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_zip_core_debug_init(ctrl);
+ return hisi_zip_core_debug_init(qm);
}
-static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
+static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -714,8 +606,7 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
if (qm->fun_type == QM_HW_PF) {
- hisi_zip->ctrl->debug_root = dev_d;
- ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
+ ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -727,47 +618,16 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
return ret;
}
-static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
+static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
if (qm->fun_type == QM_HW_PF) {
- hisi_zip_debug_regs_clear(hisi_zip);
+ hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
-{
- hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_zip_hw_error_set_state(hisi_zip, true);
-}
-
-static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HZIP_CORE_INT_STATUS);
-}
-
-static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
-}
-
-static void hisi_zip_set_ecc(struct hisi_qm *qm)
-{
- u32 nfe_enb;
-
- nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(nfe_enb & HZIP_RAS_NFE_MBIT_DISABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET);
- qm->err_ini.is_dev_ecc_mbit = 1;
-}
-
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_zip_hw_error *err = zip_hw_error;
@@ -792,17 +652,53 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HZIP_CORE_INT_STATUS);
+}
+
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val & HZIP_AXI_SHUTDOWN_DISABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
+static int hisi_zip_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct hisi_zip_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
- hisi_zip->ctrl = ctrl;
- ctrl->hisi_zip = hisi_zip;
+ zip->ctrl = ctrl;
+ ctrl->hisi_zip = zip;
switch (qm->ver) {
case QM_HW_V1:
@@ -817,61 +713,71 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HZIP_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "ZRST";
+ qm->err_ini.hw_err_disable = hisi_zip_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_zip_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_zip_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error;
- qm->err_ini.inject_dev_hw_err = hisi_zip_set_ecc;
+ qm->err_ini.open_axi_master_ooo = hisi_zip_open_axi_master_ooo;
+ qm->err_ini.close_axi_master_ooo = hisi_zip_close_axi_master_ooo;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_zip_hw_error_init(hisi_zip);
- hisi_zip_debug_regs_clear(hisi_zip);
+ qm->err_ini.err_info.msi_wr_port = HZIP_WR_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_zip_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int hisi_zip_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HZIP_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->qm_list = &zip_devices;
return 0;
}
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
+ struct hisi_zip *zip;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
+ zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+ if (!zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- hisi_zip_add_to_list(hisi_zip);
+ qm = &zip->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_zip->status = 0;
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_zip_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- ret = -EINVAL;
- goto err_remove_from_list;
- }
+ hisi_qm_add_to_list(qm, &zip_devices);
ret = hisi_qm_init(qm);
if (ret) {
@@ -880,15 +786,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
+ ret = hisi_zip_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_remove_from_list;
}
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -914,7 +816,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_zip_debugfs_init(hisi_zip);
+ ret = hisi_zip_debugfs_init(qm);
if (ret)
pci_err(pdev, "Failed to init debugfs (%d)!\n", ret);
@@ -923,630 +825,62 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "Failed to register driver to crypto!\n");
goto err_qm_stop;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ hisi_zip_unregister_from_crypto();
err_qm_stop:
- hisi_zip_debugfs_exit(hisi_zip);
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_qm_uninit:
hisi_qm_uninit(qm);
err_remove_from_list:
- hisi_zip_remove_from_list(hisi_zip);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
-{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
- int ret;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- ctrl->num_vfs = 0;
-
- return 0;
-}
-
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_zip_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_zip *zip, *vf_zip;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(zip, &hisi_zip_list, list) {
- dev = zip->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_zip = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_zip->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hisi_zip_list_lock);
+ hisi_qm_del_from_list(qm, &zip_devices);
return ret;
}
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_zip_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &zip_devices);
else
- return hisi_zip_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_zip_remove_wait_delay(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_zip_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(ZIP_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_zip_remove(struct pci_dev *pdev)
{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_zip_remove_wait_delay(hisi_zip);
+ hisi_qm_remove_wait_delay(qm, &zip_devices);
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- (void)hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, NULL);
hisi_zip_unregister_from_crypto();
- hisi_zip_debugfs_exit(hisi_zip);
+
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_zip_hw_error_set_state(hisi_zip, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_zip_remove_from_list(hisi_zip);
-}
-
-static void hisi_zip_shutdown(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_zip->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_zip_reset_prepare_ready(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_ZIP_RESET, &zip->status)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_prepare(struct hisi_zip *hisi_zip,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_zip->qm.io_base + HZIP_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_zip->qm.io_base +
- HZIP_MASTER_TRANS_RETURN, val,
- (val == HZIP_MASTER_TRANS_RETURN_RW),
- HZIP_REG_RD_INTVRL_US,
- HZIP_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "ZRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_done(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_zip->ctrl->num_vfs) {
- ret = hisi_zip_vf_q_assign(hisi_zip, hisi_zip->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_zip_hw_error_init(hisi_zip);
-
- return 0;
-}
-
-static int hisi_zip_controller_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_zip_controller_reset_prepare(hisi_zip);
- if (ret)
- return ret;
-
- ret = hisi_zip_soft_reset(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_zip_controller_reset_done(hisi_zip);
- if (ret)
- return ret;
-
- clear_bit(HISI_ZIP_RESET, &hisi_zip->status);
-
- dev_info(dev, "Controller reset complete\n");
-
- return ret;
-}
-
-static pci_ers_result_t hisi_zip_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- ret = hisi_zip_controller_reset(hisi_zip);
- if (ret) {
- dev_err(&pdev->dev, "hisi_zip controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_zip_set_hw_error(struct hisi_zip *hisi_zip, bool state)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_zip_hw_error_set_state(zip, state);
-}
-
-static int hisi_zip_get_hw_error_status(struct hisi_zip *hisi_zip)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS) &
- HZIP_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_zip_check_hw_error(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_zip_get_hw_error_status(zip);
-}
-
-static void hisi_zip_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_zip_check_hw_error(hisi_zip)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_zip_flr_reset_complete(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct device *dev = &zip->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(zip->qm.pdev, PCI_COMMAND, &id);
- if (id == HZIP_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_ZIP_RESET, &zip->status);
-}
-
-static void hisi_zip_reset_done(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- if (hisi_zip->ctrl->num_vfs)
- hisi_zip_vf_q_assign(hisi_zip,
- hisi_zip->ctrl->num_vfs);
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_zip_flr_reset_complete(hisi_zip);
-
- dev_info(dev, "FLR reset complete\n");
+ hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
- .error_detected = hisi_zip_error_detected,
- .slot_reset = hisi_zip_slot_reset,
- .reset_prepare = hisi_zip_reset_prepare,
- .reset_done = hisi_zip_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -1556,7 +890,7 @@ static void hisi_zip_reset_done(struct pci_dev *pdev)
.remove = hisi_zip_remove,
.sriov_configure = hisi_zip_sriov_configure,
.err_handler = &hisi_zip_err_handler,
- .shutdown = hisi_zip_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_zip_register_debugfs(void)
@@ -1578,6 +912,10 @@ static int __init hisi_zip_init(void)
{
int ret;
+ INIT_LIST_HEAD(&zip_devices.list);
+ mutex_init(&zip_devices.lock);
+ zip_devices.check = NULL;
+
hisi_zip_register_debugfs();
ret = pci_register_driver(&hisi_zip_pci_driver);
--
1.8.3