
[PATCH] qm: Move all the same logic functions of hisilicon crypto to qm
by Yang Yingliang, 16 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch we move the logic common to the HiSilicon accelerator
drivers into the qm module to simplify the code, covering RAS/FLR/SRIOV
handling as well as the uacce_mode/pf_q_num/vfs_num module parameter
setting.
In qm.h we add mode_set/q_num_set/vf_num_set so that each accelerator
driver can implement its uacce_mode/pf_q_num/vfs_num module parameters
on top of common helpers, as sketched below.
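For illustration only (condensed from the hpre hunks below, not an
extra change): a driver's parameter hook reduces to a thin wrapper
around the new qm helper, with pf_q_num being the driver's existing
module variable.

/* Illustrative sketch: wiring a module parameter to the qm helper. */
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	/* validate the value against the device's supported queue range */
	return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);

Equivalent wrappers around mode_set() and vf_num_set() cover the
uacce_mode and vfs_num parameters.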
In qm.c, hisi_qm_add_to_list and hisi_qm_del_from_list can be called
to manage accelerator instances through a hisi_qm_list. We additionally
add hisi_qm_alloc_qps_node to fix the problem that a device is found
but the subsequent queue request fails. Because the RAS, FLR and SRIOV
flows are consistent across the different accelerator drivers, we add
corresponding common interfaces for them.
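For example, queue allocation against the device list now looks like
this (sketch condensed from the hpre changes below):

/* Allocate one queue on the registered device closest to the calling
 * CPU's NUMA node; hisi_qm_alloc_qps_node() returns 0 only when all
 * requested queues could be created on a single device.
 */
struct hisi_qp *hpre_create_qp(void)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;

	if (!hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0))
		return qp;

	return NULL;
}

Queues obtained this way are later released through the matching
helper, hisi_qm_free_qps(&qp, 1).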
Meanwhile, the zip/hpre/sec/rde accelerator drivers are adapted to
match the qm changes, including RAS/FLR/SRIOV processing, module
parameter setting and queue allocation.
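To make the shared RAS/FLR path concrete, a PF probe now mostly fills
in qm->err_ini and lets the qm core drive the rest (illustrative
sketch condensed from hpre_pf_probe_init() below):

static int hpre_pf_probe_init(struct hisi_qm *qm)
{
	/* device-specific error information consumed by the common flow */
	qm->err_ini.err_info.ce = QM_BASE_CE;
	qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
	qm->err_ini.err_info.acpi_rst = "HRST";

	/* device-specific callbacks invoked by qm during RAS/reset */
	qm->err_ini.hw_err_enable = hpre_hw_error_enable;
	qm->err_ini.hw_err_disable = hpre_hw_error_disable;
	qm->err_ini.set_usr_domain_cache = hpre_set_user_domain_and_cache;
	qm->err_ini.log_dev_hw_err = hpre_log_hw_error;

	hisi_qm_dev_err_init(qm);	/* qm core takes over from here */

	return 0;
}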
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Wei Zhang <zhangwei375(a)huawei.com>
Reviewed-by: Guangwei Zhang <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Reviewed-by: Shukun Tan <tanshukun1(a)huawei.com>
Reviewed-by: Hao Fang <fanghao11(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 9 +-
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 20 +-
drivers/crypto/hisilicon/hpre/hpre_main.c | 944 +++---------------
drivers/crypto/hisilicon/qm.c | 1093 +++++++++++++++++----
drivers/crypto/hisilicon/qm.h | 209 +++-
drivers/crypto/hisilicon/rde/rde.h | 11 +-
drivers/crypto/hisilicon/rde/rde_api.c | 29 +-
drivers/crypto/hisilicon/rde/rde_api.h | 2 +-
drivers/crypto/hisilicon/rde/rde_main.c | 717 ++++----------
drivers/crypto/hisilicon/sec2/sec.h | 13 +-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 83 +-
drivers/crypto/hisilicon/sec2/sec_main.c | 1364 +++++++--------------------
drivers/crypto/hisilicon/zip/zip.h | 9 +-
drivers/crypto/hisilicon/zip/zip_crypto.c | 30 +-
drivers/crypto/hisilicon/zip/zip_main.c | 1152 +++++-----------------
15 files changed, 2053 insertions(+), 3632 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ba7c88e..3ac02ef 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -35,25 +35,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
-#define HPRE_RESET 0
-#define HPRE_WAIT_DELAY 1000
-
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
- struct dentry *debug_root;
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
- struct list_head list;
struct hpre_debug debug;
- u32 num_vfs;
- unsigned long status;
};
enum hpre_alg_type {
@@ -80,7 +73,7 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hpre *hpre_find_device(int node);
+struct hisi_qp *hpre_create_qp(void);
int hpre_algs_register(void);
void hpre_algs_unregister(void);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index aadc975..7610e13 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
struct hisi_qp *qp;
- struct hpre *hpre;
int ret;
- /* find the proper hpre device, which is near the current CPU core */
- hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
- if (!hpre) {
- pr_err("Can not find proper hpre device!\n");
- return ERR_PTR(-ENODEV);
- }
-
- qp = hisi_qm_create_qp(&hpre->qm, 0);
- if (IS_ERR(qp)) {
- pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ qp = hpre_create_qp();
+ if (!qp) {
+ pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- hisi_qm_release_qp(qp);
- pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ hisi_qm_free_qps(&qp, 1);
+ pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
@@ -337,7 +329,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6a3bce2..4dc0d3e 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,9 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_ENABLE 1
-#define HPRE_DISABLE 0
-#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QUEUE_NUM_V1 4096
#define HPRE_QM_ABNML_INT_MASK 0x100004
@@ -63,10 +60,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -83,24 +76,18 @@
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
-#define HPRE_RESET_WAIT_TIMEOUT 400
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE
-#define HPRE_WR_MSI_PORT 0xFFFB
+#define HPRE_WR_MSI_PORT BIT(2)
-#define HPRE_HW_ERROR_IRQ_ENABLE 1
-#define HPRE_HW_ERROR_IRQ_DISABLE 0
-#define HPRE_PCI_COMMAND_INVALID 0xFFFFFFFF
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
-#define HPRE_QM_BME_FLR BIT(7)
-#define HPRE_QM_PM_FLR BIT(11)
-#define HPRE_QM_SRIOV_FLR BIT(12)
-
-#define HPRE_USLEEP 10
+#define HPRE_QM_BME_FLR BIT(7)
+#define HPRE_QM_PM_FLR BIT(11)
+#define HPRE_QM_SRIOV_FLR BIT(12)
/* function index:
* 1 for hpre bypass mode,
@@ -108,8 +95,7 @@
*/
#define HPRE_VIA_MSI_DSM 1
-static LIST_HEAD(hpre_list);
-static DEFINE_MUTEX(hpre_list_lock);
+static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -183,59 +169,29 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 q_num;
- u32 n = 0;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
@@ -243,46 +199,31 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-static inline void hpre_add_to_list(struct hpre *hpre)
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
{
- mutex_lock(&hpre_list_lock);
- list_add_tail(&hpre->list, &hpre_list);
- mutex_unlock(&hpre_list_lock);
+ return vf_num_set(val, kp);
}
-static inline void hpre_remove_from_list(struct hpre *hpre)
-{
- mutex_lock(&hpre_list_lock);
- list_del(&hpre->list);
- mutex_unlock(&hpre_list_lock);
-}
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
-struct hpre *hpre_find_device(int node)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+struct hisi_qp *hpre_create_qp(void)
{
- struct hpre *hpre, *ret = NULL;
- int min_distance = INT_MAX;
- struct device *dev;
- int dev_node = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = &hpre->qm.pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- ret = hpre;
- min_distance = node_distance(dev_node, node);
- }
- }
- mutex_unlock(&hpre_list_lock);
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp = NULL;
+ int ret;
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
+
+ return NULL;
}
static void hpre_pasid_enable(struct hisi_qm *qm)
@@ -351,9 +292,8 @@ static int hpre_set_cluster(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev;
u32 val;
int ret;
@@ -403,7 +343,7 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre)
pci_err(pdev, "acpi_evaluate_dsm err.\n");
/* disable FLR triggered by BME(bus master enable) */
- val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG);
+ val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
val |= HPRE_QM_PM_FLR;
writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
@@ -433,23 +373,21 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hpre *hpre)
+static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val &= AM_OOO_SHUTDOWN_DISABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
-static void hpre_hw_error_enable(struct hpre *hpre)
+static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* clear HPRE hw error source if having */
@@ -462,9 +400,9 @@ static void hpre_hw_error_enable(struct hpre *hpre)
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val |= AM_OOO_SHUTDOWN_ENABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -484,9 +422,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
if (val > num_vfs)
@@ -657,11 +593,14 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
struct dentry *tmp, *file_dir;
+ struct hpre *hpre;
- if (dir)
+ if (dir) {
file_dir = dir;
- else
- file_dir = dbg->debug_root;
+ } else {
+ hpre = container_of(dbg, struct hpre, debug);
+ file_dir = hpre->qm.debug.debug_root;
+ }
if (type >= HPRE_DEBUG_FILE_NUM)
return -EINVAL;
@@ -694,7 +633,8 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
+ regset);
if (!tmp)
return -ENOENT;
@@ -716,7 +656,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -761,9 +701,9 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
-static int hpre_debugfs_init(struct hpre *hpre)
+static int hpre_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
struct dentry *dir;
int ret;
@@ -779,7 +719,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
goto failed_to_create;
if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
- hpre->debug.debug_root = dir;
ret = hpre_ctrl_debug_init(&hpre->debug);
if (ret)
goto failed_to_create;
@@ -791,69 +730,41 @@ static int hpre_debugfs_init(struct hpre *hpre)
return ret;
}
-static void hpre_debugfs_exit(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- debugfs_remove_recursive(qm->debug.debug_root);
-}
-
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
+ int ret;
- if (rev_id == QM_HW_V1) {
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "rsa\ndh\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HPRE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ if (qm->ver == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->qm_list = &hpre_devices;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
- qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "rsa\ndh\n";
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
- if (pdev->is_physfn) {
- qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- }
return 0;
}
-static void hpre_hw_err_init(struct hpre *hpre)
-{
- hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
-}
-
-static void hpre_open_master_ooo(struct hisi_qm *qm)
+static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- u32 val;
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &qm->pdev->dev;
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- writel(val & AM_OOO_SHUTDOWN_DISABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
- writel(val | AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -866,41 +777,47 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
-static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hpre_hw_error *err = hpre_hw_errors;
- struct device *dev = &qm->pdev->dev;
+ u32 value;
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & AM_OOO_SHUTDOWN_DISABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
-static int hpre_pf_probe_init(struct hpre *hpre)
+static int hpre_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
int ret;
if (qm->ver != QM_HW_V2)
return -EINVAL;
qm->ctrl_q_num = HPRE_QUEUE_NUM_V2;
- qm->err_ini.qm_wr_port = HPRE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = (HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR);
- qm->err_ini.open_axi_master_ooo = hpre_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "HRST";
+
+ qm->err_ini.hw_err_disable = hpre_hw_error_disable;
+ qm->err_ini.hw_err_enable = hpre_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hpre_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hpre_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = hpre_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HPRE_WR_MSI_PORT;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = qm->err_ini.set_usr_domain_cache(qm);
if (ret)
return ret;
- hpre_hw_err_init(hpre);
+ hisi_qm_dev_err_init(qm);
return 0;
}
@@ -914,10 +831,9 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
if (!hpre)
return -ENOMEM;
-
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
+
ret = hpre_qm_pre_init(qm, pdev);
if (ret)
return ret;
@@ -929,7 +845,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
+ ret = hpre_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_with_qm_init;
@@ -947,26 +863,35 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_err_init;
}
- ret = hpre_debugfs_init(hpre);
+ ret = hpre_debugfs_init(qm);
if (ret)
pci_warn(pdev, "init debugfs fail!\n");
- hpre_add_to_list(hpre);
+ hisi_qm_add_to_list(qm, &hpre_devices);
ret = hpre_algs_register();
if (ret < 0) {
- hpre_remove_from_list(hpre);
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
+ hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
- if (pdev->is_physfn)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -974,627 +899,51 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
- return num_vfs;
-}
-
-static int hpre_try_frozen_vfs(struct pci_dev *pdev)
-{
- int ret = 0;
- struct hpre *hpre, *vf_hpre;
- struct pci_dev *dev;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_hpre = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_hpre->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* While VF is in used, SRIOV cannot be disabled.
- * However, there is a risk that the behavior is uncertain if the
- * device is in hardware resetting.
- */
- if (hpre_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev,
- "Uacce user space task is using its VF!\n");
- return -EBUSY;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hpre_clear_vft_config(hpre);
-}
-
static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
else
- return hpre_sriov_disable(pdev);
-}
-
-static void hpre_remove_wait_delay(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- while (hisi_qm_frozen(&hpre->qm) ||
- ((qm->fun_type == QM_HW_PF) &&
- hpre_try_frozen_vfs(hpre->qm.pdev)))
- usleep_range(HPRE_USLEEP, HPRE_USLEEP);
- udelay(HPRE_WAIT_DELAY);
+ return hisi_qm_sriov_disable(pdev, &hpre_devices);
}
static void hpre_remove(struct pci_dev *pdev)
{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
- hpre_remove_wait_delay(hpre);
-
+ hisi_qm_remove_wait_delay(qm, &hpre_devices);
+#endif
hpre_algs_unregister();
- hpre_remove_from_list(hpre);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0)
- hpre_sriov_disable(pdev);
-
+ hisi_qm_del_from_list(qm, &hpre_devices);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev, NULL);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
if (qm->fun_type == QM_HW_PF) {
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
-
- hpre_debugfs_exit(hpre);
+ debugfs_remove_recursive(qm->debug.debug_root);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
-static void hpre_shutdown(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hpre->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hpre_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_reset_prepare_rdy(struct hpre *hpre)
-{
- struct pci_dev *pdev = hpre->qm.pdev;
- struct hpre *hisi_hpre = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HPRE_RESET, &hisi_hpre->status)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hpre_controller_reset_prepare(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hpre_soft_reset(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value = 0;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hpre->qm.io_base + HPRE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hpre->qm.io_base +
- HPRE_MASTER_TRANS_RETURN, val,
- (val == MASTER_TRANS_RETURN_RW),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "HRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hpre_vf_reset_done(struct pci_dev *pdev)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_controller_reset_done(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret)
- return ret;
-
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start QM!\n");
- return ret;
- }
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hpre_hw_err_init(hpre);
-
- return 0;
-}
-
-static int hpre_controller_reset(struct hpre *hpre)
-{
- struct device *dev = &hpre->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hpre_controller_reset_prepare(hpre);
- if (ret)
- return ret;
-
- ret = hpre_soft_reset(hpre);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hpre_controller_reset_done(hpre);
- if (ret)
- return ret;
-
- clear_bit(HPRE_RESET, &hpre->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hpre_slot_reset(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset hpre controller */
- ret = hpre_controller_reset(hpre);
- if (ret) {
- dev_err(&pdev->dev, "hpre controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hpre_set_hw_error(struct hpre *hisi_hpre, bool enable)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (enable) {
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
- } else {
- hisi_qm_hw_error_uninit(qm);
- hpre_hw_error_disable(hpre);
- }
-}
-
-static int hpre_get_hw_error_status(struct hpre *hpre)
-{
- u32 err_sts;
-
- err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS) &
- (HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR);
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-/* check the interrupt is ecc-zbit error or not */
-static int hpre_check_hw_error(struct hpre *hisi_hpre)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- /* Now the ecc-2bit is ce_err, so this func is always return 0 */
- return hpre_get_hw_error_status(hpre);
-}
-
-static void hpre_reset_prepare(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_DISABLE);
-
- while (hpre_check_hw_error(hpre)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static bool hpre_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hpre *hpre = pci_get_drvdata(pf_pdev);
- struct device *dev = &hpre->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hpre->qm.pdev, PCI_COMMAND, &id);
- if (id == HPRE_PCI_COMMAND_INVALID) {
- dev_err(dev, "Device HPRE can not be used!\n");
- return false;
- }
-
- clear_bit(HPRE_RESET, &hpre->status);
- return true;
-}
-
-static void hpre_reset_done(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return;
- }
-
- if (pdev->is_physfn) {
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- hpre_hw_err_init(hpre);
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return;
- }
- }
-
-flr_done:
- if (hpre_flr_reset_complete(pdev))
- dev_info(dev, "FLR reset complete\n");
-}
-
static const struct pci_error_handlers hpre_err_handler = {
- .error_detected = hpre_error_detected,
- .slot_reset = hpre_slot_reset,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
#ifdef CONFIG_CRYPTO_QM_UACCE
- .reset_prepare = hpre_reset_prepare,
- .reset_done = hpre_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
#endif
};
@@ -1605,7 +954,7 @@ static void hpre_reset_done(struct pci_dev *pdev)
.remove = hpre_remove,
.sriov_configure = hpre_sriov_configure,
.err_handler = &hpre_err_handler,
- .shutdown = hpre_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hpre_register_debugfs(void)
@@ -1618,20 +967,19 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
-static void hpre_unregister_debugfs(void)
-{
- debugfs_remove_recursive(hpre_debugfs_root);
-}
-
static int __init hpre_init(void)
{
int ret;
+ INIT_LIST_HEAD(&hpre_devices.list);
+ mutex_init(&hpre_devices.lock);
+ hpre_devices.check = NULL;
+
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -1641,7 +989,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4bd7739..e89a770 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
@@ -117,7 +119,7 @@
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0)
#define QM_ABNORMAL_INT_STATUS 0x100008
-#define QM_ABNORMAL_INT_SET 0x10000c
+#define QM_PF_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_VF 0x3f
@@ -167,17 +169,30 @@
#define TASK_TIMEOUT 10000
#define WAIT_PERIOD 20
-#define MAX_WAIT_COUNTS 1000
#define WAIT_PERIOD_US_MAX 200
#define WAIT_PERIOD_US_MIN 100
-#define MAX_WAIT_TASK_COUNTS 10
-
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define REMOVE_WAIT_DELAY 10
+#define MAX_WAIT_COUNTS 1000
+#define DELAY_PERIOD_MS 100
+#define QM_DEV_RESET_STATUS 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PCI_COMMAND_INVALID 0xFFFFFFFF
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define MASTER_TRANS_RETURN_RW 3
+#define MASTER_TRANS_RETURN 0x300150
+#define MASTER_GLOBAL_CTRL 0x300000
+#define QM_REG_RD_INTVRL_US 10
+#define QM_REG_RD_TMOUT_US 1000
+#define AM_CFG_PORT_RD_EN 0x300018
#define AM_CFG_PORT_WR_EN 0x30001C
-#define AM_CFG_PORT_WR_EN_VALUE 0xFFFF
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define AM_ROB_ECC_INT_STS 0x300104
#define ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
+#define QM_DBG_SHOW_SHIFT 16
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -219,6 +234,12 @@ enum vft_type {
CQC_VFT,
};
+struct hisi_qm_resource {
+ struct hisi_qm *qm;
+ int distance;
+ struct list_head list;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -237,11 +258,6 @@ struct hisi_qm_hw_ops {
[QM_STATE] = "qm_state",
};
-struct hisi_qm_hw_error {
- u32 int_msk;
- const char *msg;
-};
-
static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
@@ -1115,13 +1131,20 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
{
u32 irq_enable = ce | nfe | fe | msi;
u32 irq_unmask = ~irq_enable;
+ u32 error_status;
qm->error_mask = ce | nfe | fe;
qm->msi_mask = msi;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
+ error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+ if (!(qm->hw_status & BIT(QM_DEV_RESET_STATUS))
+ || !error_status)
+ error_status = QM_ABNORMAL_INT_SOURCE_CLR;
+ else
+ error_status &= qm->error_mask;
+
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1190,9 +1213,7 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
if (error_status & QM_ECC_MBIT)
- qm->err_ini.is_qm_ecc_mbit = 1;
- else
- qm->err_ini.is_qm_ecc_mbit = 0;
+ qm->err_ini.err_info.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
return PCI_ERS_RESULT_NEED_RESET;
@@ -1513,7 +1534,8 @@ static void qm_qp_has_no_task(struct hisi_qp *qp)
int i = 0;
int ret;
- if (qp->qm->err_ini.is_qm_ecc_mbit || qp->qm->err_ini.is_dev_ecc_mbit)
+ if (qp->qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qp->qm->err_ini.err_info.is_dev_ecc_mbit)
return;
addr = qm_ctx_alloc(qp->qm, size, &dma_addr);
@@ -1967,6 +1989,74 @@ static int qm_unregister_uacce(struct hisi_qm *qm)
#endif
/**
+ * hisi_qm_frozen() - Try to freeze the QM to cut off continuous queue
+ * requests. If there is a user on the QM, return failure without
+ * doing anything.
+ * @qm: The qm to be frozen.
+ *
+ * This function freezes the QM, then we can do SRIOV disabling.
+ */
+static int hisi_qm_frozen(struct hisi_qm *qm)
+{
+ int count, i;
+
+ down_write(&qm->qps_lock);
+ for (i = 0, count = 0; i < qm->qp_num; i++)
+ if (!qm->qp_array[i])
+ count++;
+
+ if (count == qm->qp_num) {
+ bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
+ } else {
+ up_write(&qm->qps_lock);
+ return -EBUSY;
+ }
+ up_write(&qm->qps_lock);
+
+ return 0;
+}
+
+static int qm_try_frozen_vfs(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm, *vf_qm;
+ struct pci_dev *dev;
+ int ret = 0;
+
+ if (!qm_list || !pdev)
+ return -EINVAL;
+
+ /* Try to freeze all the VFs before disabling SRIOV */
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+ if (pci_physfn(dev) == pdev) {
+ vf_qm = pci_get_drvdata(dev);
+ ret = hisi_qm_frozen(vf_qm);
+ if (ret)
+ goto frozen_fail;
+ }
+ }
+
+frozen_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ while (hisi_qm_frozen(qm) ||
+ ((qm->fun_type == QM_HW_PF) &&
+ qm_try_frozen_vfs(qm->pdev, qm_list))) {
+ msleep(WAIT_PERIOD);
+ }
+ udelay(REMOVE_WAIT_DELAY);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_remove_wait_delay);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needed init.
*
@@ -2107,32 +2197,21 @@ void hisi_qm_uninit(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
/**
- * hisi_qm_frozen() - Try to froze QM to cut continuous queue request. If
- * there is user on the QM, return failure without doing anything.
- * @qm: The qm needed to be fronzen.
+ * hisi_qm_dev_shutdown() - shutdown device.
+ * @pdev: The device will be shutdown.
*
- * This function frozes QM, then we can do SRIOV disabling.
+ * This function will stop qm when OS shutdown or rebooting.
*/
-int hisi_qm_frozen(struct hisi_qm *qm)
+void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
- int count, i;
-
- down_write(&qm->qps_lock);
- for (i = 0, count = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- count++;
-
- if (count == qm->qp_num) {
- bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
- } else {
- up_write(&qm->qps_lock);
- return -EBUSY;
- }
- up_write(&qm->qps_lock);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
- return 0;
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret)
+ dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_frozen);
+EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
/**
* hisi_qm_get_vft() - Get vft from a qm.
@@ -2174,7 +2253,7 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_q_num;
@@ -2185,7 +2264,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -2483,6 +2561,28 @@ static int qm_stop_started_qp(struct hisi_qm *qm)
}
/**
+ * qm_clear_queues() - Clear memory of queues in a qm.
+ * @qm: The qm which memory needs clear.
+ *
+ * This function clears all queues memory in a qm. Reset of accelerator can
+ * use this to clear queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = qm->qp_array[i];
+ if (qp)
+ /* device state use the last page */
+ memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
+}
+
+/**
* hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped.
* @r: The reason to stop qm.
@@ -2528,7 +2628,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- hisi_qm_clear_queues(qm);
+ qm_clear_queues(qm);
atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
@@ -2589,7 +2689,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
+ qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
&qm_regs_fops);
if (IS_ERR(qm_regs)) {
ret = -ENOENT;
@@ -2605,7 +2705,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
/**
- * hisi_qm_hw_error_init() - Configure qm hardware error report method.
+ * qm_hw_error_init() - Configure qm hardware error report method.
* @qm: The qm which we want to configure.
* @ce: Correctable error configure.
* @nfe: Non-fatal error configure.
@@ -2622,9 +2722,13 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
* related report methods. Error report will be masked if related error bit
* does not configure.
*/
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init(struct hisi_qm *qm)
{
+ u32 nfe = qm->err_ini.err_info.nfe;
+ u32 msi = qm->err_ini.err_info.msi;
+ u32 ce = qm->err_ini.err_info.ce;
+ u32 fe = qm->err_ini.err_info.fe;
+
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev,
"QM version %d doesn't support hw error handling!\n",
@@ -2634,9 +2738,8 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
+static void qm_hw_error_uninit(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_uninit) {
dev_err(&qm->pdev->dev,
@@ -2647,15 +2750,14 @@ void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_uninit);
/**
- * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
+ * qm_hw_error_handle() - Handle qm non-fatal hardware errors.
* @qm: The qm which has non-fatal hardware errors.
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
+static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev,
@@ -2666,104 +2768,19 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
return qm->ops->hw_error_handle(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
-
-/**
- * hisi_qm_clear_queues() - Clear memory of queues in a qm.
- * @qm: The qm which memory needs clear.
- *
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
- */
-void hisi_qm_clear_queues(struct hisi_qm *qm)
-{
- struct hisi_qp *qp;
- int i;
-
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp)
- /* device state use the last page */
- memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
- }
-
- memset(qm->qdma.va, 0, qm->qdma.size);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_clear_queues);
-
-/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm)
+static int qm_get_hw_error_status(struct hisi_qm *qm)
{
u32 err_sts;
- err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) &
- QM_ECC_MBIT;
+ err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) & QM_ECC_MBIT;
if (err_sts)
return err_sts;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_error_status);
-
-static pci_ers_result_t hisi_qm_dev_err_handle(struct hisi_qm *qm)
-{
- u32 err_sts;
-
- if (!qm->err_ini.get_dev_hw_err_status ||
- !qm->err_ini.log_dev_hw_err)
- return PCI_ERS_RESULT_RECOVERED;
-
- /* read err sts */
- err_sts = qm->err_ini.get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_ini.ecc_2bits_mask)
- qm->err_ini.is_dev_ecc_mbit = 1;
- else
- qm->err_ini.is_dev_ecc_mbit = 0;
-
- qm->err_ini.log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
-{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(qm);
-
- /* log device error */
- dev_ret = hisi_qm_dev_err_handle(qm);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
-int hisi_qm_reg_test(struct hisi_qm *qm)
+static int qm_reg_test(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -2782,16 +2799,13 @@ int hisi_qm_reg_test(struct hisi_qm *qm)
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
(val == PCI_VENDOR_ID_HUAWEI),
POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
- return ret;
- }
return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_reg_test);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 cmd;
@@ -2814,9 +2828,8 @@ int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_pf_mse);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 sriov_ctrl;
@@ -2833,8 +2846,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
- if (set == ((sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
- PEH_SRIOV_CTRL_VF_MSE_SHIFT))
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ PEH_SRIOV_CTRL_VF_MSE_SHIFT)
return 0;
udelay(1);
@@ -2842,9 +2855,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vf_mse);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
+static int qm_set_msi(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
@@ -2854,7 +2866,8 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
} else {
pci_write_config_dword(pdev, pdev->msi_cap +
PCI_MSI_MASK_64, PEH_MSI_DISABLE);
- if (qm->err_ini.is_qm_ecc_mbit || qm->err_ini.is_dev_ecc_mbit)
+ if (qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qm->err_ini.err_info.is_dev_ecc_mbit)
return 0;
mdelay(1);
@@ -2864,64 +2877,768 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_msi);
-void hisi_qm_set_ecc(struct hisi_qm *qm)
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
- u32 nfe_enb;
+ int i;
- if ((!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) ||
- (qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.inject_dev_hw_err) ||
- (qm->err_ini.is_dev_ecc_mbit && qm->err_ini.inject_dev_hw_err))
+ if (!qps || qp_num < 0)
return;
- if (qm->err_ini.inject_dev_hw_err)
- qm->err_ini.inject_dev_hw_err(qm);
- else {
- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
- qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
- qm->err_ini.is_qm_ecc_mbit = 1;
+ for (i = qp_num - 1; i >= 0; i--)
+ hisi_qm_release_qp(qps[i]);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+
+static void free_list(struct list_head *head)
+{
+ struct hisi_qm_resource *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_ecc);
-void hisi_qm_restart_prepare(struct hisi_qm *qm)
+static int hisi_qm_sort_devices(int node, struct list_head *head,
+ struct hisi_qm_list *qm_list)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm *qm;
+ struct list_head *n;
+ struct device *dev;
+ int dev_node = 0;
+
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = &qm->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+ }
- /* close AM wr msi port */
- writel(qm->err_ini.qm_wr_port, qm->io_base + AM_CFG_PORT_WR_EN);
+ if (qm_list->check && !qm_list->check(qm))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
- /* clear dev ecc 2bit error source */
- if (qm->err_ini.clear_dev_hw_err_status) {
- qm->err_ini.clear_dev_hw_err_status(qm,
- qm->err_ini.ecc_2bits_mask);
+ res->qm = qm;
+ res->distance = node_distance(dev_node, node);
+ n = head;
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
}
- /* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ return 0;
+}
- /* clear AM Reorder Buffer ecc mbit source */
- writel(ROB_ECC_ERR_MULTPL, qm->io_base + AM_ROB_ECC_INT_STS);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type)
+{
+ struct hisi_qm_resource *tmp;
+ int ret = -ENODEV;
+ LIST_HEAD(head);
+ int i;
- if (qm->err_ini.open_axi_master_ooo)
- qm->err_ini.open_axi_master_ooo(qm);
+ if (!qps || !qm_list || qp_num <= 0)
+ return -EINVAL;
+
+ mutex_lock(&qm_list->lock);
+ if (hisi_qm_sort_devices(node, &head, qm_list)) {
+ mutex_unlock(&qm_list->lock);
+ goto err;
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
+ if (IS_ERR(qps[i])) {
+ hisi_qm_free_qps(qps, i);
+ break;
+ }
+ }
+
+ if (i == qp_num) {
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&qm_list->lock);
+ if (ret)
+ pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ node, alg_type, qp_num);
+
+err:
+ free_list(&head);
+ return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_prepare);
+EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
-void hisi_qm_restart_done(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ u32 q_num, i, remain_q_num;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_q_num - qm->qp_num;
+
+ /* If remain queues not enough, return error. */
+ if (qm->ctrl_q_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* While a VF is in use, SRIOV cannot be disabled.
+ * However, there is a risk that the behavior is uncertain if the
+ * device is under hardware reset.
+ */
+ if (qm_list && qm_try_frozen_vfs(pdev, qm_list)) {
+ pci_err(pdev, "Uacce user space task is using its VF!\n");
+ return -EBUSY;
+ }
+
+ /* remove in hpre_pci_driver will be called to free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+void hisi_qm_dev_err_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_init(pf_qm);
+ pf_qm->err_ini.hw_err_enable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
+
+/**
+ * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
+ * @qm: The qm for which we want to do error uninitialization.
+ *
+ * Uninitialize QM and device error related configuration. It may be
+ * called by either PF or VF; the caller must know which context applies.
+ */
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_uninit(pf_qm);
+ pf_qm->err_ini.hw_err_disable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+
+static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm);
+ if (err_sts) {
+ if (err_sts & qm->err_ini.err_info.ecc_2bits_mask)
+ qm->err_ini.err_info.is_dev_ecc_mbit = true;
+
+ qm->err_ini.log_dev_hw_err(qm, err_sts);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, dev_ret;
+
+ /* log qm error */
+ qm_ret = qm_hw_error_handle(qm);
+
+ /* log device error */
+ dev_ret = qm_dev_err_handle(qm);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hisi_qm_process_dev_error(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+
+static int qm_vf_reset_prepare(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list,
+ enum qm_stop_reason stop_reason)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* save the VF's PCIe configuration space */
+ pci_save_state(dev);
+
+ ret = hisi_qm_stop(qm, stop_reason);
+ if (ret)
+ goto prepare_fail;
+ }
+ }
+
+prepare_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ while (test_and_set_bit(QM_DEV_RESET_STATUS, &pf_qm->hw_status)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ if (qm->use_uacce) {
+ ret = uacce_hw_err_isolate(&qm->uacce);
+ if (ret) {
+ pci_err(pdev, "Fails to isolate hw err!\n");
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ if (!qm->err_ini.err_info.is_dev_ecc_mbit &&
+ qm->err_ini.err_info.is_qm_ecc_mbit &&
+ qm->err_ini.close_axi_master_ooo) {
+
+ qm->err_ini.close_axi_master_ooo(qm);
+
+ } else if (qm->err_ini.err_info.is_dev_ecc_mbit &&
+ !qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_PF_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ ret = qm_reg_test(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable vf mse bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable peh msi bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ mdelay(DELAY_PERIOD_MS);
+
+ /* OOO register set and check */
+ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL);
+
+ /* If bus lock, reset chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + MASTER_TRANS_RETURN,
+ val, (val == MASTER_TRANS_RETURN_RW),
+ QM_REG_RD_INTVRL_US,
+ QM_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable pf mse bit.\n");
+ return ret;
+ }
+
+ /* The reset related sub-control registers are not in PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini.err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* restore the VF's PCIe configuration space */
+ pci_restore_state(dev);
+
+ ret = hisi_qm_restart(qm);
+ if (ret)
+ goto reset_fail;
+ }
+ }
+
+reset_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm) &
+ qm->err_ini.err_info.ecc_2bits_mask;
+ if (err_sts)
+ return err_sts;
+
+ return 0;
+}
+
+static void hisi_qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini.err_info.msi_wr_port,
+ qm->io_base + AM_CFG_PORT_WR_EN);
+
+ /* clear dev ecc 2bit error source if having */
+ value = qm_get_dev_err_status(qm);
+ if (value && qm->err_ini.clear_dev_hw_err_status)
+ qm->err_ini.clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ROB_ECC_ERR_MULTPL, qm->io_base +
+ AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini.open_axi_master_ooo)
+ qm->err_ini.open_axi_master_ooo(qm);
+}
+
+static void hisi_qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini.err_info.msi_wr_port;
+
+ writel(value, qm->io_base + AM_CFG_PORT_WR_EN);
+ qm->err_ini.err_info.is_qm_ecc_mbit = false;
+ qm->err_ini.err_info.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable peh msi bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable pf mse bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable vf mse bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_restart_prepare(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_qm_restart_done(qm);
+
+ return 0;
+}
+
+int hisi_qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_controller_reset);
+
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_info(pdev, "Requesting reset due to PCI error\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* reset pcie device controller */
+ ret = hisi_qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
+
+/* check whether the interrupt is an ecc-mbit error */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int ret;
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(pf_qm);
+ if (ret)
+ return ret;
+
+ return qm_get_dev_err_status(pf_qm);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(qm);
+
+ while (qm_check_dev_error(qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to prepare reset!\n");
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->vfs_num)
+ qm_vf_q_assign(qm, qm->vfs_num);
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ goto flr_done;
+ }
+ }
- writel(AM_CFG_PORT_WR_EN_VALUE, qm->io_base + AM_CFG_PORT_WR_EN);
- qm->err_ini.is_qm_ecc_mbit = 0;
- qm->err_ini.is_dev_ecc_mbit = 0;
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_done);
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1(a)hisilicon.com>");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 24b3609..36e888f 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -17,6 +17,9 @@
#include "qm_usr_if.h"
+#define QNUM_V1 4096
+#define QNUM_V2 1024
+#define QM_MAX_VFS_NUM 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -49,6 +52,7 @@
#define QM_AXI_M_CFG 0x1000ac
#define AXI_M_CFG 0xffff
#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
#define AXI_M_CFG_ENABLE 0xffffffff
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
@@ -235,19 +239,41 @@ struct hisi_qm_status {
int stop_reason;
};
+struct hisi_qm_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
struct hisi_qm;
-struct hisi_qm_err_ini {
- u32 qm_wr_port;
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 is_qm_ecc_mbit;
u32 is_dev_ecc_mbit;
- u32 ecc_2bits_mask;
- void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+ u32 msi;
+};
+
+struct hisi_qm_err_ini {
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ int (*set_usr_domain_cache)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- /* design for module can not hold on ooo through qm, such as zip */
- void (*inject_dev_hw_err)(struct hisi_qm *qm);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ struct hisi_qm_err_info err_info;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ bool (*check)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -260,7 +286,9 @@ struct hisi_qm {
u32 qp_base;
u32 qp_num;
u32 ctrl_q_num;
-
+ u32 vfs_num;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
struct qm_cqc *cqc;
@@ -285,8 +313,7 @@ struct hisi_qm {
u32 error_mask;
u32 msi_mask;
-
- const char *algs;
+ unsigned long hw_status;
bool use_uacce; /* register to uacce */
bool use_sva;
@@ -294,7 +321,9 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t size;
struct uacce uacce;
+ const char *algs;
void *reserve;
+ int uacce_mode;
dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
@@ -345,9 +374,144 @@ struct hisi_qp {
#endif
};
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QNUM_V1, QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = QNUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = QNUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int vf_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM)
+ return -ERANGE;
+
+ return param_set_int(val, kp);
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_NOIOMMU &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+#endif
+
+static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline int hisi_qm_pre_init(struct hisi_qm *qm,
+ u32 pf_q_num, u32 def_q_num)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ switch (pdev->revision) {
+ case QM_HW_V1:
+ case QM_HW_V2:
+ qm->ver = pdev->revision;
+ break;
+ default:
+ pci_err(pdev, "hardware version err!\n");
+ return -ENODEV;
+ }
+
+ pci_set_drvdata(pdev, qm);
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ switch (qm->uacce_mode) {
+ case UACCE_MODE_NOUACCE:
+ qm->use_uacce = false;
+ break;
+ case UACCE_MODE_NOIOMMU:
+ qm->use_uacce = true;
+ break;
+ default:
+ pci_err(pdev, "uacce mode error!\n");
+ return -EINVAL;
+ }
+#else
+ qm->use_uacce = false;
+#endif
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = def_q_num;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ }
+
+ return 0;
+}
+
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type);
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
-int hisi_qm_frozen(struct hisi_qm *qm);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
@@ -358,25 +522,20 @@ struct hisi_qp {
int hisi_qp_wait(struct hisi_qp *qp);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_debug_init(struct hisi_qm *qm);
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm);
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
-void hisi_qm_clear_queues(struct hisi_qm *qm);
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
int hisi_qm_restart(struct hisi_qm *qm);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev);
-int hisi_qm_reg_test(struct hisi_qm *qm);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set);
-void hisi_qm_set_ecc(struct hisi_qm *qm);
-void hisi_qm_restart_prepare(struct hisi_qm *qm);
-void hisi_qm_restart_done(struct hisi_qm *qm);
+int hisi_qm_controller_reset(struct hisi_qm *qm);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
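
The allocator above pairs with hisi_qm_free_qps(). A caller-side
sketch taking two queues close to the local NUMA node (the
my_acc_devices list and the wrapper name are illustrative):

static int my_acc_get_two_qps(struct hisi_qp **qps)
{
	int node = cpu_to_node(smp_processor_id());
	int ret;

	/* alg_type 0 here; on failure the helper frees any qps it created */
	ret = hisi_qm_alloc_qps_node(node, &my_acc_devices, qps, 2, 0);
	if (ret)
		return ret;

	/* ... submit work on qps[0] and qps[1], then release them ... */
	hisi_qm_free_qps(qps, 2);

	return 0;
}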
diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h
index aa7887a..e06efc7 100644
--- a/drivers/crypto/hisilicon/rde/rde.h
+++ b/drivers/crypto/hisilicon/rde/rde.h
@@ -22,19 +22,11 @@
struct hisi_rde_ctrl;
-enum hisi_rde_status {
- HISI_RDE_RESET,
-};
-
struct hisi_rde {
struct hisi_qm qm;
- struct list_head list;
struct hisi_rde_ctrl *ctrl;
struct work_struct reset_work;
- struct mutex *rde_list_lock;
- unsigned long status;
u32 smmu_state;
- int q_ref;
};
#define RDE_CM_LOAD_ENABLE 1
@@ -134,7 +126,6 @@ struct hisi_rde_msg {
struct hisi_rde_ctx {
struct device *dev;
struct hisi_qp *qp;
- struct hisi_rde *rde_dev;
struct hisi_rde_msg *req_list;
unsigned long *req_bitmap;
spinlock_t req_lock;
@@ -323,7 +314,7 @@ static inline void rde_table_dump(const struct hisi_rde_msg *req)
}
}
-struct hisi_rde *find_rde_device(int node);
+struct hisi_qp *rde_create_qp(void);
int hisi_rde_abnormal_fix(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c
index 1be468a..f1330f1 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.c
+++ b/drivers/crypto/hisilicon/rde/rde_api.c
@@ -835,17 +835,12 @@ int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl,
return ret;
}
-static int hisi_rde_create_qp(struct hisi_qm *qm, struct acc_ctx *ctx,
- int alg_type, int req_type)
+static int hisi_rde_start_qp(struct hisi_qp *qp, struct acc_ctx *ctx,
+ int req_type)
{
- struct hisi_qp *qp;
struct hisi_rde_ctx *rde_ctx;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp->req_type = req_type;
qp->qp_ctx = ctx;
@@ -994,9 +989,10 @@ static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen)
int acc_init(struct acc_ctx *ctx)
{
+ struct hisi_rde_ctx *rde_ctx;
struct hisi_rde *hisi_rde;
+ struct hisi_qp *qp;
struct hisi_qm *qm;
- struct hisi_rde_ctx *rde_ctx;
int ret;
if (unlikely(!ctx)) {
@@ -1004,9 +1000,9 @@ int acc_init(struct acc_ctx *ctx)
return -EINVAL;
}
- hisi_rde = find_rde_device(cpu_to_node(smp_processor_id()));
- if (unlikely(!hisi_rde)) {
- pr_err("[%s]Can not find proper RDE device.\n", __func__);
+ qp = rde_create_qp();
+ if (unlikely(!qp)) {
+ pr_err("[%s]Can not create RDE qp.\n", __func__);
return -ENODEV;
}
/* alloc inner private struct */
@@ -1017,20 +1013,20 @@ int acc_init(struct acc_ctx *ctx)
}
ctx->inner = (void *)rde_ctx;
- qm = &hisi_rde->qm;
+ qm = qp->qm;
if (unlikely(!qm->pdev)) {
pr_err("[%s] Pdev is NULL.\n", __func__);
return -ENODEV;
}
rde_ctx->dev = &qm->pdev->dev;
- ret = hisi_rde_create_qp(qm, ctx, 0, 0);
+ ret = hisi_rde_start_qp(qp, ctx, 0);
if (ret) {
- dev_err(rde_ctx->dev, "[%s] Create qp failed.\n", __func__);
+ dev_err(rde_ctx->dev, "[%s] start qp failed.\n", __func__);
goto qp_err;
}
- rde_ctx->rde_dev = hisi_rde;
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
rde_ctx->smmu_state = hisi_rde->smmu_state;
rde_ctx->addr_type = ctx->addr_type;
hisi_rde_session_init(rde_ctx);
@@ -1081,9 +1077,6 @@ int acc_clear(struct acc_ctx *ctx)
rde_ctx->req_list = NULL;
hisi_rde_release_qp(rde_ctx);
- mutex_lock(rde_ctx->rde_dev->rde_list_lock);
- rde_ctx->rde_dev->q_ref = rde_ctx->rde_dev->q_ref - 1;
- mutex_unlock(rde_ctx->rde_dev->rde_list_lock);
kfree(rde_ctx);
ctx->inner = NULL;
diff --git a/drivers/crypto/hisilicon/rde/rde_api.h b/drivers/crypto/hisilicon/rde/rde_api.h
index 0f9021b..167607e 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.h
+++ b/drivers/crypto/hisilicon/rde/rde_api.h
@@ -308,7 +308,7 @@ struct acc_dif {
* @input_block: number of sector
* @data_len: data len of per disk, block_size (with dif)* input_block
* @buf_type: denoted by ACC_BUF_TYPE_E
- * @src_dif��dif information of source disks
+ * @src_dif: dif information of source disks
* @dst_dif: dif information of dest disks
* @cm_load: coe_matrix reload control, 0: do not load, 1: load
* @cm_len: length of loaded coe_matrix, equal to src_num
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 453657a..318d4a0 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -22,7 +22,6 @@
#include <linux/uacce.h>
#include "rde.h"
-#define HRDE_VF_NUM 63
#define HRDE_QUEUE_NUM_V1 4096
#define HRDE_QUEUE_NUM_V2 1024
#define HRDE_PCI_DEVICE_ID 0xa25a
@@ -32,7 +31,6 @@
#define HRDE_PF_DEF_Q_BASE 0
#define HRDE_RD_INTVRL_US 10
#define HRDE_RD_TMOUT_US 1000
-#define FORMAT_DECIMAL 10
#define HRDE_RST_TMOUT_MS 400
#define HRDE_ENABLE 1
#define HRDE_DISABLE 0
@@ -68,7 +66,7 @@
#define CHN_CFG 0x5010101
#define HRDE_AXI_SHUTDOWN_EN BIT(26)
#define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF
-#define HRDE_WR_MSI_PORT 0xFFFE
+#define HRDE_WR_MSI_PORT BIT(0)
#define HRDE_AWUSER_BD_1 0x310104
#define HRDE_ARUSER_BD_1 0x310114
#define HRDE_ARUSER_SGL_1 0x310124
@@ -87,9 +85,6 @@
#define HRDE_QM_IDEL_STATUS 0x1040e4
#define HRDE_QM_PEH_DFX_INFO0 0x1000fc
#define PEH_MSI_MASK_SHIFT 0x90
-#define HRDE_MASTER_GLOBAL_CTRL 0x300000
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define MASTER_TRANS_RETURN_RW 0x3
#define CACHE_CTL 0x1833
#define HRDE_DBGFS_VAL_MAX_LEN 20
#define HRDE_PROBE_ADDR 0x31025c
@@ -100,16 +95,9 @@
static const char hisi_rde_name[] = "hisi_rde";
static struct dentry *hrde_debugfs_root;
-LIST_HEAD(hisi_rde_list);
-DEFINE_MUTEX(hisi_rde_list_lock);
+static struct hisi_qm_list rde_devices;
static void hisi_rde_ras_proc(struct work_struct *work);
-struct hisi_rde_resource {
- struct hisi_rde *hrde;
- int distance;
- struct list_head list;
-};
-
static const struct hisi_rde_hw_error rde_hw_error[] = {
{.int_msk = BIT(0), .msg = "Rde_ecc_1bitt_err"},
{.int_msk = BIT(1), .msg = "Rde_ecc_2bit_err"},
@@ -157,7 +145,6 @@ struct ctrl_debug_file {
*/
struct hisi_rde_ctrl {
struct hisi_rde *hisi_rde;
- struct dentry *debug_root;
struct ctrl_debug_file files[HRDE_DEBUG_FILE_NUM];
};
@@ -199,78 +186,36 @@ struct hisi_rde_ctrl {
{"HRDE_AM_CURR_WR_TXID_STS_2", 0x300178ull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n;
- u32 q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID, NULL);
- if (unlikely(!pdev)) {
- q_num = min_t(u32, HRDE_QUEUE_NUM_V1, HRDE_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d.\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HRDE_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HRDE_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0 (default) or 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HRDE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
-
static u32 pf_q_num = HRDE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
static const struct pci_device_id hisi_rde_dev_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)},
{0,}
@@ -278,125 +223,59 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
MODULE_DEVICE_TABLE(pci, hisi_rde_dev_ids);
-static void free_list(struct list_head *head)
-{
- struct hisi_rde_resource *res;
- struct hisi_rde_resource *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_rde *find_rde_device(int node)
+struct hisi_qp *rde_create_qp(void)
{
- struct hisi_rde *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_rde_resource *res, *tmp;
- struct hisi_rde *hisi_rde;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_rde_list_lock);
-
- list_for_each_entry(hisi_rde, &hisi_rde_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_rde->qm.pdev->dev;
- res->hrde = hisi_rde;
- res->distance = node_distance(dev->numa_node, node);
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->hrde->q_ref + 1 <= pf_q_num) {
- tmp->hrde->q_ref = tmp->hrde->q_ref + 1;
- ret = tmp->hrde;
- break;
- }
- }
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp;
+ int ret;
- free_list(&head);
-#else
- mutex_lock(&hisi_rde_list_lock);
- ret = list_first_entry(&hisi_rde_list, struct hisi_rde, list);
-#endif
- mutex_unlock(&hisi_rde_list_lock);
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &rde_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
-err:
- free_list(&head);
- mutex_unlock(&hisi_rde_list_lock);
return NULL;
}
-static inline void hisi_rde_add_to_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_add_tail(&hisi_rde->list, &hisi_rde_list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static inline void hisi_rde_remove_from_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_del(&hisi_rde->list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static void hisi_rde_engine_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_engine_init(struct hisi_qm *qm)
{
- writel(DFX_CTRL0, hisi_rde->qm.io_base + HRDE_DFX_CTRL_0);
+ writel(DFX_CTRL0, qm->io_base + HRDE_DFX_CTRL_0);
/* usr domain */
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_SGL_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_SGL_1);
/* rde cache */
- writel(AWCACHE, hisi_rde->qm.io_base + HRDE_AWCACHE);
- writel(ARCACHE, hisi_rde->qm.io_base + HRDE_ARCACHE);
+ writel(AWCACHE, qm->io_base + HRDE_AWCACHE);
+ writel(ARCACHE, qm->io_base + HRDE_ARCACHE);
/* rde chn enable + outstangding config */
- writel(CHN_CFG, hisi_rde->qm.io_base + HRDE_CFG);
+ writel(CHN_CFG, qm->io_base + HRDE_CFG);
+
+ return 0;
}
-static void hisi_rde_set_user_domain_and_cache(struct hisi_rde *hisi_rde)
+static int hisi_rde_set_user_domain_and_cache(struct hisi_qm *qm)
{
/* qm user domain */
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_rde->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_rde->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
/* disable BME/PM/SRIOV FLR*/
- writel(PEH_AXUSER_CFG, hisi_rde->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
- writel(CACHE_CTL, hisi_rde->qm.io_base + QM_CACHE_CTL);
+ writel(CACHE_CTL, qm->io_base + QM_CACHE_CTL);
- hisi_rde_engine_init(hisi_rde);
+ return hisi_rde_engine_init(qm);
}
static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
@@ -418,30 +297,38 @@ static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_rde_hw_error_set_state(struct hisi_rde *hisi_rde, bool state)
+static void hisi_rde_hw_error_enable(struct hisi_qm *qm)
{
- u32 ras_msk = (HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK);
u32 val;
- val = readl(hisi_rde->qm.io_base + HRDE_CFG);
- if (state) {
- writel(HRDE_INT_SOURCE_CLEAR,
- hisi_rde->qm.io_base + HRDE_INT_SOURCE);
- writel(HRDE_RAS_ENABLE,
- hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- /* bd prefetch should bd masked to prevent misreport */
- writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
- hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo close, when m-bits error happens*/
- val = val | HRDE_AXI_SHUTDOWN_EN;
- } else {
- writel(ras_msk, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- writel(HRDE_INT_DISABLE, hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo open, when m-bits error happens*/
- val = val & HRDE_AXI_SHUTDOWN_DIS;
- }
+ val = readl(qm->io_base + HRDE_CFG);
+
+ /* clear RDE hw error source if having */
+ writel(HRDE_INT_SOURCE_CLEAR, qm->io_base + HRDE_INT_SOURCE);
+ writel(HRDE_RAS_ENABLE, qm->io_base + HRDE_RAS_INT_MSK);
+
+ /* bd prefetch should be masked to prevent misreport */
+ writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
+ qm->io_base + HRDE_INT_MSK);
- writel(val, hisi_rde->qm.io_base + HRDE_CFG);
+ /* when an m-bit error occurs, master ooo will close */
+ val = val | HRDE_AXI_SHUTDOWN_EN;
+ writel(val, qm->io_base + HRDE_CFG);
+}
+
+static void hisi_rde_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 ras_msk = HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK;
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+
+ writel(ras_msk, qm->io_base + HRDE_RAS_INT_MSK);
+ writel(HRDE_INT_DISABLE, qm->io_base + HRDE_INT_MSK);
+
+ /* when an m-bit error occurs, master ooo will not close */
+ val = val & HRDE_AXI_SHUTDOWN_DIS;
+ writel(val, qm->io_base + HRDE_CFG);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -587,10 +474,8 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_chn_debug_init(struct hisi_qm *qm)
{
- struct hisi_rde *hisi_rde = ctrl->hisi_rde;
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset, *regset_ooo;
struct dentry *tmp_d, *tmp;
@@ -601,7 +486,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -628,29 +513,30 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
return 0;
}
-static int hisi_rde_ctrl_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct dentry *tmp;
int i;
for (i = HRDE_CURRENT_FUNCTION; i < HRDE_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&hisi_rde->ctrl->files[i].lock);
+ hisi_rde->ctrl->files[i].ctrl = hisi_rde->ctrl;
+ hisi_rde->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ hisi_rde->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_rde_chn_debug_init(ctrl);
+ return hisi_rde_chn_debug_init(qm);
}
-static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -665,8 +551,7 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
goto failed_to_create;
if (qm->pdev->device == HRDE_PCI_DEVICE_ID) {
- hisi_rde->ctrl->debug_root = dev_d;
- ret = hisi_rde_ctrl_debug_init(hisi_rde->ctrl);
+ ret = hisi_rde_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -678,49 +563,17 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
return ret;
}
-static void hisi_rde_debugfs_exit(struct hisi_rde *hisi_rde)
+static void hisi_rde_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
hisi_rde_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_rde_set_hw_error(struct hisi_rde *hisi_rde, bool state)
-{
- if (state)
- hisi_qm_hw_error_init(&hisi_rde->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, 0);
- else
- hisi_qm_hw_error_uninit(&hisi_rde->qm);
-
- hisi_rde_hw_error_set_state(hisi_rde, state);
-}
-
-static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
-
- val = readl(qm->io_base + HRDE_CFG);
- writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
- writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
-}
-
-static u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HRDE_INT_STATUS);
-}
-
-static void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
-}
-
-static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
+void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_rde_hw_error *err = rde_hw_error;
struct device *dev = &qm->pdev->dev;
@@ -751,10 +604,30 @@ static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
+u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HRDE_INT_STATUS);
+}
+
+void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- struct hisi_qm *qm = &hisi_rde->qm;
+ writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
+}
+
+static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+ writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
+ writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
+}
+
+static int hisi_rde_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct hisi_rde_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -776,14 +649,26 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HRDE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
- qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = 0;
+ qm->err_ini.err_info.acpi_rst = "RRST";
+ qm->err_ini.hw_err_disable = hisi_rde_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_rde_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_rde_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log;
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_rde_set_hw_error(hisi_rde, true);
+ qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HRDE_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
hisi_rde_debug_regs_clear(qm);
@@ -792,33 +677,21 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
+ int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "ec\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &rde_devices;
qm->sqe_size = HRDE_SQE_SIZE;
qm->dev_name = hisi_rde_name;
- qm->fun_type = QM_HW_PF;
- qm->algs = "ec\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- qm->qp_base = HRDE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
qm->abnormal_fix = hisi_rde_abnormal_fix;
return 0;
@@ -849,11 +722,12 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_rde)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_rde);
INIT_WORK(&hisi_rde->reset_work, hisi_rde_ras_proc);
hisi_rde->smmu_state = hisi_rde_smmu_state(&pdev->dev);
qm = &hisi_rde->qm;
+ qm->fun_type = QM_HW_PF;
+
ret = hisi_rde_qm_pre_init(qm, pdev);
if (ret) {
pci_err(pdev, "Pre init qm failed!\n");
@@ -866,7 +740,7 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = hisi_rde_pf_probe_init(hisi_rde);
+ ret = hisi_rde_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Init pf failed!\n");
goto err_qm_uninit;
@@ -878,16 +752,15 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_rde_debugfs_init(hisi_rde);
+ ret = hisi_rde_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Init debugfs failed!\n");
- hisi_rde_add_to_list(hisi_rde);
- hisi_rde->rde_list_lock = &hisi_rde_list_lock;
+ hisi_qm_add_to_list(qm, &rde_devices);
return 0;
- err_qm_uninit:
+err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
@@ -895,198 +768,20 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void hisi_rde_remove(struct pci_dev *pdev)
{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
+
+ hisi_qm_remove_wait_delay(qm, &rde_devices);
qm->abnormal_fix = NULL;
- hisi_rde_hw_error_set_state(hisi_rde, false);
+ hisi_qm_dev_err_uninit(qm);
cancel_work_sync(&hisi_rde->reset_work);
- hisi_rde_remove_from_list(hisi_rde);
- hisi_rde_debugfs_exit(hisi_rde);
+ hisi_qm_del_from_list(qm, &rde_devices);
+ hisi_rde_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_uninit(qm);
}
-static void hisi_rde_shutdown(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_rde->qm, QM_NORMAL);
-}
-
-static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde)
-{
- int delay = 0;
-
- while (test_and_set_bit(HISI_RDE_RESET, &hisi_rde->status)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_prepare(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Isolate hw err failed!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- /* Check PF stream stop */
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- /* Disable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable peh msi bit failed.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_rde->qm.io_base + HRDE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_rde->qm.io_base +
- HRDE_MASTER_TRANS_RET, val,
- (val == MASTER_TRANS_RETURN_RW),
- HRDE_RD_INTVRL_US, HRDE_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- /* Disable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable pf mse bit failed.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "No controller reset method.\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed.\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- /* Enable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable peh msi bit failed!\n");
- return ret;
- }
-
- /* Enable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable pf mse bit failed!\n");
- return ret;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_rde_set_hw_error(hisi_rde, true);
-
- return 0;
-}
-
-static int hisi_rde_controller_reset(struct hisi_rde *hisi_rde)
-{
- struct device *dev = &hisi_rde->qm.pdev->dev;
- int ret;
-
- dev_info_ratelimited(dev, "Controller resetting...\n");
-
- ret = hisi_rde_controller_reset_prepare(hisi_rde);
- if (ret)
- return ret;
-
- ret = hisi_rde_soft_reset(hisi_rde);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d).\n", ret);
- return ret;
- }
-
- ret = hisi_rde_controller_reset_done(hisi_rde);
- if (ret)
- return ret;
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
- dev_info_ratelimited(dev, "Controller reset complete.\n");
-
- return 0;
-}
-
static void hisi_rde_ras_proc(struct work_struct *work)
{
struct pci_dev *pdev;
@@ -1100,121 +795,26 @@ static void hisi_rde_ras_proc(struct work_struct *work)
ret = hisi_qm_process_dev_error(pdev);
if (ret == PCI_ERS_RESULT_NEED_RESET)
- if (hisi_rde_controller_reset(hisi_rde))
+ if (hisi_qm_controller_reset(&hisi_rde->qm))
dev_err(&pdev->dev, "Hisi_rde reset fail.\n");
}
int hisi_rde_abnormal_fix(struct hisi_qm *qm)
{
- struct pci_dev *pdev;
struct hisi_rde *hisi_rde;
if (!qm)
return -EINVAL;
- pdev = qm->pdev;
- if (!pdev)
- return -EINVAL;
-
- hisi_rde = pci_get_drvdata(pdev);
- if (!hisi_rde) {
- dev_err(&pdev->dev, "Hisi_rde is NULL.\n");
- return -EINVAL;
- }
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
return schedule_work(&hisi_rde->reset_work);
}
-static int hisi_rde_get_hw_error_status(struct hisi_rde *hisi_rde)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS) &
- HRDE_ECC_2BIT_ERR;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_rde_check_hw_error(struct hisi_rde *hisi_rde)
-{
- int ret;
-
- ret = hisi_qm_get_hw_error_status(&hisi_rde->qm);
- if (ret)
- return ret;
-
- return hisi_rde_get_hw_error_status(hisi_rde);
-}
-
-static void hisi_rde_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- u32 delay = 0;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, false);
-
- while (hisi_rde_check_hw_error(hisi_rde)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return;
- }
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return;
- }
-
- dev_info(&pdev->dev, "FLR resetting...\n");
-}
-
-static void hisi_rde_flr_reset_complete(struct pci_dev *pdev,
- struct hisi_rde *hisi_rde)
-{
- u32 id;
-
- pci_read_config_dword(pdev, PCI_COMMAND, &id);
- if (id == HRDE_PCI_COMMAND_INVALID)
- dev_err(&pdev->dev, "Device can not be used!\n");
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
-}
-
-static void hisi_rde_reset_done(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, true);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- goto flr_done;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
-
-flr_done:
- hisi_rde_flr_reset_complete(pdev, hisi_rde);
- dev_info(&pdev->dev, "FLR reset complete.\n");
-}
-
static const struct pci_error_handlers hisi_rde_err_handler = {
- .reset_prepare = hisi_rde_reset_prepare,
- .reset_done = hisi_rde_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_rde_pci_driver = {
@@ -1223,7 +823,7 @@ static void hisi_rde_reset_done(struct pci_dev *pdev)
.probe = hisi_rde_probe,
.remove = hisi_rde_remove,
.err_handler = &hisi_rde_err_handler,
- .shutdown = hisi_rde_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_rde_register_debugfs(void)
@@ -1245,6 +845,9 @@ static int __init hisi_rde_init(void)
{
int ret;
+ INIT_LIST_HEAD(&rde_devices.list);
+ mutex_init(&rde_devices.lock);
+ rde_devices.check = NULL;
hisi_rde_register_debugfs();
ret = pci_register_driver(&hisi_rde_pci_driver);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 0e16452..f85dd06 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,7 +11,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "hisi_sec: " fmt
-#define CTX_Q_NUM_DEF 24
#define FUSION_LIMIT_DEF 1
#define FUSION_LIMIT_MAX 64
#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
@@ -24,10 +23,6 @@ enum sec_endian {
struct hisi_sec_ctrl;
-enum hisi_sec_status {
- HISI_SEC_RESET,
-};
-
struct hisi_sec_dfx {
u64 send_cnt;
u64 send_by_tmout;
@@ -39,21 +34,19 @@ struct hisi_sec_dfx {
u64 thread_cnt;
u64 fake_busy_cnt;
u64 busy_comp_cnt;
- u64 sec_ctrl;
};
struct hisi_sec {
struct hisi_qm qm;
- struct list_head list;
struct hisi_sec_dfx sec_dfx;
struct hisi_sec_ctrl *ctrl;
- struct mutex *hisi_sec_list_lock;
- int q_ref;
int ctx_q_num;
int fusion_limit;
int fusion_tmout_nsec;
- unsigned long status;
};
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+struct hisi_qp **sec_create_qps(void);
struct hisi_sec *find_sec_device(int node);
+
#endif
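
sec_create_qps() allocates the ctx_q_num queues a crypto context needs
in one call and sec_destroy_qps() releases them, so a context either
gets all of its queues or none. Caller-side sketch (bind_qp_ctx() and
the ctx_q_num parameter stand in for the real per-driver plumbing):

static int my_sec_ctx_init(int ctx_q_num)
{
	struct hisi_qp **qps;
	int i;

	qps = sec_create_qps();
	if (!qps)
		return -ENODEV;

	for (i = 0; i < ctx_q_num; i++)
		bind_qp_ctx(qps[i]);	/* illustrative per-queue setup */

	/* ... on context teardown: */
	sec_destroy_qps(qps, ctx_q_num);

	return 0;
}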
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 3a362ce..0643955 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -16,6 +16,8 @@
#include "sec.h"
#include "sec_crypto.h"
+static atomic_t sec_active_devs;
+
#define SEC_ASYNC
#define SEC_INVLD_REQ_ID (-1)
@@ -179,6 +181,7 @@ struct hisi_sec_ctx {
struct hisi_sec *sec;
struct device *dev;
struct hisi_sec_req_op *req_op;
+ struct hisi_qp **qps;
struct hrtimer timer;
struct work_struct work;
atomic_t thread_cnt;
@@ -200,11 +203,6 @@ struct hisi_sec_ctx {
u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
-static void sec_update_iv(struct hisi_sec_req *req, u8 *iv)
-{
- // todo: update iv by cbc/ctr mode
-}
-
static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
@@ -324,19 +322,16 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int alg_type, int req_type)
+static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
+ int qp_ctx_id, int req_type)
{
- struct hisi_qp *qp;
struct hisi_sec_qp_ctx *qp_ctx;
struct device *dev = ctx->dev;
+ struct hisi_qp *qp;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
qp->req_type = req_type;
qp->qp_ctx = qp_ctx;
#ifdef SEC_ASYNC
@@ -353,10 +348,8 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
GFP_ATOMIC);
- if (!qp_ctx->req_bitmap) {
- ret = -ENOMEM;
- goto err_qm_release_qp;
- }
+ if (!qp_ctx->req_bitmap)
+ return -ENOMEM;
qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
if (!qp_ctx->req_list) {
@@ -407,8 +400,7 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_list);
err_free_req_bitmap:
kfree(qp_ctx->req_bitmap);
-err_qm_release_qp:
- hisi_qm_release_qp(qp);
+
return ret;
}
@@ -424,7 +416,6 @@ static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_bitmap);
kfree(qp_ctx->req_list);
kfree(qp_ctx->sqe_list);
- hisi_qm_release_qp(qp_ctx->qp);
}
static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
@@ -465,22 +456,22 @@ static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_qm *qm;
struct hisi_sec_cipher_ctx *c_ctx;
struct hisi_sec *sec;
int i, ret;
crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
- sec = find_sec_device(cpu_to_node(smp_processor_id()));
- if (!sec) {
- pr_err("failed to find a proper sec device!\n");
+ ctx->qps = sec_create_qps();
+ if (!ctx->qps) {
+ pr_err("Can not create sec qps!\n");
return -ENODEV;
}
+
+ sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
ctx->sec = sec;
- qm = &sec->qm;
- ctx->dev = &qm->pdev->dev;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->q_num = sec->ctx_q_num;
@@ -495,7 +486,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
hisi_sec_get_fusion_param(ctx, sec);
for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(qm, ctx, i, 0, 0);
+ ret = hisi_sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
@@ -515,6 +506,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
for (i = i - 1; i >= 0; i--)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
return ret;
}
@@ -540,11 +532,8 @@ static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
for (i = 0; i < ctx->q_num; i++)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, ctx->q_num);
kfree(ctx->qp_ctx);
-
- mutex_lock(ctx->sec->hisi_sec_list_lock);
- ctx->sec->q_ref -= ctx->sec->ctx_q_num;
- mutex_unlock(ctx->sec->hisi_sec_list_lock);
}
static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
@@ -658,8 +647,6 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
dfx = &req->ctx->sec->sec_dfx;
- sec_update_iv(req, req->c_req.sk_req->iv);
-
req->ctx->req_op->buf_unmap(req->ctx, req);
req->ctx->req_op->callback(req->ctx, req);
@@ -1497,20 +1484,28 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
int hisi_sec_register_to_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+ /* avoid repeated registration */
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ if (fusion_limit == 1)
+ return crypto_register_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ return crypto_register_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
+
+ return 0;
}
void hisi_sec_unregister_from_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ if (fusion_limit == 1)
+ crypto_unregister_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ crypto_unregister_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
}
+
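[ The guard above makes registration first-in/last-out across devices:
  only the first probed device registers the skciphers and only the
  last one to leave unregisters them. A minimal user-space sketch of
  the same pattern, with C11 atomics standing in for the kernel's
  atomic_t -- the names here are invented for illustration, not taken
  from the patch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_devs;	/* counts probed devices */

/* First device in performs the one-time registration. */
static void demo_register(void)
{
	if (atomic_fetch_add(&active_devs, 1) + 1 == 1)
		printf("first device: registering algorithms\n");
}

/* Last device out performs the one-time unregistration. */
static void demo_unregister(void)
{
	if (atomic_fetch_sub(&active_devs, 1) - 1 == 0)
		printf("last device: unregistering algorithms\n");
}

int main(void)
{
	demo_register();	/* registers */
	demo_register();	/* no-op: a device is already active */
	demo_unregister();	/* no-op: one device still active */
	demo_unregister();	/* unregisters */
	return 0;
}
]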
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index ba5c478..b4e5d57f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -23,21 +23,24 @@
#include "sec.h"
#include "sec_crypto.h"
-#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
-#define SEC_PCI_DEVICE_ID_PF 0xa255
-#define SEC_PCI_DEVICE_ID_VF 0xa256
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
-#define SEC_COMMON_REG_OFF 0x1000
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_MASTER_GLOBAL_CTRL 0x300000
-#define SEC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define SEC_MASTER_TRANS_RETURN 0x300150
-#define SEC_MASTER_TRANS_RETURN_RW 0x3
#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
@@ -45,41 +48,17 @@
#define SEC_CORE_ECC_INFO 0x301C14
#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
-
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_HW_ERROR_IRQ_ENABLE 1
-#define SEC_HW_ERROR_IRQ_DISABLE 0
-
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-
-#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
-#define SEC_PF_DEF_Q_NUM 64
-#define SEC_PF_DEF_Q_BASE 0
-
-#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_CLEAR 0x1ff
-#define SEC_RAS_CE_REG 0x50
-#define SEC_RAS_FE_REG 0x54
-#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_REG 0x301050
+#define SEC_RAS_FE_REG 0x301054
+#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
-
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
-
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
@@ -88,114 +67,39 @@
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_WR_MSI_PORT 0xFFFE
+#define SEC_WR_MSI_PORT BIT(0)
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_SAA_EN_REG 0x270
+#define SEC_SAA_EN 0x17F
+#define SEC_BD_ERR_CHK_EN_REG0 0x0380
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
+#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
-#define SEC_WAIT_DELAY 1000
-
#define SEC_DBGFS_VAL_MAX_LEN 20
-#define SEC_CHAIN_ABN_LEN 128UL
-#define SEC_ENABLE 1
-#define SEC_DISABLE 0
-#define SEC_RESET_WAIT_TIMEOUT 400
-#define SEC_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FORMAT_DECIMAL 10
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
-
-static const char sec_name[] = "hisi_sec2";
-static struct dentry *sec_debugfs_root;
-static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
-static struct workqueue_struct *sec_wq;
-
-LIST_HEAD(hisi_sec_list);
-DEFINE_MUTEX(hisi_sec_list_lock);
-
-struct hisi_sec_resource {
- struct hisi_sec *sec;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_sec_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_sec *find_sec_device(int node)
-{
- struct hisi_sec *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_sec_resource *res, *tmp;
- struct hisi_sec *hisi_sec;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_sec_list_lock);
-
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_sec->qm.pdev->dev;
- res->sec = hisi_sec;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->sec->q_ref + tmp->sec->ctx_q_num <= pf_q_num) {
- tmp->sec->q_ref += tmp->sec->ctx_q_num;
- ret = tmp->sec;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_sec_list_lock);
-
- ret = list_first_entry(&hisi_sec_list, struct hisi_sec, list);
-#endif
- mutex_unlock(&hisi_sec_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_sec_list_lock);
- return NULL;
-}
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
struct hisi_sec_hw_error {
u32 int_msk;
const char *msg;
};
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static struct hisi_qm_list sec_devices;
+static struct workqueue_struct *sec_wq;
+
static const struct hisi_sec_hw_error sec_hw_error[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
@@ -233,9 +137,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_sec_ctrl {
- u32 num_vfs;
struct hisi_sec *hisi_sec;
- struct dentry *debug_root;
struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
};
@@ -263,94 +165,104 @@ struct hisi_sec_ctrl {
{"SEC_BD_SAA8 ", 0x301C40},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PCI_DEVICE_ID_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
+ u32 ctx_q_num;
int ret;
if (!val)
return -EINVAL;
- if (unlikely(!pdev)) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
+ }
return param_set_int(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
.get = param_get_int,
};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2, 4, ..., 32; 24 default)");
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
+{
+ hisi_qm_free_qps(qps, qp_num);
+ kfree(qps);
+}
+
+struct hisi_qp **sec_create_qps(void)
{
- u32 n;
+ int node = cpu_to_node(smp_processor_id());
+ u32 ctx_num = ctx_q_num;
+ struct hisi_qp **qps;
int ret;
- if (!val)
- return -EINVAL;
+ qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
+ if (!qps)
+ return NULL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+ ret = hisi_qm_alloc_qps_node(node, &sec_devices, qps, ctx_num, 0);
+ if (!ret)
+ return qps;
- return param_set_int(val, kp);
+ kfree(qps);
+ return NULL;
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
+static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
};
-static int ctx_q_num_set(const char *val, const struct kernel_param *kp)
-{
- u32 ctx_q_num;
- int ret;
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- if (!val)
- return -EINVAL;
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
+}
- ret = kstrtou32(val, FORMAT_DECIMAL, &ctx_q_num);
- if (ret)
- return -EINVAL;
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = pf_q_num_set,
+ .get = param_get_int,
+};
- if (ctx_q_num == 0 || ctx_q_num > QM_Q_DEPTH || ctx_q_num % 2 == 1) {
- pr_err("ctx_q_num[%u] is invalid\n", ctx_q_num);
- return -EINVAL;
- }
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops ctx_q_num_ops = {
- .set = ctx_q_num_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static int fusion_limit_set(const char *val, const struct kernel_param *kp)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
{
u32 fusion_limit;
int ret;
@@ -358,11 +270,11 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_limit);
+ ret = kstrtou32(val, 10, &fusion_limit);
if (ret)
return ret;
- if (fusion_limit == 0 || fusion_limit > FUSION_LIMIT_MAX) {
+ if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
FUSION_LIMIT_MAX);
return -EINVAL;
@@ -371,12 +283,17 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_limit_ops = {
- .set = fusion_limit_set,
+static const struct kernel_param_ops sec_fusion_limit_ops = {
+ .set = sec_fusion_limit_set,
.get = param_get_int,
};
+static u32 fusion_limit = FUSION_LIMIT_DEF;
-static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
+module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
+MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
+
+static int sec_fusion_tmout_ns_set(const char *val,
+ const struct kernel_param *kp)
{
u32 fusion_tmout_nsec;
int ret;
@@ -384,7 +301,7 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_tmout_nsec);
+ ret = kstrtou32(val, 10, &fusion_tmout_nsec);
if (ret)
return ret;
@@ -396,53 +313,22 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_tmout_nsec_ops = {
- .set = fusion_tmout_nsec_set,
+static const struct kernel_param_ops sec_fusion_time_ops = {
+ .set = sec_fusion_tmout_ns_set,
.get = param_get_int,
};
-
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
-static int ctx_q_num = CTX_Q_NUM_DEF;
-module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
-
-static int fusion_limit = FUSION_LIMIT_DEF;
-module_param_cb(fusion_limit, &fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr)");
-
-static int fusion_tmout_nsec = FUSION_TMOUT_NSEC_DEF;
-module_param_cb(fusion_tmout_nsec, &fusion_tmout_nsec_ops, &fusion_tmout_nsec,
- 0444);
-MODULE_PARM_DESC(fusion_tmout_nsec, "(0, NSEC_PER_SEC)");
+static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
+module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
+MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
static const struct pci_device_id hisi_sec_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_PF) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
-static inline void hisi_sec_add_to_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_add_tail(&hisi_sec->list, &hisi_sec_list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-static inline void hisi_sec_remove_from_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_del(&hisi_sec->list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-u8 sec_get_endian(struct hisi_sec *hisi_sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
u32 reg;
@@ -450,83 +336,83 @@ u8 sec_get_endian(struct hisi_sec *hisi_sec)
* As for VF, reading an engine register is the wrong way to get
* the endian setting
*/
- if (hisi_sec->qm.pdev->is_virtfn) {
- dev_err_ratelimited(&hisi_sec->qm.pdev->dev,
- "error! shouldn't access a register in VF\n");
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(hisi_sec->qm.io_base + SEC_ENGINE_PF_CFG_OFF +
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
+
/* BD 32-bits big endian mode */
else if (!(reg & BIT(1)))
return SEC_32BE;
+
/* BD 64-bits big endian mode */
else
return SEC_64BE;
}
-static int sec_engine_init(struct hisi_sec *hisi_sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
u32 reg;
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
-
- /* config sec single port max outstanding */
- writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
-
- /* config sec saa enable */
- writel(SEC_SAA_EN, base + SEC_SAA_EN_REG);
/* disable clock gate control */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg &= SEC_CLK_GATE_DISABLE;
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(0x1, base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(base +
- SEC_MEM_INIT_DONE_REG, reg, reg & 0x1,
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
if (ret) {
- dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ pci_err(qm->pdev, "fail to init sec mem\n");
return ret;
}
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg |= SEC_USER0_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL0_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL1_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
reg |= SEC_USER1_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel(SEC_SINGLE_PORT_MAX_TRANS,
+ qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+
+ writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
- writel(SEC_BD_ERR_CHK_EN0, base + SEC_BD_ERR_CHK_EN_REG0);
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
/* Enable sm4 xts mode multiple iv */
- writel(SEC_BD_ERR_CHK_EN1, base + SEC_BD_ERR_CHK_EN_REG1);
- writel(SEC_BD_ERR_CHK_EN3, base + SEC_BD_ERR_CHK_EN_REG3);
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
/* config endian */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
- reg |= sec_get_endian(hisi_sec);
- writel(reg, base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(qm);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
return 0;
}
-static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -540,22 +426,18 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
/* disable FLR triggered by BME(bus master enable) */
writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, qm->io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- if (sec_engine_init(hisi_sec))
- dev_err(&qm->pdev->dev, "sec_engine_init failed");
+ return sec_engine_init(qm);
}
-static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
+static void sec_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -566,50 +448,53 @@ static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
+static void sec_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
u32 val;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "v%d don't support hw error handle\n",
- qm->ver);
+ pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
- val = readl(base + SEC_CONTROL_REG);
- if (state) {
- /* clear SEC hw error source if having */
- writel(SEC_CORE_INT_ENABLE,
- hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- /* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ /* clear SEC hw error source if having */
+ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
- /* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, base + SEC_RAS_NFE_REG);
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
- /* enable SEC block master OOO when m-bit error occur */
- val = val | SEC_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable RAS int */
- writel(SEC_RAS_DISABLE, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_NFE_REG);
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
- /* disable SEC hw error interrupts */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+ /* enable SEC block master OOO when m-bit error occur */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
- /* disable SEC block master OOO when m-bit error occur */
- val = val & SEC_AXI_SHUTDOWN_DISABLE;
- }
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
- writel(val, base + SEC_CONTROL_REG);
+static void sec_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* disable SEC block master OOO when m-bit error occur */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -629,21 +514,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_sec_ctrl *ctrl = file->ctrl;
- u32 tmp, vfq_num;
+ u32 vfq_num;
+ u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num according to PF or VF Dev ID and store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -668,7 +553,7 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- SEC_CTRL_CNT_CLR_CE_BIT;
+ SEC_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
@@ -676,11 +561,11 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
- if (val != 1 && val != 0)
+ if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
@@ -695,6 +580,7 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
int ret;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
val = current_qm_read(file);
@@ -706,8 +592,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
spin_unlock_irq(&file->lock);
return -EINVAL;
}
+
spin_unlock_irq(&file->lock);
ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -726,7 +614,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -ENOSPC;
len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
- pos, buf, count);
+ pos, buf, count);
if (len < 0)
return len;
@@ -735,6 +623,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -EFAULT;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
ret = current_qm_write(file, val);
@@ -750,6 +639,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
ret = -EINVAL;
goto err_input;
}
+
spin_unlock_irq(&file->lock);
return count;
@@ -766,12 +656,11 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = ctrl->hisi_sec;
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &hisi_sec->sec_dfx;
+ struct hisi_sec_dfx *dfx = &sec->sec_dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
char buf[SEC_DBGFS_VAL_MAX_LEN];
@@ -781,7 +670,7 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -847,29 +736,30 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct dentry *tmp;
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&sec->ctrl->files[i].lock);
+ sec->ctrl->files[i].ctrl = sec->ctrl;
+ sec->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ sec->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_sec_core_debug_init(ctrl);
+ return hisi_sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -883,9 +773,8 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
if (ret)
goto failed_to_create;
- if (qm->pdev->device == SEC_PCI_DEVICE_ID_PF) {
- hisi_sec->ctrl->debug_root = dev_d;
- ret = hisi_sec_ctrl_debug_init(hisi_sec->ctrl);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = hisi_sec_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -897,71 +786,62 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_sec *hisi_sec)
+static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_sec_hw_error_init(struct hisi_sec *hisi_sec)
+static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- hisi_qm_hw_error_init(&hisi_sec->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
- | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_sec_hw_error_set_state(hisi_sec, true);
-}
+ const struct hisi_sec_hw_error *errs = sec_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
-static void hisi_sec_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
- val = readl(base + SEC_CONTROL_REG);
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, base + SEC_CONTROL_REG);
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, base + SEC_CONTROL_REG);
+ if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
+ err_val = readl(qm->io_base +
+ SEC_CORE_ECC_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ }
+ }
+ errs++;
+ }
}
-static u32 hisi_sec_get_hw_err_status(struct hisi_qm *qm)
+static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
-static void hisi_sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
-static void hisi_sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hisi_sec_hw_error *err = sec_hw_error;
- struct device *dev = &qm->pdev->dev;
- u32 err_val;
-
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ u32 val;
- if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
- err_val = readl(qm->io_base + SEC_CORE_ECC_INFO);
- dev_err(dev, "hisi-sec multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
- }
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
struct hisi_sec_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -983,59 +863,57 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = SEC_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- qm->err_ini.open_axi_master_ooo = hisi_sec_open_master_ooo;
- qm->err_ini.get_dev_hw_err_status = hisi_sec_get_hw_err_status;
- qm->err_ini.clear_dev_hw_err_status = hisi_sec_clear_hw_err_status;
- qm->err_ini.log_dev_hw_err = hisi_sec_log_hw_error;
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_sec_hw_error_init(hisi_sec);
+ qm->err_ini.get_dev_hw_err_status = sec_get_hw_err_status;
+ qm->err_ini.clear_dev_hw_err_status = sec_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "SRST";
+ qm->err_ini.hw_err_disable = sec_hw_error_disable;
+ qm->err_ini.hw_err_enable = sec_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = sec_set_user_domain_and_cache;
+ qm->err_ini.log_dev_hw_err = sec_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = sec_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = SEC_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
- qm->fun_type = (pdev->device == SEC_PCI_DEVICE_ID_PF) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "sec\ncipher\ndigest\n";
+ qm->qm_list = &sec_devices;
qm->wq = sec_wq;
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- return hisi_qm_init(qm);
+ return 0;
}
-static int hisi_sec_probe_init(struct hisi_qm *qm, struct hisi_sec *hisi_sec)
+static int hisi_sec_probe_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- return hisi_sec_pf_probe_init(hisi_sec);
+ return hisi_sec_pf_probe_init(qm);
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -1066,660 +944,104 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_sec)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_sec);
-
- hisi_sec_add_to_list(hisi_sec);
+ qm = &hisi_sec->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_sec->hisi_sec_list_lock = &hisi_sec_list_lock;
+ ret = hisi_sec_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
hisi_sec->ctx_q_num = ctx_q_num;
hisi_sec->fusion_limit = fusion_limit;
+ hisi_sec->fusion_tmout_nsec = fusion_time;
- hisi_sec->fusion_tmout_nsec = fusion_tmout_nsec;
-
- qm = &hisi_sec->qm;
-
- ret = hisi_sec_qm_init(qm, pdev);
+ ret = hisi_qm_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to pre init qm!\n");
- goto err_remove_from_list;
+ pci_err(pdev, "Failed to init qm (%d)!\n", ret);
+ return ret;
}
- ret = hisi_sec_probe_init(qm, hisi_sec);
+ ret = hisi_sec_probe_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to probe!\n");
+ pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
- if (ret)
+ if (ret) {
+ pci_err(pdev, "Failed to start qm (%d)!\n", ret);
goto err_qm_uninit;
+ }
- ret = hisi_sec_debugfs_init(hisi_sec);
+ ret = hisi_sec_debugfs_init(qm);
if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- return 0;
-
- err_qm_uninit:
- hisi_qm_uninit(qm);
- err_remove_from_list:
- hisi_sec_remove_from_list(hisi_sec);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_sec_vf_q_assign(struct hisi_sec *hisi_sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
+ pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- q_num = remain_q_num / num_vfs;
+ hisi_qm_add_to_list(qm, &sec_devices);
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ ret = hisi_sec_register_to_crypto(fusion_limit);
+ if (ret < 0) {
+ pci_err(pdev, "Failed to register driver to crypto!\n");
+ goto err_remove_from_list;
}
- return 0;
-}
-
-static int hisi_sec_clear_vft_config(struct hisi_sec *hisi_sec)
-{
- struct hisi_sec_ctrl *ctrl = hisi_sec->ctrl;
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 num_vfs = ctrl->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
}
- ctrl->num_vfs = 0;
-
return 0;
-}
-
-static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- u32 num_vfs;
- int pre_existing_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = hisi_sec_vf_q_assign(hisi_sec, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
- hisi_sec->ctrl->num_vfs = num_vfs;
+err_crypto_unregister:
+ hisi_sec_unregister_from_crypto(fusion_limit);
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_sec_clear_vft_config(hisi_sec);
- return ret;
- }
+err_remove_from_list:
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_sec_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_sec *sec, *vf_sec;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(sec, &hisi_sec_list, list) {
- dev = sec->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_sec = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_sec->qm);
- if (ret)
- goto frozen_fail;
- }
- }
+err_qm_uninit:
+ hisi_qm_uninit(qm);
-frozen_fail:
- mutex_unlock(&hisi_sec_list_lock);
return ret;
}
-static int hisi_sec_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_sec_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hisi_sec_clear_vft_config(hisi_sec);
-}
-
static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_sec_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
else
- return hisi_sec_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_sec_remove_wait_delay(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_sec_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(SEC_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_sec_remove(struct pci_dev *pdev)
{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_sec_remove_wait_delay(hisi_sec);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ (void)hisi_qm_sriov_disable(pdev, NULL);
- if (qm->fun_type == QM_HW_PF && hisi_sec->ctrl->num_vfs != 0)
- (void)hisi_sec_sriov_disable(pdev);
+ hisi_sec_unregister_from_crypto(fusion_limit);
- hisi_sec_debugfs_exit(hisi_sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_sec_hw_error_set_state(hisi_sec, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_sec_remove_from_list(hisi_sec);
-}
-
-static void hisi_sec_shutdown(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_sec->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_sec_reset_prepare_ready(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_SEC_RESET, &sec->status)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_prepare(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(SEC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_sec->qm.io_base + SEC_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_sec->qm.io_base +
- SEC_MASTER_TRANS_RETURN,
- val,
- (val == SEC_MASTER_TRANS_RETURN_RW),
- SEC_DELAY_10_US,
- SEC_POLL_TIMEOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "SRST",
- NULL, &value);
- if (ACPI_FAILURE(s) || value) {
- dev_err(dev, "Controller reset fails %lld\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec, hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_sec_hw_error_init(hisi_sec);
-
- return 0;
-}
-
-static int hisi_sec_controller_reset(struct hisi_sec *hisi_sec)
-{
- struct device *dev = &hisi_sec->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_sec_controller_reset_prepare(hisi_sec);
- if (ret)
- return ret;
-
- ret = hisi_sec_soft_reset(hisi_sec);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_sec_controller_reset_done(hisi_sec);
- if (ret)
- return ret;
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hisi_sec_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset sec controller */
- ret = hisi_sec_controller_reset(hisi_sec);
- if (ret) {
- dev_warn(&pdev->dev, "hisi_sec controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_sec_set_hw_error(struct hisi_sec *hisi_sec, bool state)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_sec_hw_error_set_state(sec, state);
-}
-
-static int hisi_sec_get_hw_error_status(struct hisi_sec *hisi_sec)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_sec->qm.io_base + SEC_CORE_INT_STATUS) &
- SEC_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_sec_check_hw_error(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_sec_get_hw_error_status(sec);
-}
-
-static void hisi_sec_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_sec_check_hw_error(hisi_sec)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_sec_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hisi_sec *hisi_sec = pci_get_drvdata(pf_pdev);
- struct device *dev = &hisi_sec->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hisi_sec->qm.pdev, PCI_COMMAND, &id);
- if (id == SEC_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
-}
-
-static void hisi_sec_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec,
- hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queue\n");
- goto flr_done;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to reset vf\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_sec_flr_reset_complete(pdev);
-
- dev_info(dev, "FLR reset complete\n");
}
static const struct pci_error_handlers hisi_sec_err_handler = {
- .error_detected = hisi_sec_error_detected,
- .slot_reset = hisi_sec_slot_reset,
- .reset_prepare = hisi_sec_reset_prepare,
- .reset_done = hisi_sec_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_sec_pci_driver = {
@@ -1729,7 +1051,7 @@ static void hisi_sec_reset_done(struct pci_dev *pdev)
.remove = hisi_sec_remove,
.sriov_configure = hisi_sec_sriov_configure,
.err_handler = &hisi_sec_err_handler,
- .shutdown = hisi_sec_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_sec_register_debugfs(void)
@@ -1759,35 +1081,25 @@ static int __init hisi_sec_init(void)
return -ENOMEM;
}
+ INIT_LIST_HEAD(&sec_devices.list);
+ mutex_init(&sec_devices.lock);
+ sec_devices.check = NULL;
+
hisi_sec_register_debugfs();
ret = pci_register_driver(&hisi_sec_pci_driver);
if (ret < 0) {
+ hisi_sec_unregister_debugfs();
+ if (sec_wq)
+ destroy_workqueue(sec_wq);
pr_err("Failed to register pci driver.\n");
- goto err_pci;
- }
-
- pr_info("hisi_sec: register to crypto\n");
- ret = hisi_sec_register_to_crypto(fusion_limit);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_probe_device;
}
- return 0;
-
- err_probe_device:
- pci_unregister_driver(&hisi_sec_pci_driver);
- err_pci:
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
return ret;
}
static void __exit hisi_sec_exit(void)
{
- hisi_sec_unregister_from_crypto(fusion_limit);
pci_unregister_driver(&hisi_sec_pci_driver);
hisi_sec_unregister_debugfs();
if (sec_wq)
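[ sec_create_qps()/sec_destroy_qps() above replace the per-driver
  find_sec_device() walk: hisi_qm_alloc_qps_node() now picks the
  NUMA-nearest device with enough free queues inside the QM core. A
  small self-contained sketch of that selection policy -- the device
  table and distance function are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct dev { const char *name; int node; int free_qps; };

/* stand-in for the kernel's node_distance() */
static int distance(int a, int b) { return abs(a - b); }

static const struct dev *pick(const struct dev *devs, int n,
			      int node, int need)
{
	const struct dev *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (devs[i].free_qps < need)
			continue;	/* not enough free queues */
		if (!best || distance(devs[i].node, node) <
			     distance(best->node, node))
			best = &devs[i];
	}
	return best;
}

int main(void)
{
	const struct dev devs[] = {
		{ "sec0", 0, 8 }, { "sec1", 1, 64 }, { "sec2", 2, 64 },
	};
	/* caller runs on node 0 and needs 24 queues: sec0 is nearest
	 * but too small, so sec1 wins over the more distant sec2 */
	const struct dev *d = pick(devs, 3, 0, 24);

	printf("picked %s on node %d\n", d ? d->name : "none",
	       d ? d->node : -1);
	return 0;
}
]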
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 560751a..ddd5924 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -18,19 +18,12 @@ enum hisi_zip_error_type {
};
struct hisi_zip_ctrl;
-
-enum hisi_zip_status {
- HISI_ZIP_RESET,
-};
-
struct hisi_zip {
struct hisi_qm qm;
- struct list_head list;
struct hisi_zip_ctrl *ctrl;
- unsigned long status;
};
-struct hisi_zip *find_zip_device(int node);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index b2965ba..b247021 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -153,26 +153,19 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe->dest_addr_h = upper_32_bits(d_addr);
}
-static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
int alg_type, int req_type)
{
- struct device *dev = &qm->pdev->dev;
- struct hisi_qp *qp;
+ struct device *dev = &qp->qm->pdev->dev;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp)) {
- dev_err(dev, "create qp failed!\n");
- return PTR_ERR(qp);
- }
-
qp->req_type = req_type;
+ qp->alg_type = alg_type;
qp->qp_ctx = ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "start qp failed!\n");
- hisi_qm_release_qp(qp);
return ret;
}
@@ -188,26 +181,27 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
- struct hisi_qm *qm;
int ret, i, j;
- /* find the proper zip device */
- hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
- if (!hisi_zip) {
- pr_err("Failed to find a proper ZIP device!\n");
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ if (ret) {
+ pr_err("Can not create zip qps!\n");
return -ENODEV;
}
- qm = &hisi_zip->qm;
+
+ hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
+ ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
+ hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
return ret;
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 5e40fbf..54681dc 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -15,7 +15,6 @@
#include <linux/uacce.h>
#include "zip.h"
-#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
@@ -75,7 +74,6 @@
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
-#define HZIP_RAS_NFE_MBIT_DISABLE ~HZIP_CORE_INT_STATUS_M_ECC
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
@@ -95,95 +93,14 @@
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF
-#define HZIP_WR_MSI_PORT 0xF7FF
+#define HZIP_WR_PORT BIT(11)
-#define HZIP_ENABLE 1
-#define HZIP_DISABLE 0
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
#define FORMAT_DECIMAL 10
-#define HZIP_REG_RD_INTVRL_US 10
-#define HZIP_REG_RD_TMOUT_US 1000
-#define HZIP_RESET_WAIT_TIMEOUT 400
-#define HZIP_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static LIST_HEAD(hisi_zip_list);
-static DEFINE_MUTEX(hisi_zip_list_lock);
-
-struct hisi_zip_resource {
- struct hisi_zip *hzip;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_zip_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_zip *find_zip_device(int node)
-{
- struct hisi_zip *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_zip_resource *res, *tmp;
- struct hisi_zip *hisi_zip;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_zip_list_lock);
-
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_zip->qm.pdev->dev;
- res->hzip = hisi_zip;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
- ret = tmp->hzip;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_zip_list_lock);
-
- ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
- mutex_unlock(&hisi_zip_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_zip_list_lock);
- return NULL;
-}
+static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -229,9 +146,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
- struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
@@ -282,73 +197,49 @@ enum {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
+ return mode_set(val, kp);
+}
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
.set = pf_q_num_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -357,81 +248,67 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
+int zip_create_qps(struct hisi_qp **qps, int ctx_num)
{
- mutex_lock(&hisi_zip_list_lock);
- list_add_tail(&hisi_zip->list, &hisi_zip_list);
- mutex_unlock(&hisi_zip_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
-static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
-{
- mutex_lock(&hisi_zip_list_lock);
- list_del(&hisi_zip->list);
- mutex_unlock(&hisi_zip_list_lock);
+ return hisi_qm_alloc_qps_node(node, &zip_devices,
+ qps, ctx_num, 0);
}
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ void __iomem *base = qm->io_base;
/* qm user domain */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_zip->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_zip->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
+
/* disable FLR triggered by BME(bus master enable) */
- writel(PEH_AXUSER_CFG, hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_1);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_SGL_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
if (qm->use_sva) {
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
}
/* let's open all compression/decompression cores */
writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- hisi_zip->qm.io_base + HZIP_CLOCK_GATE_CTRL);
+ base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
- FIELD_PREP(CQC_CACHE_WB_THRD, 1),
- hisi_zip->qm.io_base + QM_CACHE_CTL);
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
-static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
+static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -442,52 +319,70 @@ static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
hisi_qm_debug_regs_clear(qm);
}
-
-static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
+static int hisi_zip_hw_err_pre_set(struct hisi_qm *qm, u32 *val)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 val;
-
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
pci_info(qm->pdev, "ZIP v%d cannot support hw error handle!\n",
qm->ver);
- return;
+ return -EINVAL;
}
/* configure error type */
- writel(0x1, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_NFE_ENB);
-
- val = readl(hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- if (state) {
- /* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
- HZIP_CORE_INT_SOURCE);
- /* enable ZIP hw error interrupts */
- writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
-
- /* enable ZIP block master OOO when m-bit error occur */
- val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_DISABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* disable ZIP block master OOO when m-bit error occur */
- val = val & HZIP_AXI_SHUTDOWN_DISABLE;
- }
+ *val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ return 0;
+}
+
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* clear ZIP hw error source if any */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ /* enable ZIP hw error interrupts */
+ writel(0, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* enable ZIP block master OOO when m-bit error occurs */
+ val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* disable ZIP hw error interrupts */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* disable ZIP block master OOO when m-bit error occurs */
+ val = val & HZIP_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
- struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
+ struct hisi_zip *zip = file->ctrl->hisi_zip;
- return &hisi_zip->qm;
+ return &zip->qm;
}
static u32 current_qm_read(struct ctrl_debug_file *file)
@@ -500,22 +395,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* According to the PF or VF device ID, calculate curr_qm_qp_num and store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -638,10 +532,8 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
.write = hisi_zip_ctrl_debug_write,
};
-static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_zip *hisi_zip = ctrl->hisi_zip;
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
@@ -657,7 +549,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -677,29 +569,29 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
-static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&zip->ctrl->files[i].lock);
+ zip->ctrl->files[i].ctrl = zip->ctrl;
+ zip->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root, zip->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_zip_core_debug_init(ctrl);
+ return hisi_zip_core_debug_init(qm);
}
-static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
+static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -714,8 +606,7 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
if (qm->fun_type == QM_HW_PF) {
- hisi_zip->ctrl->debug_root = dev_d;
- ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
+ ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -727,47 +618,16 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
return ret;
}
-static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
+static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
if (qm->fun_type == QM_HW_PF) {
- hisi_zip_debug_regs_clear(hisi_zip);
+ hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
-{
- hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_zip_hw_error_set_state(hisi_zip, true);
-}
-
-static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HZIP_CORE_INT_STATUS);
-}
-
-static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
-}
-
-static void hisi_zip_set_ecc(struct hisi_qm *qm)
-{
- u32 nfe_enb;
-
- nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(nfe_enb & HZIP_RAS_NFE_MBIT_DISABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET);
- qm->err_ini.is_dev_ecc_mbit = 1;
-}
-
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_zip_hw_error *err = zip_hw_error;
@@ -792,17 +652,53 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HZIP_CORE_INT_STATUS);
+}
+
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val & HZIP_AXI_SHUTDOWN_DISABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
+static int hisi_zip_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct hisi_zip_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
- hisi_zip->ctrl = ctrl;
- ctrl->hisi_zip = hisi_zip;
+ zip->ctrl = ctrl;
+ ctrl->hisi_zip = zip;
switch (qm->ver) {
case QM_HW_V1:
@@ -817,61 +713,71 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HZIP_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "ZRST";
+ qm->err_ini.hw_err_disable = hisi_zip_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_zip_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_zip_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error;
- qm->err_ini.inject_dev_hw_err = hisi_zip_set_ecc;
+ qm->err_ini.open_axi_master_ooo = hisi_zip_open_axi_master_ooo;
+ qm->err_ini.close_axi_master_ooo = hisi_zip_close_axi_master_ooo;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_zip_hw_error_init(hisi_zip);
- hisi_zip_debug_regs_clear(hisi_zip);
+ qm->err_ini.err_info.msi_wr_port = HZIP_WR_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_zip_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int hisi_zip_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HZIP_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->qm_list = &zip_devices;
return 0;
}
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
+ struct hisi_zip *zip;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
+ zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+ if (!zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- hisi_zip_add_to_list(hisi_zip);
+ qm = &zip->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_zip->status = 0;
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_zip_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- ret = -EINVAL;
- goto err_remove_from_list;
- }
+ hisi_qm_add_to_list(qm, &zip_devices);
ret = hisi_qm_init(qm);
if (ret) {
@@ -880,15 +786,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
+ ret = hisi_zip_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_remove_from_list;
}
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -914,7 +816,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_zip_debugfs_init(hisi_zip);
+ ret = hisi_zip_debugfs_init(qm);
if (ret)
pci_err(pdev, "Failed to init debugfs (%d)!\n", ret);
@@ -923,630 +825,62 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "Failed to register driver to crypto!\n");
goto err_qm_stop;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ hisi_zip_unregister_from_crypto();
err_qm_stop:
- hisi_zip_debugfs_exit(hisi_zip);
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_qm_uninit:
hisi_qm_uninit(qm);
err_remove_from_list:
- hisi_zip_remove_from_list(hisi_zip);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
-{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
- int ret;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- ctrl->num_vfs = 0;
-
- return 0;
-}
-
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_zip_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_zip *zip, *vf_zip;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(zip, &hisi_zip_list, list) {
- dev = zip->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_zip = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_zip->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hisi_zip_list_lock);
+ hisi_qm_del_from_list(qm, &zip_devices);
return ret;
}
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_zip_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &zip_devices);
else
- return hisi_zip_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_zip_remove_wait_delay(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_zip_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(ZIP_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_zip_remove(struct pci_dev *pdev)
{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_zip_remove_wait_delay(hisi_zip);
+ hisi_qm_remove_wait_delay(qm, &zip_devices);
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- (void)hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, NULL);
hisi_zip_unregister_from_crypto();
- hisi_zip_debugfs_exit(hisi_zip);
+
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_zip_hw_error_set_state(hisi_zip, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_zip_remove_from_list(hisi_zip);
-}
-
-static void hisi_zip_shutdown(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_zip->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_zip_reset_prepare_ready(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_ZIP_RESET, &zip->status)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_prepare(struct hisi_zip *hisi_zip,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_zip->qm.io_base + HZIP_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_zip->qm.io_base +
- HZIP_MASTER_TRANS_RETURN, val,
- (val == HZIP_MASTER_TRANS_RETURN_RW),
- HZIP_REG_RD_INTVRL_US,
- HZIP_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "ZRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_done(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_zip->ctrl->num_vfs) {
- ret = hisi_zip_vf_q_assign(hisi_zip, hisi_zip->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_zip_hw_error_init(hisi_zip);
-
- return 0;
-}
-
-static int hisi_zip_controller_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_zip_controller_reset_prepare(hisi_zip);
- if (ret)
- return ret;
-
- ret = hisi_zip_soft_reset(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_zip_controller_reset_done(hisi_zip);
- if (ret)
- return ret;
-
- clear_bit(HISI_ZIP_RESET, &hisi_zip->status);
-
- dev_info(dev, "Controller reset complete\n");
-
- return ret;
-}
-
-static pci_ers_result_t hisi_zip_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- ret = hisi_zip_controller_reset(hisi_zip);
- if (ret) {
- dev_err(&pdev->dev, "hisi_zip controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_zip_set_hw_error(struct hisi_zip *hisi_zip, bool state)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_zip_hw_error_set_state(zip, state);
-}
-
-static int hisi_zip_get_hw_error_status(struct hisi_zip *hisi_zip)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS) &
- HZIP_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_zip_check_hw_error(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_zip_get_hw_error_status(zip);
-}
-
-static void hisi_zip_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_zip_check_hw_error(hisi_zip)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_zip_flr_reset_complete(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct device *dev = &zip->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(zip->qm.pdev, PCI_COMMAND, &id);
- if (id == HZIP_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_ZIP_RESET, &zip->status);
-}
-
-static void hisi_zip_reset_done(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- if (hisi_zip->ctrl->num_vfs)
- hisi_zip_vf_q_assign(hisi_zip,
- hisi_zip->ctrl->num_vfs);
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_zip_flr_reset_complete(hisi_zip);
-
- dev_info(dev, "FLR reset complete\n");
+ hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
- .error_detected = hisi_zip_error_detected,
- .slot_reset = hisi_zip_slot_reset,
- .reset_prepare = hisi_zip_reset_prepare,
- .reset_done = hisi_zip_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -1556,7 +890,7 @@ static void hisi_zip_reset_done(struct pci_dev *pdev)
.remove = hisi_zip_remove,
.sriov_configure = hisi_zip_sriov_configure,
.err_handler = &hisi_zip_err_handler,
- .shutdown = hisi_zip_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_zip_register_debugfs(void)
@@ -1578,6 +912,10 @@ static int __init hisi_zip_init(void)
{
int ret;
+ INIT_LIST_HEAD(&zip_devices.list);
+ mutex_init(&zip_devices.lock);
+ zip_devices.check = NULL;
+
hisi_zip_register_debugfs();
ret = pci_register_driver(&hisi_zip_pci_driver);
--
1.8.3
16 Apr '20
From: Eric Auger <eric.auger(a)redhat.com>
mainline inclusion
from mainline-5.3
commit b9a7f9816483b193
category: bugfix
bugzilla: 17401
CVE: NA
-------------------------------------------------
Several call sites are about to check whether a device belongs
to the PCI sub-hierarchy of a candidate PCI-PCI bridge.
Introduce a helper to perform that check.
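For illustration only (a sketch of ours, not part of the patch): with the
helper below, a call site can test membership in a bridge's PCI
sub-hierarchy without open-coding the subordinate bus-number comparison.
The function and variable names here are hypothetical.
  /* Illustrative sketch: count the PCI devices that sit below a
   * candidate bridge.  A bridge whose subordinate bus range is
   * [0x02, 0x05] matches a device on bus 0x03, not one on bus 0x06.
   */
  static int count_devs_below_bridge(struct device **devs, int n,
  				     struct device *bridge)
  {
  	int i, cnt = 0;

  	for (i = 0; i < n; i++)
  		if (is_downstream_to_pci_bridge(devs[i], bridge))
  			cnt++;
  	return cnt;
  }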
Signed-off-by: Eric Auger <eric.auger(a)redhat.com>
Reviewed-by: Lu Baolu <baolu.lu(a)linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/iommu/intel-iommu.c | 37 +++++++++++++++++++++++++++++--------
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f6d7955..2f52ea8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -826,12 +826,39 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
+/**
+ * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+ * sub-hierarchy of a candidate PCI-PCI bridge
+ * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
+ * @bridge: the candidate PCI-PCI bridge
+ *
+ * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
+ */
+static bool
+is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
+{
+ struct pci_dev *pdev, *pbridge;
+
+ if (!dev_is_pci(dev) || !dev_is_pci(bridge))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ pbridge = to_pci_dev(bridge);
+
+ if (pbridge->subordinate &&
+ pbridge->subordinate->number <= pdev->bus->number &&
+ pbridge->subordinate->busn_res.end >= pdev->bus->number)
+ return true;
+
+ return false;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
+ struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
@@ -877,13 +904,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
goto out;
}
- if (!pdev || !dev_is_pci(tmp))
- continue;
-
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
+ if (is_downstream_to_pci_bridge(dev, tmp))
goto got_pdev;
}
--
1.8.3
1
17
From: fengsheng <fengsheng5(a)huawei.com>
driver inclusion
category: cleanup
bugzilla: NA
CVE: NA
1. SFC code cleanup: consistent operator spacing, parenthesized macro
arguments (see the illustrative note below), corrected header guard
names, and removal of the VOID macro.
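As an illustrative aside (ours, not part of the patch), the macro-argument
parenthesization is more than style: & binds tighter than |, so an
unparenthesized argument breaks for compound expressions. One term of
HRD_NIBBLE_SWAP_32BIT had exactly this shape before the cleanup.
  #define OLD_LOW(X) ((X & 0xF) << 4)   /* old form, X unparenthesized */
  #define NEW_LOW(X) (((X) & 0xF) << 4) /* new form, as in this patch  */
  /* With a = 0x10, b = 0x03:
   *   OLD_LOW(a | b) == ((0x10 | (0x03 & 0xF)) << 4) == 0x130  -- wrong
   *   NEW_LOW(a | b) == (((0x10 | 0x03) & 0xF) << 4) == 0x030  -- intended
   */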
Signed-off-by: fengsheng <fengsheng5(a)huawei.com>
Reviewed-by: zhangmu <zhangmu1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/mtd/hisilicon/sfc/hrd_common.h | 54 +++++++++++++--------------
drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c | 4 +-
drivers/mtd/hisilicon/sfc/hrd_sflash_core.c | 12 ++----
drivers/mtd/hisilicon/sfc/hrd_sflash_core.h | 2 +
drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h | 6 +--
drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c | 5 +--
drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h | 1 +
drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h | 4 +-
8 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/drivers/mtd/hisilicon/sfc/hrd_common.h b/drivers/mtd/hisilicon/sfc/hrd_common.h
index d36a7a3..71dcaa9 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_common.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_common.h
@@ -40,39 +40,39 @@
#define HRD_COMMON_ERR_RES_NOT_EXIST (int)(HRD_COMMON_ERR_BASE - 13)
/* 16 bit nibble swap. example 0x1234 -> 0x2143 */
-#define HRD_NIBBLE_SWAP_16BIT(X) ((((X)&0xf) << 4) | \
- (((X)&0xF0) >> 4) | \
- (((X)&0xF00) << 4) | \
- (((X)&0xF000) >> 4))
+#define HRD_NIBBLE_SWAP_16BIT(X) ((((X) & 0xf) << 4) | \
+ (((X) & 0xF0) >> 4) | \
+ (((X) & 0xF00) << 4) | \
+ (((X) & 0xF000) >> 4))
/* 32 bit nibble swap. example 0x12345678 -> 0x21436587 */
-#define HRD_NIBBLE_SWAP_32BIT(X) (((X&0xF) << 4) | \
- (((X)&0xF0) >> 4) | \
- (((X)&0xF00) << 4) | \
- (((X)&0xF000) >> 4) | \
- (((X)&0xF0000) << 4) | \
- (((X)&0xF00000) >> 4) | \
- (((X)&0xF000000) << 4) | \
- (((X)&0xF0000000) >> 4))
+#define HRD_NIBBLE_SWAP_32BIT(X) ((((X) & 0xF) << 4) | \
+ (((X) & 0xF0) >> 4) | \
+ (((X) & 0xF00) << 4) | \
+ (((X) & 0xF000) >> 4) | \
+ (((X) & 0xF0000) << 4) | \
+ (((X) & 0xF00000) >> 4) | \
+ (((X) & 0xF000000) << 4) | \
+ (((X) & 0xF0000000) >> 4))
/* 16 bit byte swap. example 0x1234->0x3412 */
-#define HRD_BYTE_SWAP_16BIT(X) ((((X)&0xFF)<<8) | (((X)&0xFF00)>>8))
+#define HRD_BYTE_SWAP_16BIT(X) ((((X) & 0xFF) << 8) | (((X) & 0xFF00) >> 8))
/* 32 bit byte swap. example 0x12345678->0x78563412 */
-#define HRD_BYTE_SWAP_32BIT(X) ((((X)&0xFF)<<24) | \
- (((X)&0xFF00)<<8) | \
- (((X)&0xFF0000)>>8) | \
- (((X)&0xFF000000)>>24))
+#define HRD_BYTE_SWAP_32BIT(X) ((((X) & 0xFF) << 24) | \
+ (((X) & 0xFF00) << 8) | \
+ (((X) & 0xFF0000) >> 8) | \
+ (((X) & 0xFF000000) >> 24))
/* 64 bit byte swap. example 0x11223344.55667788 -> 0x88776655.44332211 */
-#define HRD_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xFFULL)<<56) | \
- (((X)&0xFF00ULL)<<40) | \
- (((X)&0xFF0000ULL)<<24) | \
- (((X)&0xFF000000ULL)<<8) | \
- (((X)&0xFF00000000ULL)>>8) | \
- (((X)&0xFF0000000000ULL)>>24) | \
- (((X)&0xFF000000000000ULL)>>40) | \
- (((X)&0xFF00000000000000ULL)>>56)))
+#define HRD_BYTE_SWAP_64BIT(X) ((l64) ((((X) & 0xFFULL) << 56) | \
+ (((X) & 0xFF00ULL) << 40) | \
+ (((X) & 0xFF0000ULL) << 24) | \
+ (((X) & 0xFF000000ULL) << 8) | \
+ (((X) & 0xFF00000000ULL) >> 8) | \
+ (((X) & 0xFF0000000000ULL) >> 24) | \
+ (((X) & 0xFF000000000000ULL) >> 40) | \
+ (((X) & 0xFF00000000000000ULL) >> 56)))
/* -- Endianess macros. */
#ifdef HRD_ENDNESS_BIGEND
@@ -91,10 +91,8 @@
#define HRD_64BIT_BE(X) HRD_BYTE_SWAP_64BIT(X)
#endif
-#define VOID void
-
#ifndef NULL
-#define NULL ((VOID *)0)
+#define NULL ((void *)0)
#endif
#define MTD_FLASH_MAP_DEBUG
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c b/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
index 6a10106..8935263 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
@@ -26,9 +26,9 @@
#include "hrd_common.h"
#include "hrd_sflash_driver.h"
-#define SFC_DRIVER_VERSION "1.8.15.0"
+#define SFC_DRIVER_VERSION "1.9.39.0"
-static const char *g_sflashMtdList[] = { "sflash", NULL };
+static const char *g_sflashMtdList[] = {"sflash", NULL};
static unsigned int hrd_flash_info_fill(struct maps_init_info *maps,
struct resource *flash_iores, struct platform_device *pdev)
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
index 7341f9e..68547d8 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
@@ -28,7 +28,6 @@
#include <linux/signal.h>
#include <linux/types.h>
#include "hrd_common.h"
-#include "hrd_sflash_driver.h"
#include "hrd_sflash_spec.h"
#include "hrd_sflash_core.h"
@@ -205,7 +204,6 @@ s32 SFC_ClearStatus(struct SFC_SFLASH_INFO *sflash)
(void)SFC_ClearInt(sflash->sfc_reg_base);
if (sflash->manufacturerId == HISI_SPANSION_MANF_ID) {
-
/* 30 for spansion , clear status */
SFC_RegisterWrite(sflash->sfc_reg_base + CMD_INS, 0x30);
@@ -234,7 +232,6 @@ void SFC_CheckErr(struct SFC_SFLASH_INFO *sflash)
unsigned long delay_us = 50; /* delay 50us */
if (sflash->manufacturerId == HISI_SPANSION_MANF_ID) {
-
ulRegValue = SFC_ReadStatus(sflash);
if (ulRegValue == WAIT_TIME_OUT) {
pr_err("[SFC] [%s %d]: SFC_ReadStatus time out\n",
@@ -362,7 +359,7 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
u32 ulRegValue;
s32 ulRet;
- if (!ulReadLen || ulReadLen > SFC_HARD_BUF_LEN || (ulReadLen&0x3)) {
+ if (!ulReadLen || ulReadLen > SFC_HARD_BUF_LEN || (ulReadLen & 0x3)) {
pr_err("[SFC] [%s %d]: len=%u err\n", __func__, __LINE__, ulReadLen);
return HRD_ERR;
}
@@ -379,7 +376,7 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
ulRegValue = SFC_RegisterRead(sflash->sfc_reg_base + CMD_CONFIG);
ulRegValue &= (~(0xff << DATA_CNT) & (~(1 << SEL_CS)));
ulRegValue |=
- ((ulReadLen-1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN) | (1 << RW_DATA)
+ ((ulReadLen - 1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN) | (1 << RW_DATA)
| (SFC_CHIP_CS << SEL_CS) | (0x1 << START);
wmb();
@@ -398,7 +395,6 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
pulData[i] = SFC_RegisterRead(sflash->sfc_reg_base + DATABUFFER1 + (u32)(0x4 * i));
return ulRet;
-
}
s32 SFC_RegByteRead(struct SFC_SFLASH_INFO *sflash,
@@ -448,7 +444,7 @@ s32 SFC_RegWordAlignWrite(struct SFC_SFLASH_INFO *sflash,
s32 ulRet;
ulRet = SFC_WriteEnable(sflash);
- if (!ulWriteLen || ulWriteLen > SFC_HARD_BUF_LEN || (ulWriteLen&0x3)) {
+ if ((!ulWriteLen) || (ulWriteLen > SFC_HARD_BUF_LEN) || (ulWriteLen & 0x3)) {
pr_err("[SFC] [%s %d]: len=%u err\n", __func__, __LINE__, ulWriteLen);
ulRet = HRD_ERR;
goto rel;
@@ -471,7 +467,7 @@ s32 SFC_RegWordAlignWrite(struct SFC_SFLASH_INFO *sflash,
ulRegValue = SFC_RegisterRead(sflash->sfc_reg_base + CMD_CONFIG);
ulRegValue &=
(~(0xff << DATA_CNT)) & (~(1 << RW_DATA) & (~(1 << SEL_CS)));
- ulRegValue |= ((ulWriteLen-1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN)
+ ulRegValue |= ((ulWriteLen - 1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN)
| (SFC_CHIP_CS << SEL_CS) | (0x1 << START);
wmb();
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
index 9002c3e..56c4417 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
@@ -17,6 +17,8 @@
#ifndef __HRD_SFLASH_CORE_H__
#define __HRD_SFLASH_CORE_H__
+#include "hrd_sflash_driver.h"
+
#define SFC_HARD_BUF_LEN (256)
#define SPI_CMD_SR_WIP 1 /* Write in Progress bit in status register position */
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
index 3494787..f659758 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
@@ -14,8 +14,8 @@
*
*/
-#ifndef _HRD_SLASH_DRIVER_H
-#define _HRD_SLASH_DRIVER_H
+#ifndef _HRD_SFLASH_DRIVER_H
+#define _HRD_SFLASH_DRIVER_H
#include <linux/mtd/map.h>
@@ -102,4 +102,4 @@ struct SFC_SFLASH_INFO {
extern struct mtd_info *sflash_probe(struct map_info *map, struct resource *sfc_regres);
extern void sflash_destroy(struct mtd_info *mtd);
-#endif /* _HRD_SLASH_DRIVER_H */
+#endif /* _HRD_SFLASH_DRIVER_H */
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
index 8f5b387..ec9887a7 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
@@ -700,7 +700,6 @@ s32 SFC_BlockErase(struct SFC_SFLASH_INFO *sflash, u32 ulAddr, u32 ErCmd)
rel:
SFC_FlashUnlock(sflash);
return ulRet;
-
}
static s32 _SFC_RegModeWrite(struct SFC_SFLASH_INFO *sflash,
@@ -727,7 +726,7 @@ static s32 _SFC_RegModeWrite(struct SFC_SFLASH_INFO *sflash,
}
if (ulRemain >= 0x4) {
- slRet = SFC_RegWordAlignWrite(sflash, (const u32 *)(pucSrc + i), offset + i, ulRemain&(~0x3));
+ slRet = SFC_RegWordAlignWrite(sflash, (const u32 *)(pucSrc + i), offset + i, ulRemain & (~0x3));
if (slRet != HRD_OK) {
pr_err("[SFC] [%s %d]: SFC_RegWordAlignWrite fail\n", __func__, __LINE__);
return slRet;
@@ -805,7 +804,7 @@ s32 SFC_RegModeRead(struct SFC_SFLASH_INFO *sflash,
}
if (ulRemain >= 0x4) {
- ret = SFC_RegWordAlignRead(sflash, offset + i, (u32 *) (pucDest + i), ulRemain&(~0x3));
+ ret = SFC_RegWordAlignRead(sflash, offset + i, (u32 *) (pucDest + i), ulRemain & (~0x3));
if (ret != HRD_OK) {
pr_err("[SFC] [%s %d]: SFC_RegWordAlignRead fail\n", __func__, __LINE__);
return ret;
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
index 78c921c..f612731 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
@@ -16,6 +16,7 @@
#ifndef __HRD_SFLASH_HAL_H__
#define __HRD_SFLASH_HAL_H__
+#include "hrd_sflash_driver.h"
extern void SFC_CheckErr(struct SFC_SFLASH_INFO *sflash);
extern s32 SFC_RegModeRead(struct SFC_SFLASH_INFO *sflash, u32 offset,
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
index a59965b..151957d 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
@@ -14,8 +14,8 @@
*
*/
-#ifndef __SPI_FLASH_SPEC_H__
-#define __SPI_FLASH_SPEC_H__
+#ifndef __HRD_SFLASH_SPEC_H__
+#define __HRD_SFLASH_SPEC_H__
#define SFLASH_DEFAULT_RDID_OPCD 0x9F /* Default Read ID */
#define SFLASH_DEFAULT_WREN_OPCD 0x06 /* Default Write Enable */
--
1.8.3
16 Apr '20
From: Lyude Paul <lyude(a)redhat.com>
commit bf502391353b928e63096127e5fd8482080203f5 upstream.
This supports RMI4 and everything seems to work, including the touchpad
buttons. So, let's enable this by default.
Signed-off-by: Lyude Paul <lyude(a)redhat.com>
Cc: stable(a)vger.kernel.org
Link: https://lore.kernel.org/r/20200204194322.112638-1-lyude@redhat.com
Signed-off-by: Dmitry Torokhov <dmitry.torokhov(a)gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/input/mouse/synaptics.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e8d1134..064be84 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,7 @@ void synaptics_reset(struct psmouse *psmouse)
"LEN004a", /* W541 */
"LEN005b", /* P50 */
"LEN005e", /* T560 */
+ "LEN006c", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
--
1.8.3
16 Apr '20
From: Ard Biesheuvel <ard.biesheuvel(a)linaro.org>
mainline inclusion
from mainline-v4.20-rc1
commit ab8085c130edd65be0d95cc95c28b51c4c6faf9d
category: bugfix
bugzilla: NA
CVE: NA
-----------------------------------------------
As it turns out, the AVX2 multibuffer SHA routines are currently
broken [0], in a way that would have likely been noticed if this
code were in wide use. Since the code is too complicated to be
maintained by anyone except the original authors, and since the
performance benefits for real-world use cases are debatable to
begin with, it is better to drop it entirely for the moment.
[0] https://marc.info/?l=linux-crypto-vger&m=153476243825350&w=2
Suggested-by: Eric Biggers <ebiggers(a)google.com>
Cc: Megha Dey <megha.dey(a)linux.intel.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Geert Uytterhoeven <geert(a)linux-m68k.org>
Cc: Martin Schwidefsky <schwidefsky(a)de.ibm.com>
Cc: Heiko Carstens <heiko.carstens(a)de.ibm.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Ingo Molnar <mingo(a)redhat.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel(a)linaro.org>
Signed-off-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
MAINTAINERS | 8 -
arch/m68k/configs/amiga_defconfig | 1 -
arch/m68k/configs/apollo_defconfig | 1 -
arch/m68k/configs/atari_defconfig | 1 -
arch/m68k/configs/bvme6000_defconfig | 1 -
arch/m68k/configs/hp300_defconfig | 1 -
arch/m68k/configs/mac_defconfig | 1 -
arch/m68k/configs/multi_defconfig | 1 -
arch/m68k/configs/mvme147_defconfig | 1 -
arch/m68k/configs/mvme16x_defconfig | 1 -
arch/m68k/configs/q40_defconfig | 1 -
arch/m68k/configs/sun3_defconfig | 1 -
arch/m68k/configs/sun3x_defconfig | 1 -
arch/s390/configs/debug_defconfig | 1 -
arch/s390/configs/performance_defconfig | 1 -
arch/x86/crypto/Makefile | 3 -
arch/x86/crypto/sha1-mb/Makefile | 14 -
arch/x86/crypto/sha1-mb/sha1_mb.c | 1011 -------------------
arch/x86/crypto/sha1-mb/sha1_mb_ctx.h | 134 ---
arch/x86/crypto/sha1-mb/sha1_mb_mgr.h | 110 --
arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S | 287 ------
arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S | 304 ------
arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c | 64 --
arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S | 209 ----
arch/x86/crypto/sha1-mb/sha1_x8_avx2.S | 492 ---------
arch/x86/crypto/sha256-mb/Makefile | 14 -
arch/x86/crypto/sha256-mb/sha256_mb.c | 1013 -------------------
arch/x86/crypto/sha256-mb/sha256_mb_ctx.h | 134 ---
arch/x86/crypto/sha256-mb/sha256_mb_mgr.h | 108 --
.../crypto/sha256-mb/sha256_mb_mgr_datastruct.S | 304 ------
.../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 307 ------
.../x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c | 65 --
.../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S | 214 ----
arch/x86/crypto/sha256-mb/sha256_x8_avx2.S | 598 -----------
arch/x86/crypto/sha512-mb/Makefile | 12 -
arch/x86/crypto/sha512-mb/sha512_mb.c | 1047 --------------------
arch/x86/crypto/sha512-mb/sha512_mb_ctx.h | 128 ---
arch/x86/crypto/sha512-mb/sha512_mb_mgr.h | 104 --
.../crypto/sha512-mb/sha512_mb_mgr_datastruct.S | 281 ------
.../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S | 297 ------
.../x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c | 69 --
.../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S | 224 -----
arch/x86/crypto/sha512-mb/sha512_x4_avx2.S | 531 ----------
crypto/Kconfig | 62 --
crypto/Makefile | 1 -
crypto/mcryptd.c | 675 -------------
include/crypto/mcryptd.h | 114 ---
47 files changed, 8952 deletions(-)
delete mode 100644 arch/x86/crypto/sha1-mb/Makefile
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb.c
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/Makefile
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb.c
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/Makefile
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb.c
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
delete mode 100644 crypto/mcryptd.c
delete mode 100644 include/crypto/mcryptd.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 588fc68..b143d31 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7565,14 +7565,6 @@ S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
-INTEL SHA MULTIBUFFER DRIVER
-M: Megha Dey <megha.dey(a)linux.intel.com>
-R: Tim Chen <tim.c.chen(a)linux.intel.com>
-L: linux-crypto(a)vger.kernel.org
-S: Supported
-F: arch/x86/crypto/sha*-mb/
-F: crypto/mcryptd.c
-
INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty(a)intel.com>
L: platform-driver-x86(a)vger.kernel.org
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 93a3c3c..85904b7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index e3d0efd..9b3818b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 75ac0c7..7696778 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c6e4927..7dd264d 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index b00d1c4..515f743 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 85cac37..8e1038c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b3a5d1e..62c8aaa 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 0ca2260..733973f 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 8e3d10d..fee30cc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ff7e653..eebf9c9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 612cf46..dabc543 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a6a7bb6..0d9a5c2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc..259d169 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f..37fd60c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a450ad5..9edfa54 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -60,9 +60,6 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
endif
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
deleted file mode 100644
index 815ded3..00000000
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o
- sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \
- sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
deleted file mode 100644
index b938056..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ /dev/null
@@ -1,1011 +0,0 @@
-/*
- * Multi buffer SHA1 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha1_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha1_mb_alg_state;
-
-struct sha1_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
- (struct sha1_mb_mgr *state, struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
- (struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
- (struct sha1_mb_mgr *state);
-
-static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA1_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA1_PADLENGTHFIELD_SIZE;
-
-#if SHA1_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA1_LOG2_BLOCK_SIZE;
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA1_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA1_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA1_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha1_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha1_hash_ctx
- *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha1_hash_ctx *ctx;
-
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
-{
- sha1_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA1_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /*
- * The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
-{
- struct sha1_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * sha1_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha1_mb_init(struct ahash_request *areq)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA1_H0;
- sctx->job.result_digest[1] = SHA1_H1;
- sctx->job.result_digest[2] = SHA1_H2;
- sctx->job.result_digest[3] = SHA1_H3;
- sctx->job.result_digest[4] = SHA1_H4;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 5; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha1_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha1_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha1_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha1_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha1_hash_ctx));
-
- return 0;
-}
-
-static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha1_mb_areq_alg = {
- .init = sha1_mb_init,
- .update = sha1_mb_update,
- .final = sha1_mb_final,
- .finup = sha1_mb_finup,
- .export = sha1_mb_export,
- .import = sha1_mb_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
- * Use the ASYNC flag because some buffers in the
- * multi-buffer algorithm may not have completed before
- * the hashing thread sleeps.
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha1_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha1_mb_areq_init_tfm,
- .cra_exit = sha1_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_hash_ctx),
- }
- }
-};
-
-static int sha1_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha1_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha1_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha1_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha1_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha1_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha1_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha1_mb_async_alg = {
- .init = sha1_mb_async_init,
- .update = sha1_mb_async_update,
- .final = sha1_mb_async_final,
- .finup = sha1_mb_async_finup,
- .digest = sha1_mb_async_digest,
- .export = sha1_mb_async_export,
- .import = sha1_mb_async_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
- .cra_init = sha1_mb_async_init_tfm,
- .cra_exit = sha1_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha1_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha1_hash_ctx *)
- sha1_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the next flush time from the oldest pending request */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha1_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
-
- sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
- sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
- sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
- sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;
-
- if (!sha1_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha1_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha1_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha1_mb_alg_state.flusher = &sha1_mb_flusher;
-
- err = crypto_register_ahash(&sha1_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha1_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha1_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha1_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_ahash(&sha1_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
-}
-
-module_init(sha1_mb_mod_init);
-module_exit(sha1_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha1");
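
The glue code deleted above is built around one contract: sha1_ctx_mgr_submit()
may return NULL (the job was parked in a lane while other lanes fill up), or it
may return whichever job completed, which is not necessarily the one just
submitted; sha1_ctx_mgr_flush() forces the remaining lanes to run when no new
data is arriving. A minimal C sketch of a caller honoring that contract (the
struct and function names here are illustrative placeholders, not the removed
API):

    struct mgr;                     /* stands in for struct sha1_ctx_mgr */
    struct ctx;                     /* stands in for struct sha1_hash_ctx */

    #define HASH_UPDATE     0x00
    #define HASH_LAST       0x01

    extern struct ctx *submit(struct mgr *mgr, struct ctx *job,
                              const void *buf, unsigned int len, int flags);
    extern struct ctx *flush(struct mgr *mgr);

    /* Drive data into one job; return whichever job completed, if any. */
    struct ctx *process(struct mgr *mgr, struct ctx *job,
                        const void *buf, unsigned int len, int last)
    {
            struct ctx *done = submit(mgr, job, buf, len,
                                      last ? HASH_LAST : HASH_UPDATE);

            /*
             * NULL means the job was parked in a lane; flushing forces
             * the idle lanes to run.  NULL from flush() means nothing
             * is left in flight at all.
             */
            if (!done)
                    done = flush(mgr);

            return done;            /* possibly a different job than 'job' */
    }

This out-of-order completion is what let eight SHA-1 jobs share one AVX2
eight-lane kernel while each caller still saw an ordinary ahash request.
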
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
deleted file mode 100644
index 9454bd1..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha1_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA1_DIGEST_LENGTH 5
-#define SHA1_LOG2_BLOCK_SIZE 6
-
-#define SHA1_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha1_ctx_mgr {
- struct sha1_mb_mgr mgr;
-};
-
-/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */
-
-struct sha1_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha1 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
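
The status bits defined in this header form a small state machine: a context
starts IDLE, moves to PROCESSING (optionally with LAST set) on submit, and
ends at COMPLETE. A condensed C restatement of the submit-time checks that
sha1_ctx_mgr_submit performed against these bits (errno-style returns are a
simplification here; the removed code recorded HASH_CTX_ERROR_* values
instead):

    #include <errno.h>

    #define HASH_CTX_STS_PROCESSING 0x01
    #define HASH_CTX_STS_COMPLETE   0x04

    struct hash_ctx { int status; };

    /* Condensed restatement of the submit-time checks. */
    static int can_submit(const struct hash_ctx *ctx)
    {
            if (ctx->status & HASH_CTX_STS_PROCESSING)
                    return -EBUSY;  /* already queued in a lane */
            if (ctx->status & HASH_CTX_STS_COMPLETE)
                    return -EINVAL; /* finished; hash_ctx_init() first */
            return 0;               /* IDLE: safe to submit */
    }
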
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
deleted file mode 100644
index 08ad1a9..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-
-#include <linux/types.h>
-
-#define NUM_SHA1_DIGEST_WORDS 5
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha1 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA1 out-of-order scheduler */
-
-/* typedef uint32_t sha1_digest_array[5][8]; */
-
-struct sha1_args_x8 {
- uint32_t digest[5][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha1_lane_data {
- struct job_sha1 *job_in_lane;
-};
-
-struct sha1_mb_mgr {
- struct sha1_args_x8 args;
-
- uint32_t lens[8];
-
- /* each nibble is index (0...7) of an unused lane */
- uint64_t unused_lanes;
- /* nibble 8 is set to 0xF as a flag */
- struct sha1_lane_data ldata[8];
-};
-
-
-#define SHA1_MB_MGR_NUM_LANES_AVX2 8
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
- struct job_sha1 *job);
-struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
-
-#endif
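
The unused_lanes word is effectively a stack of free-lane indices packed four
bits per entry, with a 0xF sentinel that sits in bits 32..35 once all eight
lanes are free; the flush routine's `bt $(32+3)` test and `shl $4` push in the
assembly below operate on exactly this encoding. A standalone C sketch of the
same bookkeeping (illustrative, not the removed code):

    #include <stdint.h>

    #define LANE_STACK_EMPTY 0xFULL /* sentinel nibble */

    /* push a freed lane index onto the nibble stack */
    static inline uint64_t lane_push(uint64_t unused_lanes, unsigned int idx)
    {
            return (unused_lanes << 4) | (idx & 0xF);
    }

    /* pop the most recently freed lane index */
    static inline unsigned int lane_pop(uint64_t *unused_lanes)
    {
            unsigned int idx = *unused_lanes & 0xF;

            *unused_lanes >>= 4;
            return idx;
    }

    /* all eight lanes are free once the sentinel reaches bits 32..35 */
    static inline int all_lanes_free(uint64_t unused_lanes)
    {
            return (unused_lanes >> 32) == LANE_STACK_EMPTY;
    }
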
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
deleted file mode 100644
index 86688c6..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_
-#define _SHA1_MB_MGR_DATASTRUCT_ASM_
-
-## START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-## FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-## END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-
-#endif
-
-########################################################################
-#### Define constants
-########################################################################
-
-########################################################################
-#### Define SHA1 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
-_LANE_DATA_size = _FIELD_OFFSET
-_LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA1_ARGS_X8
-### name size align
-FIELD _digest, 4*5*8, 16 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
-_SHA1_ARGS_X4_size = _FIELD_OFFSET
-_SHA1_ARGS_X4_align = _STRUCT_ALIGN
-_SHA1_ARGS_X8_size = _FIELD_OFFSET
-_SHA1_ARGS_X8_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
-_MB_MGR_size = _FIELD_OFFSET
-_MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA1 structure
-########################################################################
-
-START_FIELDS # JOB_SHA1
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 4, 4 # length in bytes
-FIELD _result_digest, 5*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
-_JOB_SHA1_size = _FIELD_OFFSET
-_JOB_SHA1_align = _STRUCT_ALIGN
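
The FIELD macro above assigns each member an offset by rounding the running
offset up to the member's alignment, the same layout rule a C compiler applies
to structs. The core arithmetic, restated in C, with the resulting JOB_SHA1
offsets worked out from the definitions above:

    /* round 'off' up to 'align' (align must be a power of two) */
    static unsigned long field_offset(unsigned long off, unsigned long align)
    {
            return (off + align - 1) & ~(align - 1);
    }

    /*
     * Applied to the JOB_SHA1 fields above:
     *   _buffer        offset  0  (8 bytes,  align 8)
     *   _len           offset  8  (4 bytes,  align 4)
     *   _result_digest offset 32  (20 bytes, align 32)
     *   _status        offset 52  (4 bytes,  align 4)
     *   _user_data     offset 56  (8 bytes,  align 8)
     */
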
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7cfba73..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Flush routine for SHA1 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha1_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
- # state and idx are intact
-
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- mov tmp2_w, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha1_mb_mgr_flush_avx2)
-
-
-#################################################################
-
-.align 16
-ENTRY(sha1_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
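
The vpminud tree in the flush routine above is its scheduler: each 32-bit lens[] entry packs (blocks << 4) | lane, so one packed-minimum reduction picks out both the shortest outstanding job and the lane it occupies. A scalar C sketch of the same computation, assuming that packed encoding (the helper name is hypothetical):

#include <stdint.h>

/*
 * Scalar equivalent of the vpminud reduction: lens[i] packs
 * (blocks << 4) | lane, so the minimum over all eight entries
 * selects the shortest job and its lane index at the same time.
 */
uint32_t find_min_lane(const uint32_t lens[8], uint32_t *blocks)
{
	uint32_t min = lens[0];
	unsigned int i;

	for (i = 1; i < 8; i++)
		if (lens[i] < min)
			min = lens[i];

	*blocks = min >> 4;	/* whole blocks to hash next */
	return min & 0xF;	/* lane holding the shortest job */
}

Empty lanes are parked at 0xFFFFFFFF (the movl $0xFFFFFFFF, _lens stores above), so they can never win the minimum.
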
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
deleted file mode 100644
index d2add0d..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Initialization code for multi buffer SHA1 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha1_mb_mgr.h"
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
-{
- unsigned int j;
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
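
The 0xF76543210ULL initializer is the entire free-lane bookkeeping structure: a stack of 4-bit lane numbers with a 0xF sentinel nibble on top. Submission pops the low nibble, completion pushes the lane back, and once all eight lanes are free the sentinel sits in bits 32-35, which is what the bt $32+3 tests in the assembly check. A sketch of that encoding (helper names hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Pop the next free lane from the low nibble of the stack. */
unsigned int lane_pop(uint64_t *unused_lanes)
{
	unsigned int lane = *unused_lanes & 0xF;

	*unused_lanes >>= 4;
	return lane;
}

/* Push a completed job's lane back onto the stack. */
void lane_push(uint64_t *unused_lanes, unsigned int lane)
{
	*unused_lanes = (*unused_lanes << 4) | lane;
}

/* All eight lanes are free when the 0xF sentinel reaches bits 32-35. */
bool all_lanes_free(uint64_t unused_lanes)
{
	return unused_lanes & (1ULL << (32 + 3));
}
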
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
deleted file mode 100644
index 7a93b1c..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA1 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha1_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha1_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- mov _result_digest+1*16(job), DWORD_tmp
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- movl DWORD_tmp, _args_digest+4*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), DWORD_tmp
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl DWORD_tmp, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha1_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
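
One detail worth calling out in the submit path above: the digest words are scattered with a 32-byte stride (the _args_digest+N*32(state, lane, 4) addressing) because sha1_x8_avx2 wants the digests transposed, so that each ymm register holds one digest word across all eight lanes. The equivalent layout in C terms, with stand-in names:

#include <stdint.h>

#define NUM_LANES	8
#define SHA1_WORDS	5

/*
 * args_digest[] mirrors the transposed layout: word w of lane l
 * lives at index w * NUM_LANES + l, i.e. a 32-byte stride per word.
 */
void scatter_digest(uint32_t args_digest[SHA1_WORDS * NUM_LANES],
		    const uint32_t digest[SHA1_WORDS], unsigned int lane)
{
	unsigned int w;

	for (w = 0; w < SHA1_WORDS; w++)
		args_digest[w * NUM_LANES + lane] = digest[w];
}
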
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
deleted file mode 100644
index 20f77aa..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Multi-buffer SHA1 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-## code to compute oct SHA1 using AVX2
-## outer calling routine takes care of save and restore of XMM registers
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-##
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers ymm0-15
-
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
-##
-## Magic functions defined in FIPS 180-1
-##
-# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D)))
-.macro MAGIC_F0 regF regB regC regD regT
- vpxor \regD, \regC, \regF
- vpand \regB, \regF, \regF
- vpxor \regD, \regF, \regF
-.endm
-
-# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F1 regF regB regC regD regT
- vpxor \regC, \regD, \regF
- vpxor \regB, \regF, \regF
-.endm
-
-# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D))
-.macro MAGIC_F2 regF regB regC regD regT
- vpor \regC, \regB, \regF
- vpand \regC, \regB, \regT
- vpand \regD, \regF, \regF
- vpor \regT, \regF, \regF
-.endm
-
-# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F3 regF regB regC regD regT
- MAGIC_F1 \regF,\regB,\regC,\regD,\regT
-.endm
-
-# PROLD reg, imm, tmp
-.macro PROLD reg imm tmp
- vpsrld $(32-\imm), \reg, \tmp
- vpslld $\imm, \reg, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro PROLD_nd reg imm tmp src
- vpsrld $(32-\imm), \src, \tmp
- vpslld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- vpaddd \memW*32(%rsp), \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF, \regB, \regC, \regD, \regT
- PROLD \regB, 30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- offset = ((\memW - 14) & 15) * 32
- vmovdqu offset(%rsp), W14
- vpxor W14, W16, W16
- offset = ((\memW - 8) & 15) * 32
- vpxor offset(%rsp), W16, W16
- offset = ((\memW - 3) & 15) * 32
- vpxor offset(%rsp), W16, W16
- vpsrld $(32-1), W16, \regF
- vpslld $1, W16, W16
- vpor W16, \regF, \regF
-
- ROTATE_W
-
- offset = ((\memW - 0) & 15) * 32
- vmovdqu \regF, offset(%rsp)
- vpaddd \regF, \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D)
- PROLD \regB,30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-########################################################################
-########################################################################
-########################################################################
-
-## FRAMESZ plus pushes must be an odd multiple of 8
-YMM_SAVE = (15-15)*32
-FRAMESZ = 32*16 + YMM_SAVE
-_YMM = FRAMESZ - YMM_SAVE
-
-#define VMOVPS vmovups
-
-IDX = %rax
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = %rcx
-arg1 = %rdi
-arg2 = %rsi
-RSP_SAVE = %rdx
-
-# ymm0 A
-# ymm1 B
-# ymm2 C
-# ymm3 D
-# ymm4 E
-# ymm5 F AA
-# ymm6 T0 BB
-# ymm7 T1 CC
-# ymm8 T2 DD
-# ymm9 T3 EE
-# ymm10 T4 TMP
-# ymm11 T5 FUN
-# ymm12 T6 K
-# ymm13 T7 W14
-# ymm14 T8 W15
-# ymm15 T9 W16
-
-
-A = %ymm0
-B = %ymm1
-C = %ymm2
-D = %ymm3
-E = %ymm4
-F = %ymm5
-T0 = %ymm6
-T1 = %ymm7
-T2 = %ymm8
-T3 = %ymm9
-T4 = %ymm10
-T5 = %ymm11
-T6 = %ymm12
-T7 = %ymm13
-T8 = %ymm14
-T9 = %ymm15
-
-AA = %ymm5
-BB = %ymm6
-CC = %ymm7
-DD = %ymm8
-EE = %ymm9
-TMP = %ymm10
-FUN = %ymm11
-K = %ymm12
-W14 = %ymm13
-W15 = %ymm14
-W16 = %ymm15
-
-.macro ROTATE_ARGS
- TMP_ = E
- E = D
- D = C
- C = B
- B = A
- A = TMP_
-.endm
-
-.macro ROTATE_W
-TMP_ = W16
-W16 = W15
-W15 = W14
-W14 = TMP_
-.endm
-
-# 8 streams x 5 32bit words per digest x 4 bytes per word
-#define DIGEST_SIZE (8*5*4)
-
-.align 32
-
-# void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size)
-# arg 1 : pointer to array[8] of pointer to input data
-# arg 2 : size (in blocks) ;; assumed to be >= 1
-#
-ENTRY(sha1_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- #save rsp
- mov %rsp, RSP_SAVE
- sub $FRAMESZ, %rsp
-
- #align rsp to 32 Bytes
- and $~0x1F, %rsp
-
- ## Initialize digests
- vmovdqu 0*32(arg1), A
- vmovdqu 1*32(arg1), B
- vmovdqu 2*32(arg1), C
- vmovdqu 3*32(arg1), D
- vmovdqu 4*32(arg1), E
-
- ## transpose input onto stack
- mov _data_ptr+0*8(arg1),inp0
- mov _data_ptr+1*8(arg1),inp1
- mov _data_ptr+2*8(arg1),inp2
- mov _data_ptr+3*8(arg1),inp3
- mov _data_ptr+4*8(arg1),inp4
- mov _data_ptr+5*8(arg1),inp5
- mov _data_ptr+6*8(arg1),inp6
- mov _data_ptr+7*8(arg1),inp7
-
- xor IDX, IDX
-lloop:
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F
- I=0
-.rep 2
- VMOVPS (inp0, IDX), T0
- VMOVPS (inp1, IDX), T1
- VMOVPS (inp2, IDX), T2
- VMOVPS (inp3, IDX), T3
- VMOVPS (inp4, IDX), T4
- VMOVPS (inp5, IDX), T5
- VMOVPS (inp6, IDX), T6
- VMOVPS (inp7, IDX), T7
-
- TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- vpshufb F, T0, T0
- vmovdqu T0, (I*8)*32(%rsp)
- vpshufb F, T1, T1
- vmovdqu T1, (I*8+1)*32(%rsp)
- vpshufb F, T2, T2
- vmovdqu T2, (I*8+2)*32(%rsp)
- vpshufb F, T3, T3
- vmovdqu T3, (I*8+3)*32(%rsp)
- vpshufb F, T4, T4
- vmovdqu T4, (I*8+4)*32(%rsp)
- vpshufb F, T5, T5
- vmovdqu T5, (I*8+5)*32(%rsp)
- vpshufb F, T6, T6
- vmovdqu T6, (I*8+6)*32(%rsp)
- vpshufb F, T7, T7
- vmovdqu T7, (I*8+7)*32(%rsp)
- add $32, IDX
- I = (I+1)
-.endr
- # save old digests
- vmovdqu A,AA
- vmovdqu B,BB
- vmovdqu C,CC
- vmovdqu D,DD
- vmovdqu E,EE
-
-##
-## perform 0-79 steps
-##
- vmovdqu K00_19(%rip), K
-## do rounds 0...15
- I = 0
-.rep 16
- SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 16...19
- vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16
- vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15
-.rep 4
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 20...39
- vmovdqu K20_39(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 40...59
- vmovdqu K40_59(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 60...79
- vmovdqu K60_79(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
- ROTATE_ARGS
- I = (I+1)
-.endr
-
- vpaddd AA,A,A
- vpaddd BB,B,B
- vpaddd CC,C,C
- vpaddd DD,D,D
- vpaddd EE,E,E
-
- sub $1, arg2
- jne lloop
-
- # write out digests
- vmovdqu A, 0*32(arg1)
- vmovdqu B, 1*32(arg1)
- vmovdqu C, 2*32(arg1)
- vmovdqu D, 3*32(arg1)
- vmovdqu E, 4*32(arg1)
-
- # update input pointers
- add IDX, inp0
- add IDX, inp1
- add IDX, inp2
- add IDX, inp3
- add IDX, inp4
- add IDX, inp5
- add IDX, inp6
- add IDX, inp7
- mov inp0, _data_ptr (arg1)
- mov inp1, _data_ptr + 1*8(arg1)
- mov inp2, _data_ptr + 2*8(arg1)
- mov inp3, _data_ptr + 3*8(arg1)
- mov inp4, _data_ptr + 4*8(arg1)
- mov inp5, _data_ptr + 5*8(arg1)
- mov inp6, _data_ptr + 6*8(arg1)
- mov inp7, _data_ptr + 7*8(arg1)
-
- ################
- ## Postamble
-
- mov RSP_SAVE, %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha1_x8_avx2)
-
-
-.section .rodata.cst32.K00_19, "aM", @progbits, 32
-.align 32
-K00_19:
-.octa 0x5A8279995A8279995A8279995A827999
-.octa 0x5A8279995A8279995A8279995A827999
-
-.section .rodata.cst32.K20_39, "aM", @progbits, 32
-.align 32
-K20_39:
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-
-.section .rodata.cst32.K40_59, "aM", @progbits, 32
-.align 32
-K40_59:
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-
-.section .rodata.cst32.K60_79, "aM", @progbits, 32
-.align 32
-K60_79:
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
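
The SHA1_STEP_16_79 macro in the file above vectorizes the standard FIPS 180-1 message schedule across eight lanes, keeping a 16-entry circular buffer on the stack (32 bytes per entry, one dword per lane). For reference, a single-lane scalar equivalent of the same update:

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int n)
{
	return (v << n) | (v >> (32 - n));
}

/*
 * Scalar form of the schedule step: W[i] = rol32(W[i-3] ^ W[i-8]
 * ^ W[i-14] ^ W[i-16], 1) over a 16-entry circular buffer.
 */
uint32_t schedule_w(uint32_t w[16], unsigned int i)
{
	uint32_t v = rol32(w[(i - 3) & 15] ^ w[(i - 8) & 15] ^
			   w[(i - 14) & 15] ^ w[(i - 16) & 15], 1);

	w[i & 15] = v;
	return v;
}
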
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
deleted file mode 100644
index 53ad6e7..00000000
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
- sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
- sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
deleted file mode 100644
index 97c5fc43..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-/*
- * Multi buffer SHA256 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha256_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha256_mb_alg_state;
-
-struct sha256_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
- (struct sha256_mb_mgr *state, struct job_sha256 *job);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
- (struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
- (struct sha256_mb_mgr *state);
-
-inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA256_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA256_PADLENGTHFIELD_SIZE;
-
-#if SHA256_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA256_LOG2_BLOCK_SIZE;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA256_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA256_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA256_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha256_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user. If it is not ready, resubmit the job to finish processing.
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * hash_ctx_mgr still need processing.
- */
- struct sha256_hash_ctx *ctx;
-
- ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
-{
- sha256_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /* If we made it here, there was no error during this call to submit */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA256_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(
- &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block */
- assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block,
- * it can be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
-{
- struct sha256_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha256_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha256_mb_init(struct ahash_request *areq)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA256_H0;
- sctx->job.result_digest[1] = SHA256_H1;
- sctx->job.result_digest[2] = SHA256_H2;
- sctx->job.result_digest[3] = SHA256_H3;
- sctx->job.result_digest[4] = SHA256_H4;
- sctx->job.result_digest[5] = SHA256_H5;
- sctx->job.result_digest[6] = SHA256_H6;
- sctx->job.result_digest[7] = SHA256_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha256_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha256_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha256_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha256_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha256_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha256_hash_ctx));
-
- return 0;
-}
-
-static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha256_mb_areq_alg = {
- .init = sha256_mb_init,
- .update = sha256_mb_update,
- .final = sha256_mb_final,
- .finup = sha256_mb_finup,
- .export = sha256_mb_export,
- .import = sha256_mb_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "__sha256-mb",
- .cra_driver_name = "__intel_sha256-mb",
- .cra_priority = 100,
- /*
-		 * use the ASYNC flag since some buffers in the
-		 * multi-buffer algo may not have completed before
-		 * the hashing thread sleeps
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha256_mb_areq_init_tfm,
- .cra_exit = sha256_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_hash_ctx),
- }
- }
-};
-
-static int sha256_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha256_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha256_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha256_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha256_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha256_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha256_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha256_mb_async_alg = {
- .init = sha256_mb_async_init,
- .update = sha256_mb_async_update,
- .final = sha256_mb_async_final,
- .finup = sha256_mb_async_finup,
- .export = sha256_mb_async_export,
- .import = sha256_mb_async_import,
- .digest = sha256_mb_async_digest,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_async_alg.halg.base.cra_list),
- .cra_init = sha256_mb_async_init_tfm,
- .cra_exit = sha256_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha256_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha256_hash_ctx *)
- sha256_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
-			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha256_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha256_mb_alg_state.alg_cstate = alloc_percpu
- (struct mcryptd_alg_cstate);
-
- sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
- sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
- sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
- sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
-
- if (!sha256_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha256_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha256_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha256_mb_alg_state.flusher = &sha256_mb_flusher;
-
- err = crypto_register_ahash(&sha256_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha256_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha256_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha256_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha256_mb_async_alg);
- crypto_unregister_ahash(&sha256_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
-}
-
-module_init(sha256_mb_mod_init);
-module_exit(sha256_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha256");
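
sha256_pad() in the deleted glue code above decides how many extra blocks the final padding occupies. A quick worked check: for total_len = 120 the tail block already holds 120 % 64 = 56 bytes, and 56 + 1 (the 0x80 byte) + 8 (the length field) = 65 does not fit in 64, so padding spills into a second block. A standalone sketch of the same arithmetic, with the constants inlined:

#include <assert.h>
#include <stdint.h>

/* Mirror of sha256_pad()'s block count, constants inlined. */
static uint32_t pad_blocks(uint64_t total_len)
{
	uint32_t i = total_len & 63;	/* bytes already in the tail block */

	i += (63 & (0 - (total_len + 8 + 1))) + 1 + 8;
	return i >> 6;			/* extra blocks to hash */
}

int main(void)
{
	assert(pad_blocks(120) == 2);	/* 56 + 9 > 64: two blocks */
	assert(pad_blocks(20) == 1);	/* 20 + 9 <= 64: one block */
	return 0;
}
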
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
deleted file mode 100644
index 7c43254..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA256 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha256_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA256_DIGEST_LENGTH 8
-#define SHA256_LOG2_BLOCK_SIZE 6
-
-#define SHA256_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha256_ctx_mgr {
- struct sha256_mb_mgr mgr;
-};
-
-/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
-
-struct sha256_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha256 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
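
A note on the status flags being removed above: hash_ctx_init() deliberately
parks a fresh context at HASH_CTX_STS_COMPLETE, a context with a job in
flight carries HASH_CTX_STS_PROCESSING, and the final padded blocks are
submitted with PROCESSING and COMPLETE both set; hash_ctx_complete() is an
exact compare against COMPLETE, so it only fires once the manager clears the
PROCESSING bit. A minimal C restatement of that lifecycle (standalone
sketch mirroring the macros above, not any kernel API):

	#include <stdio.h>

	#define HASH_CTX_STS_IDLE		0x00
	#define HASH_CTX_STS_PROCESSING	0x01
	#define HASH_CTX_STS_LAST		0x02
	#define HASH_CTX_STS_COMPLETE		0x04

	struct hash_ctx { int status; };

	static int hash_ctx_complete(const struct hash_ctx *ctx)
	{
		return ctx->status == HASH_CTX_STS_COMPLETE;
	}

	int main(void)
	{
		struct hash_ctx ctx = { HASH_CTX_STS_COMPLETE }; /* hash_ctx_init() */

		ctx.status = HASH_CTX_STS_PROCESSING;	/* job submitted */
		ctx.status = HASH_CTX_STS_PROCESSING |
			     HASH_CTX_STS_COMPLETE;	/* last padded blocks queued */
		printf("complete? %d\n", hash_ctx_complete(&ctx));	/* 0 */

		ctx.status = HASH_CTX_STS_COMPLETE;	/* manager clears PROCESSING */
		printf("complete? %d\n", hash_ctx_complete(&ctx));	/* 1 */
		return 0;
	}
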
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
deleted file mode 100644
index b01ae40..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA256_DIGEST_WORDS 8
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha256 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA256 out-of-order scheduler */
-
-/* typedef uint32_t sha8_digest_array[8][8]; */
-
-struct sha256_args_x8 {
- uint32_t digest[8][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha256_lane_data {
- struct job_sha256 *job_in_lane;
-};
-
-struct sha256_mb_mgr {
- struct sha256_args_x8 args;
-
- uint32_t lens[8];
-
-	/* each nibble is the index (0...7) of an unused lane */
- uint64_t unused_lanes;
-	/* nibble 8 is set to F as an "all lanes free" flag */
- struct sha256_lane_data ldata[8];
-};
-
-
-#define SHA256_MB_MGR_NUM_LANES_AVX2 8
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
- struct job_sha256 *job);
-struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
deleted file mode 100644
index 5c377ba..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define SZ8 8*SHA256_DIGEST_WORD_SIZE
-#define ROUNDS 64*SZ8
-#define PTR_SZ 8
-#define SHA256_DIGEST_WORD_SIZE 4
-#define MAX_SHA256_LANES 8
-#define SHA256_DIGEST_WORDS 8
-#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
-#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
-#define SHA256_BLK_SZ 64
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-
-########################################################################
-#### Define SHA256 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA256_ARGS_X4
-### name size align
-FIELD _digest, 4*8*8, 4 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
- _SHA256_ARGS_X4_size = _FIELD_OFFSET
- _SHA256_ARGS_X4_align = _STRUCT_ALIGN
- _SHA256_ARGS_X8_size = _FIELD_OFFSET
- _SHA256_ARGS_X8_align = _STRUCT_ALIGN
-
-#######################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-START_FIELDS #STACK_FRAME
-### name size align
-FIELD _data, 16*SZ8, 1 # transposed digest
-FIELD _digest, 8*SZ8, 1 # array of pointers to data
-FIELD _ytmp, 4*SZ8, 1
-FIELD _rsp, 8, 1
-END_FIELDS
-
- _STACK_FRAME_size = _FIELD_OFFSET
- _STACK_FRAME_align = _STRUCT_ALIGN
-
-#######################################################################
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA256 structure
-########################################################################
-
-START_FIELDS # JOB_SHA256
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA256_size = _FIELD_OFFSET
- _JOB_SHA256_align = _STRUCT_ALIGN
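
The FIELD/END_FIELDS machinery above is plain align-then-advance
arithmetic: round the running offset up to the field's alignment, name
that offset, advance by the size, and let END_FIELDS pad the total to the
largest alignment seen. The same computation in C, reproducing the
JOB_SHA256 offsets from the structure just above (standalone sketch,
illustrative names):

	#include <stdio.h>

	static unsigned offset, struct_align;

	/* Mirror of the FIELD macro: align, record, advance. */
	static unsigned field(unsigned size, unsigned align)
	{
		unsigned off;

		offset = (offset + align - 1) & ~(align - 1);
		off = offset;
		offset += size;
		if (align > struct_align)
			struct_align = align;
		return off;
	}

	/* Mirror of END_FIELDS: pad the size to the overall alignment. */
	static unsigned end_fields(void)
	{
		return (offset + struct_align - 1) & ~(struct_align - 1);
	}

	int main(void)
	{
		unsigned buffer = field(8, 8);
		unsigned len = field(8, 8);
		unsigned result_digest = field(8 * 4, 32);
		unsigned status = field(4, 4);
		unsigned user_data = field(8, 8);

		/* prints _buffer=0 _len=8 _result_digest=32 _status=64
		 * _user_data=72 size=96 */
		printf("_buffer=%u _len=%u _result_digest=%u _status=%u "
		       "_user_data=%u size=%u\n", buffer, len,
		       result_digest, status, user_data, end_fields());
		return 0;
	}
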
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
deleted file mode 100644
index d2364c5..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Flush routine for SHA256 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-#LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common register definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha256_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
-
- mov unused_lanes, _unused_lanes(state)
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha256_mb_mgr_flush_avx2)
-
-##############################################################################
-
-.align 16
-ENTRY(sha256_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
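
The copy_lane_data step above is what lets flush reuse the full-width
engine: every empty lane is pointed at the data buffer of the lane chosen
by idx and its length is parked at 0xFFFFFFFF, so sha256_x8_avx2 can
always crunch eight lanes while the minimum scan never selects an empty
one. A scalar C sketch of that step (illustrative names, not the removed
interfaces):

	#include <stdint.h>
	#include <stdio.h>

	#define LANES 8

	struct mgr {
		uint32_t lens[LANES];
		const uint8_t *data_ptr[LANES];
		void *job_in_lane[LANES];
	};

	/* Point each empty lane at a valid lane's buffer and park its
	 * length, as the .rep 8 block after copy_lane_data does. */
	static void copy_lane_data(struct mgr *m, unsigned idx)
	{
		const uint8_t *valid = m->data_ptr[idx];
		unsigned i;

		for (i = 0; i < LANES; i++) {
			if (m->job_in_lane[i])
				continue;
			m->data_ptr[i] = valid;
			m->lens[i] = 0xFFFFFFFF;
		}
	}

	int main(void)
	{
		static uint8_t buf[64];
		struct mgr m = { { 0 } };
		int job = 1;

		m.job_in_lane[2] = &job;	/* only lane 2 is busy */
		m.data_ptr[2] = buf;
		m.lens[2] = (1 << 4) | 2;	/* one block, lane 2 */
		copy_lane_data(&m, 2);
		printf("lane 0 parked at %#x\n", m.lens[0]);	/* 0xffffffff */
		return 0;
	}
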
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
deleted file mode 100644
index b0c4983..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha256_mb_mgr.h"
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
-{
- unsigned int j;
-
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
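
The 0xF76543210ULL initializer above packs one free 4-bit lane index per
nibble, with 0xF in the ninth nibble as a sentinel; submit pops a lane
from the low nibble, flush pushes a finished lane back, and "all lanes
empty" is exactly the sentinel's top bit landing at bit 32+3 (the bt test
in the flush path). A standalone C sketch of that pop/push discipline
(illustrative helpers, not the removed API):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* submit path: take the next free lane from the low nibble. */
	static unsigned pop_lane(uint64_t *unused_lanes)
	{
		unsigned lane = *unused_lanes & 0xF;

		*unused_lanes >>= 4;
		return lane;
	}

	/* flush path: return a finished lane to the low nibble. */
	static void push_lane(uint64_t *unused_lanes, unsigned lane)
	{
		*unused_lanes = (*unused_lanes << 4) | lane;
	}

	int main(void)
	{
		uint64_t unused_lanes = 0xF76543210ULL;	/* the init value */
		unsigned i;

		for (i = 0; i < 8; i++)
			printf("allocated lane %u\n", pop_lane(&unused_lanes));

		/* All eight lanes busy: only the sentinel remains, which is
		 * what submit's "cmp $0xF, unused_lanes" checks before it
		 * kicks off sha256_x8_avx2. */
		assert(unused_lanes == 0xF);

		for (i = 0; i < 8; i++)
			push_lane(&unused_lanes, i);
		/* All lanes free again: bit 32+3 of the word is set. */
		assert((unused_lanes >> 35) & 1);
		return 0;
	}
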
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
deleted file mode 100644
index b36ae74..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA256 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
-# arg 1 : rcx : state
-# arg 2 : rdx : job
-ENTRY(sha256_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- vmovd %xmm1, _args_digest+4*32(state , lane, 4)
-
- vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
- vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
- vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
-
- vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha256_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
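
submit packs each queued job as (blocks << 4) | lane into _lens ("shl $4,
len; or lane, len" above), and both submit and flush then locate the
shortest outstanding job with one unsigned minimum over all eight entries:
because the lane index rides in the low 4 bits, a single scan yields both
the block count and the winning lane, and idle lanes parked at 0xFFFFFFFF
can never win. A scalar C equivalent of the vpminud/vpalignr reduction
tree (standalone sketch):

	#include <stdint.h>
	#include <stdio.h>

	#define LANES 8

	/* Scalar stand-in for the SIMD reduction: the packed minimum of
	 * (blocks << 4 | lane) across all eight lens entries. */
	static uint32_t find_min_job(const uint32_t lens[LANES])
	{
		uint32_t min = lens[0];
		unsigned i;

		for (i = 1; i < LANES; i++)
			if (lens[i] < min)
				min = lens[i];
		return min;
	}

	int main(void)
	{
		uint32_t lens[LANES], min;
		unsigned i, lane, blocks;

		for (i = 0; i < LANES; i++)
			lens[i] = 0xFFFFFFFF;	/* idle lanes never win */
		lens[2] = (5 << 4) | 2;		/* five blocks queued in lane 2 */
		lens[6] = (3 << 4) | 6;		/* three blocks queued in lane 6 */

		min = find_min_job(lens);
		lane = min & 0xF;		/* "and $0xF, idx" */
		blocks = min >> 4;		/* "shr $4, len2" */
		printf("run %u blocks; lane %u completes first\n", blocks, lane);
		return 0;
	}

The vpand against clear_low_nibble then strips the lane bits so the common
minimum can be subtracted from every lane's length before calling
sha256_x8_avx2.
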
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
deleted file mode 100644
index 1687c80..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Multi-buffer SHA256 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-## code to compute eight SHA256 digests in parallel using AVX2
-## outer calling routine takes care of save and restore of XMM registers
-## Logic designed/laid out by JDG
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers %ymm0-15
-
-arg1 = %rdi
-arg2 = %rsi
-reg3 = %rcx
-reg4 = %rdx
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = reg3
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = reg4
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-T1 = %ymm8
-
-a0 = %ymm12
-a1 = %ymm13
-a2 = %ymm14
-TMP = %ymm15
-TMP0 = %ymm6
-TMP1 = %ymm7
-
-TT0 = %ymm8
-TT1 = %ymm9
-TT2 = %ymm10
-TT3 = %ymm11
-TT4 = %ymm12
-TT5 = %ymm13
-TT6 = %ymm14
-TT7 = %ymm15
-
-# Define stack usage
-
-# Assume stack aligned to 32 bytes before call
-# Therefore FRAMESZ mod 32 must be 32-8 = 24
-
-#define FRAMESZ 0x388
-
-#define VMOVPS vmovups
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-.macro _PRORD reg imm tmp
- vpslld $(32-\imm),\reg,\tmp
- vpsrld $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# PRORD_nd reg, imm, tmp, src
-.macro _PRORD_nd reg imm tmp src
- vpslld $(32-\imm), \src, \tmp
- vpsrld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORD dst/src, amt
-.macro PRORD reg imm
- _PRORD \reg,\imm,TMP
-.endm
-
-# PRORD_nd dst, src, amt
-.macro PRORD_nd reg tmp imm
- _PRORD_nd \reg, \imm, TMP, \tmp
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
-
- vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
- vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddd a2, h, h # h = h + ch
- PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
- vpaddd \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
- vpxor c, a, \_T1 # maj: T1 = a^c
- add $SZ8, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddd a0, h, h
- vpaddd h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddd a1, h, h # h = h + ch + W + K + maj
- vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
- vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
- vmovdqu \_T1, a0
- PRORD \_T1,11
- vmovdqu a1, a2
- PRORD a1,2
- vpxor a0, \_T1, \_T1
- PRORD \_T1, 7
- vpxor a2, a1, a1
- PRORD a1, 17
- vpsrld $3, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrld $10, a2, a2
- vpxor a2, a1, a1
- vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
- vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
- vpaddd a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-# SHA256_ARGS:
-# UINT32 digest[8][8]; // transposed digests
-# UINT8 *data_ptr[8];
-
-# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
-# arg 1 : STATE : pointer to array of pointers to input data
-# arg 2 : INP_SIZE : size of input in blocks
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save rsp, allocate 32-byte aligned for local variables
-ENTRY(sha256_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, IDX
- sub $FRAMESZ, %rsp
- and $~0x1F, %rsp
- mov IDX, _rsp(%rsp)
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
-
- lea K256_8(%rip),TBL
-
-	# load the address of each of the 8 message lanes
- # getting ready to transpose input onto stack
- mov _args_data_ptr+0*PTR_SZ(STATE),inp0
- mov _args_data_ptr+1*PTR_SZ(STATE),inp1
- mov _args_data_ptr+2*PTR_SZ(STATE),inp2
- mov _args_data_ptr+3*PTR_SZ(STATE),inp3
- mov _args_data_ptr+4*PTR_SZ(STATE),inp4
- mov _args_data_ptr+5*PTR_SZ(STATE),inp5
- mov _args_data_ptr+6*PTR_SZ(STATE),inp6
- mov _args_data_ptr+7*PTR_SZ(STATE),inp7
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ8(%rsp)
- vmovdqu c, _digest+2*SZ8(%rsp)
- vmovdqu d, _digest+3*SZ8(%rsp)
- vmovdqu e, _digest+4*SZ8(%rsp)
- vmovdqu f, _digest+5*SZ8(%rsp)
- vmovdqu g, _digest+6*SZ8(%rsp)
- vmovdqu h, _digest+7*SZ8(%rsp)
- i = 0
-.rep 2
- VMOVPS i*32(inp0, IDX), TT0
- VMOVPS i*32(inp1, IDX), TT1
- VMOVPS i*32(inp2, IDX), TT2
- VMOVPS i*32(inp3, IDX), TT3
- VMOVPS i*32(inp4, IDX), TT4
- VMOVPS i*32(inp5, IDX), TT5
- VMOVPS i*32(inp6, IDX), TT6
- VMOVPS i*32(inp7, IDX), TT7
- vmovdqu g, _ytmp(%rsp)
- vmovdqu h, _ytmp+1*SZ8(%rsp)
- TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
- vmovdqu _ytmp(%rsp), g
- vpshufb TMP1, TT0, TT0
- vpshufb TMP1, TT1, TT1
- vpshufb TMP1, TT2, TT2
- vpshufb TMP1, TT3, TT3
- vpshufb TMP1, TT4, TT4
- vpshufb TMP1, TT5, TT5
- vpshufb TMP1, TT6, TT6
- vpshufb TMP1, TT7, TT7
- vmovdqu _ytmp+1*SZ8(%rsp), h
- vmovdqu TT4, _ytmp(%rsp)
- vmovdqu TT5, _ytmp+1*SZ8(%rsp)
- vmovdqu TT6, _ytmp+2*SZ8(%rsp)
- vmovdqu TT7, _ytmp+3*SZ8(%rsp)
- ROUND_00_15 TT0,(i*8+0)
- vmovdqu _ytmp(%rsp), TT0
- ROUND_00_15 TT1,(i*8+1)
- vmovdqu _ytmp+1*SZ8(%rsp), TT1
- ROUND_00_15 TT2,(i*8+2)
- vmovdqu _ytmp+2*SZ8(%rsp), TT2
- ROUND_00_15 TT3,(i*8+3)
- vmovdqu _ytmp+3*SZ8(%rsp), TT3
- ROUND_00_15 TT0,(i*8+4)
- ROUND_00_15 TT1,(i*8+5)
- ROUND_00_15 TT2,(i*8+6)
- ROUND_00_15 TT3,(i*8+7)
- i = (i+1)
-.endr
- add $64, IDX
- i = (i*8)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
-
- cmp $ROUNDS,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddd _digest+0*SZ8(%rsp), a, a
- vpaddd _digest+1*SZ8(%rsp), b, b
- vpaddd _digest+2*SZ8(%rsp), c, c
- vpaddd _digest+3*SZ8(%rsp), d, d
- vpaddd _digest+4*SZ8(%rsp), e, e
- vpaddd _digest+5*SZ8(%rsp), f, f
- vpaddd _digest+6*SZ8(%rsp), g, g
- vpaddd _digest+7*SZ8(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
-
- # update input pointers
- add IDX, inp0
- mov inp0, _args_data_ptr+0*8(STATE)
- add IDX, inp1
- mov inp1, _args_data_ptr+1*8(STATE)
- add IDX, inp2
- mov inp2, _args_data_ptr+2*8(STATE)
- add IDX, inp3
- mov inp3, _args_data_ptr+3*8(STATE)
- add IDX, inp4
- mov inp4, _args_data_ptr+4*8(STATE)
- add IDX, inp5
- mov inp5, _args_data_ptr+5*8(STATE)
- add IDX, inp6
- mov inp6, _args_data_ptr+6*8(STATE)
- add IDX, inp7
- mov inp7, _args_data_ptr+7*8(STATE)
-
- # Postamble
- mov _rsp(%rsp), %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha256_x8_avx2)
-
-.section .rodata.K256_8, "a", @progbits
-.align 64
-K256_8:
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x71374491713744917137449171374491
- .octa 0x71374491713744917137449171374491
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x243185be243185be243185be243185be
- .octa 0x243185be243185be243185be243185be
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x14292967142929671429296714292967
- .octa 0x14292967142929671429296714292967
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xc67178f2c67178f2c67178f2c67178f2
- .octa 0xc67178f2c67178f2c67178f2c67178f2
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
-
-.section .rodata.cst256.K256, "aM", @progbits, 256
-.align 64
-.global K256
-K256:
- .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
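
The SHA256_DIGEST_ROW_SIZE stride in the file above reflects the
transposed digest layout: digest word w of lane l lives at digest[w][l],
so the eight lanes' copies of the same word sit contiguously and one ymm
load fetches word w for all lanes at once; this is also why the flush
path reads lane idx's word w at _args_digest + w*32(state, idx, 4). A
small C illustration of the addressing, assuming the struct
sha256_args_x8 layout from the deleted sha256_mb_mgr.h:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_SHA256_LANES	8
	#define SHA256_DIGEST_WORDS	8

	/* Transposed layout: digest[word][lane]. */
	struct sha256_args_x8 {
		uint32_t digest[SHA256_DIGEST_WORDS][MAX_SHA256_LANES];
		uint8_t *data_ptr[MAX_SHA256_LANES];
	};

	int main(void)
	{
		struct sha256_args_x8 args;
		unsigned lane = 3, word = 5;

		/* Byte offset the assembly computes as
		 * _args_digest + word*32(state, lane, 4): */
		size_t asm_offset = word * 32 + lane * 4;
		size_t c_offset = (uint8_t *)&args.digest[word][lane] -
				  (uint8_t *)&args;

		printf("offset %zu == %zu\n", asm_offset, c_offset);	/* 172 == 172 */
		return 0;
	}
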
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
deleted file mode 100644
index 90f1ef6..00000000
--- a/arch/x86/crypto/sha512-mb/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
- sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
- sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
-endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
deleted file mode 100644
index 26b8567..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * Multi buffer SHA512 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha512_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha512_mb_alg_state;
-
-struct sha512_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
- (struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
- (struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
- (struct sha512_mb_mgr *state);
-
-inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA512_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA512_PADLENGTHFIELD_SIZE;
-
-#if SHA512_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA512_LOG2_BLOCK_SIZE;
-}
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
- (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA512_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA512_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA512_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha512_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- if (ctx)
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
- * returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_get_comp_job(&mgr->mgr);
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
-{
- sha512_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
- struct sha512_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- struct sha512_ctx_mgr *mgr;
- unsigned long irqflags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- goto unlock;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently being
- * processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
- /* Compute how many bytes to copy from user buffer into extra
- * block
- */
- uint32_t copy_len = SHA512_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy
- (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
-
- /* If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-unlock:
- spin_unlock_irqrestore(&cstate->work_lock, irqflags);
- return ctx;
-}
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
-{
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- while (1) {
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- break;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha512_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- break;
- }
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static int sha512_mb_init(struct ahash_request *areq)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA512_H0;
- sctx->job.result_digest[1] = SHA512_H1;
- sctx->job.result_digest[2] = SHA512_H2;
- sctx->job.result_digest[3] = SHA512_H3;
- sctx->job.result_digest[4] = SHA512_H4;
- sctx->job.result_digest[5] = SHA512_H5;
- sctx->job.result_digest[6] = SHA512_H6;
- sctx->job.result_digest[7] = SHA512_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be64 *dst = (__be64 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha512_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha512_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha512_ctx_mgr_flush(cstate);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha512_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
- unsigned long flags;
-
- /* remove from work list */
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&rctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&req_ctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- }
-
- return 0;
-}
-
-static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
- unsigned long flags;
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha512_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha512_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha512_hash_ctx));
-
- return 0;
-}
-
-static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha512_mb_areq_alg = {
- .init = sha512_mb_init,
- .update = sha512_mb_update,
- .final = sha512_mb_final,
- .finup = sha512_mb_finup,
- .export = sha512_mb_export,
- .import = sha512_mb_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "__sha512-mb",
- .cra_driver_name = "__intel_sha512-mb",
- .cra_priority = 100,
- /*
- * use the ASYNC flag as some buffers in the multi-buffer
- * algorithm may not have completed before the hashing
- * thread sleeps
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha512_mb_areq_init_tfm,
- .cra_exit = sha512_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_hash_ctx),
- }
- }
-};
-
-static int sha512_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha512_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha512_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha512_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha512_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha512_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha512_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
-
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha512_mb_async_alg = {
- .init = sha512_mb_async_init,
- .update = sha512_mb_async_update,
- .final = sha512_mb_async_final,
- .finup = sha512_mb_async_finup,
- .digest = sha512_mb_async_digest,
- .export = sha512_mb_async_export,
- .import = sha512_mb_async_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_async_alg.halg.base.cra_list),
- .cra_init = sha512_mb_async_init_tfm,
- .cra_exit = sha512_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha512_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha512_hash_ctx *)
- sha512_ctx_mgr_flush(cstate);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha512_mb error: nothing got flushed for"
- " non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha512_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha512_mb_alg_state.alg_cstate =
- alloc_percpu(struct mcryptd_alg_cstate);
-
- sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
- sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
- sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
- sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
-
- if (!sha512_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha512_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha512_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha512_mb_alg_state.flusher = &sha512_mb_flusher;
-
- err = crypto_register_ahash(&sha512_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha512_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha512_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha512_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha512_mb_async_alg);
- crypto_unregister_ahash(&sha512_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
-}
-
-module_init(sha512_mb_mod_init);
-module_exit(sha512_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS("sha512");
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
deleted file mode 100644
index e5c465b..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Header file for multi buffer SHA512 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha512_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-};
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-/* Hash Constants and Typedefs */
-#define SHA512_DIGEST_LENGTH 8
-#define SHA512_LOG2_BLOCK_SIZE 7
-
-#define SHA512_PADLENGTHFIELD_SIZE 16
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha512_ctx_mgr {
- struct sha512_mb_mgr mgr;
-};
-
-/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
-
-struct sha512_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha512 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
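
The SHA512_LOG2_BLOCK_SIZE and SHA512_PADLENGTHFIELD_SIZE constants above feed
the padding arithmetic in sha512_pad(). A worked example, as a standalone
user-space sketch with a hand-picked value: for a 200-byte message, 72 bytes
spill into the final block, leaving 56 bytes, enough for the 0x80 marker plus
the 16-byte length field, so exactly one extra block is hashed.

#include <assert.h>
#include <stdint.h>

#define BLOCK	128	/* SHA512_BLOCK_SIZE */
#define LENF	16	/* SHA512_PADLENGTHFIELD_SIZE */

int main(void)
{
	uint64_t total_len = 200;
	uint32_t i = total_len & (BLOCK - 1);		/* 72 */

	/* same expression as sha512_pad(): advance past the 0x80
	 * marker and the length field, rounded to a block boundary */
	i += ((BLOCK - 1) & (0 - (total_len + LENF + 1)))
	     + 1 + LENF;				/* 72 + 39 + 17 */

	assert(i == 128);	/* padded region ends on a block edge */
	assert((i >> 7) == 1);	/* i >> SHA512_LOG2_BLOCK_SIZE: 1 block */
	return 0;
}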
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
deleted file mode 100644
index 178f17e..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA512_DIGEST_WORDS 8
-
-enum job_sts {STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha512 {
- u8 *buffer;
- u64 len;
- u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-struct sha512_args_x4 {
- uint64_t digest[8][4];
- uint8_t *data_ptr[4];
-};
-
-struct sha512_lane_data {
- struct job_sha512 *job_in_lane;
-};
-
-struct sha512_mb_mgr {
- struct sha512_args_x4 args;
-
- uint64_t lens[4];
-
- /* each byte is index (0...7) of unused lanes */
- uint64_t unused_lanes;
- /* byte 4 is set to FF as a flag */
- struct sha512_lane_data ldata[4];
-};
-
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
-
-#endif
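
The unused_lanes word above is a packed free list: one free lane index per
byte, consumed from the low end, with the 0xFF byte as a sentinel. When the
sentinel reaches the low byte every lane is busy (the submit path's cmp $0xFF
test), and while it still sits in byte 4 (bit 32+7) every lane is free (the
flush path's bt test). A user-space sketch of the pop/push the assembly does
with shr/shl; the initial value is the one written by sha512_mb_mgr_init_avx2():

#include <stdint.h>
#include <stdio.h>

static unsigned int pop_lane(uint64_t *ul)
{
	unsigned int lane = *ul & 0xFF;	/* movzb %bl, lane      */
	*ul >>= 8;			/* shr $8, unused_lanes */
	return lane;
}

static void push_lane(uint64_t *ul, unsigned int lane)
{
	*ul = (*ul << 8) | lane;	/* shl $8 ; or idx      */
}

int main(void)
{
	uint64_t ul = 0xFF03020100ULL;	/* all four lanes free */
	unsigned int lane = pop_lane(&ul);

	printf("lane=%u ul=%#llx\n", lane, (unsigned long long)ul);
	/* lane=0 ul=0xff030201; after four pops ul would be 0xff */
	push_lane(&ul, lane);		/* lane done, back on the list */
	printf("ul=%#llx\n", (unsigned long long)ul);
	return 0;
}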
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
deleted file mode 100644
index cf2636d..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define PTR_SZ 8
-#define SHA512_DIGEST_WORD_SIZE 8
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-#define NUM_SHA512_DIGEST_WORDS 8
-#define SZ4 4*SHA512_DIGEST_WORD_SIZE
-#define ROUNDS 80*SZ4
-#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - ##tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-###################################################################
-### Define SHA512 Out Of Order Data Structures
-###################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-####################################################################
-
-START_FIELDS # SHA512_ARGS_X4
-### name size align
-FIELD _digest, 8*8*4, 4 # transposed digest
-FIELD _data_ptr, 8*4, 8 # array of pointers to data
-END_FIELDS
-
- _SHA512_ARGS_X4_size = _FIELD_OFFSET
- _SHA512_ARGS_X4_align = _STRUCT_ALIGN
-
-#####################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
-FIELD _lens, 8*4, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-#######################################################################
-#### Define constants
-#######################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-#######################################################################
-#### Define JOB_SHA512 structure
-#######################################################################
-
-START_FIELDS # JOB_SHA512
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*8, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA512_size = _FIELD_OFFSET
- _JOB_SHA512_align = _STRUCT_ALIGN
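
The FIELD/END_FIELDS macros above round each offset up to the field's
alignment and finally pad the struct to its largest alignment. A C sketch
reproducing that arithmetic for the JOB_SHA512 layout defined at the end of
the file; the expected offsets in the comments are derived by hand:

#include <stdio.h>

static unsigned int off, align_max;

static unsigned int field(unsigned int size, unsigned int align)
{
	unsigned int o = (off + align - 1) & ~(align - 1);

	off = o + size;
	if (align > align_max)
		align_max = align;
	return o;
}

int main(void)
{
	printf("_buffer        %u\n", field(8, 8));	/* 0   */
	printf("_len           %u\n", field(8, 8));	/* 8   */
	printf("_result_digest %u\n", field(64, 32));	/* 32  */
	printf("_status        %u\n", field(4, 4));	/* 96  */
	printf("_user_data     %u\n", field(8, 8));	/* 104 */

	off = (off + align_max - 1) & ~(align_max - 1);	/* END_FIELDS */
	printf("_JOB_SHA512_size %u\n", off);		/* 128 */
	return 0;
}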
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7c629ca..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Flush routine for SHA512 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# idx needs to be other than arg1, arg2, rbx, r12
-#define idx %rdx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 arg1
-
-#define extra_blocks arg2
-#define p arg2
-
-#define tmp4 %r8
-#define lens0 %r8
-
-#define lens1 %r9
-#define lens2 %r10
-#define lens3 %r11
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha512_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+7) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+7, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 4
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 8*I +4)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- mov _lens + 0*8(state),lens0
- mov lens0,idx
- mov _lens + 1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens + 2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens + 3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2, lens0
- sub len2, lens1
- sub len2, lens2
- sub len2, lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest+0*32(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_flush_avx2)
-.align 16
-
-ENTRY(sha512_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- mov _unused_lanes(state), unused_lanes
- bt $(32+7), unused_lanes
- jc .return_null
-
- # Find min length
- mov _lens(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- test $~0xF,idx
- jnz .return_null
- and $0xF,idx
-
- #process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest+0*16(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst8.one, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-
-.section .rodata.cst8.two, "aM", @progbits, 8
-.align 8
-two:
-.quad 2
-
-.section .rodata.cst8.three, "aM", @progbits, 8
-.align 8
-three:
-.quad 3
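
The "Find min length" blocks above rely on the lens[] packing set up by
sha512_mb_mgr_init_avx2(): the block count lives in the high 32 bits and the
lane number in the low bits of each 64-bit word, and lanes without a job are
parked at 0xFFFFFFFF in the high dword so they never win the comparison. One
unsigned min therefore yields both the shortest job and its lane. A user-space
sketch with made-up block counts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lens[4] = {
		(5ULL << 32) | 0,	/* lane 0: 5 blocks */
		(2ULL << 32) | 1,	/* lane 1: 2 blocks */
		(7ULL << 32) | 2,
		(9ULL << 32) | 3,
	};
	uint64_t min = lens[0];
	int i;

	for (i = 1; i < 4; i++)		/* the cmp/cmovb chain */
		if (lens[i] < min)
			min = lens[i];

	printf("lane %llu, %llu blocks\n",
	       (unsigned long long)(min & 0xF),		/* and $0xF, idx */
	       (unsigned long long)(min >> 32));	/* shr $32, len2 */
	return 0;
}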
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
deleted file mode 100644
index d088050..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Initialization code for multi buffer SHA512 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha512_mb_mgr.h"
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
-{
- unsigned int j;
-
- /* initially all lanes are unused */
- state->lens[0] = 0xFFFFFFFF00000000;
- state->lens[1] = 0xFFFFFFFF00000001;
- state->lens[2] = 0xFFFFFFFF00000002;
- state->lens[3] = 0xFFFFFFFF00000003;
-
- state->unused_lanes = 0xFF03020100;
- for (j = 0; j < 4; j++)
- state->ldata[j].job_in_lane = NULL;
-}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
deleted file mode 100644
index 4ba709b..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA512 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-#define arg1 %rdi
-#define arg2 %rsi
-
-#define idx %rdx
-#define last_len %rdx
-
-#define size_offset %rcx
-#define tmp2 %rcx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-#define p2 arg2
-
-#define p %r11
-#define start_offset %r11
-
-#define unused_lanes %rbx
-
-#define job_rax %rax
-#define len %rax
-
-#define lane %r12
-#define tmp3 %r12
-#define lens3 %r12
-
-#define extra_blocks %r8
-#define lens0 %r8
-
-#define tmp %r9
-#define lens1 %r9
-
-#define lane_data %r10
-#define lens2 %r10
-
-#define DWORD_len %eax
-
-# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
-# arg 1 : rcx : state
-# arg 2 : rdx : job
-ENTRY(sha512_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- movzb %bl,lane
- shr $8, unused_lanes
- imul $_LANE_DATA_size, lane,lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- movl DWORD_len,_lens+4(state , lane, 8)
-
- # Load digest words from result_digest
- vmovdqu _result_digest+0*16(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovdqu _result_digest+2*16(job), %xmm2
- vmovdqu _result_digest+3*16(job), %xmm3
-
- vmovq %xmm0, _args_digest(state, lane, 8)
- vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
- vmovq %xmm1, _args_digest+2*32(state , lane, 8)
- vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
- vmovq %xmm2, _args_digest+4*32(state , lane, 8)
- vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
- vmovq %xmm3, _args_digest+6*32(state , lane, 8)
- vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xFF, unused_lanes
- jne return_null
-
-start_loop:
-
- # Find min length
- mov _lens+0*8(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1, idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2,lens0
- sub len2,lens1
- sub len2,lens2
- sub len2,lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF,_lens+4(state,idx,8)
- vmovq _args_digest+0*32(state , idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state , idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state , idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state , idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest + 0*16(job_rax)
- vmovdqu %xmm1, _result_digest + 1*16(job_rax)
- vmovdqu %xmm2, _result_digest + 2*16(job_rax)
- vmovdqu %xmm3, _result_digest + 3*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_submit_avx2)
-
-/* UNUSED?
-.section .rodata.cst16, "aM", @progbits, 16
-.align 16
-H0: .int 0x6a09e667
-H1: .int 0xbb67ae85
-H2: .int 0x3c6ef372
-H3: .int 0xa54ff53a
-H4: .int 0x510e527f
-H5: .int 0x9b05688c
-H6: .int 0x1f83d9ab
-H7: .int 0x5be0cd19
-*/
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
deleted file mode 100644
index e22e907..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Multi-buffer SHA512 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# code to compute quad SHA512 using AVX2
-# use YMMs to tackle the larger digest size
-# outer calling routine takes care of save and restore of XMM registers
-# Logic designed/laid out by JDG
-
-# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-# Stack must be aligned to 32 bytes before call
-# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
-# Linux preserves: rcx rdx rdi rbp r13 r14 r15
-# clobbers ymm0-15
-
-#include <linux/linkage.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-arg1 = %rdi
-arg2 = %rsi
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = %r8
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-a0 = %ymm8
-a1 = %ymm9
-a2 = %ymm10
-
-TT0 = %ymm14
-TT1 = %ymm13
-TT2 = %ymm12
-TT3 = %ymm11
-TT4 = %ymm10
-TT5 = %ymm9
-
-T1 = %ymm14
-TMP = %ymm15
-
-# Define stack usage
-STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
-
-#define VMOVPD vmovupd
-_digest = SZ4*16
-
-# transpose r0, r1, r2, r3, t0, t1
-# "transpose" data in {r0..r3} using temps {t0..t3}
-# Input looks like: {r0 r1 r2 r3}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-#
-# output looks like: {t0 r1 r0 r3}
-# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
-# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
-# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
-# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
-
-.macro TRANSPOSE r0 r1 r2 r3 t0 t1
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
-
- vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
- vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
- vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
- vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-# PRORQ reg, imm, tmp
-# packed-rotate-right-double
-# does a rotate by doing two shifts and an or
-.macro _PRORQ reg imm tmp
- vpsllq $(64-\imm),\reg,\tmp
- vpsrlq $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# non-destructive
-# PRORQ_nd reg, imm, tmp, src
-.macro _PRORQ_nd reg imm tmp src
- vpsllq $(64-\imm), \src, \tmp
- vpsrlq $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORQ dst/src, amt
-.macro PRORQ reg imm
- _PRORQ \reg, \imm, TMP
-.endm
-
-# PRORQ_nd dst, src, amt
-.macro PRORQ_nd reg tmp imm
- _PRORQ_nd \reg, \imm, TMP, \tmp
-.endm
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORQ_nd a1,e,41 # sig1: a1 = (e >> 25)
-
- offset = SZ4*(\i & 0xf)
- vmovdqu \_T1,offset(%rsp)
- vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORQ a0, 14 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddq a2, h, h # h = h + ch
- PRORQ_nd a2,a,6 # sig0: a2 = (a >> 11)
- vpaddq \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- vmovdqu a,\_T1
- PRORQ_nd a1,a,39 # sig0: a1 = (a >> 22)
- vpxor c, \_T1, \_T1 # maj: T1 = a^c
- add $SZ4, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddq a0, h, h
- vpaddq h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORQ a2,28 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddq a1, h, h # h = h + ch + W + K + maj
- vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
- vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
- vmovdqu \_T1, a0
- PRORQ \_T1,7
- vmovdqu a1, a2
- PRORQ a1,42
- vpxor a0, \_T1, \_T1
- PRORQ \_T1, 1
- vpxor a2, a1, a1
- PRORQ a1, 19
- vpsrlq $7, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrlq $6, a2, a2
- vpxor a2, a1, a1
- vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
- vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
- vpaddq a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-
-# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
-# arg 1 : STATE : pointer to input data
-# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
-ENTRY(sha512_x4_avx2)
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- sub $STACK_SPACE1, %rsp
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
-
- lea K512_4(%rip),TBL
-
- # load the address of each of the 4 message lanes
- # getting ready to transpose input onto stack
- mov _data_ptr+0*PTR_SZ(STATE),inp0
- mov _data_ptr+1*PTR_SZ(STATE),inp1
- mov _data_ptr+2*PTR_SZ(STATE),inp2
- mov _data_ptr+3*PTR_SZ(STATE),inp3
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ4(%rsp)
- vmovdqu c, _digest+2*SZ4(%rsp)
- vmovdqu d, _digest+3*SZ4(%rsp)
- vmovdqu e, _digest+4*SZ4(%rsp)
- vmovdqu f, _digest+5*SZ4(%rsp)
- vmovdqu g, _digest+6*SZ4(%rsp)
- vmovdqu h, _digest+7*SZ4(%rsp)
- i = 0
-.rep 4
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
- VMOVPD i*32(inp0, IDX), TT2
- VMOVPD i*32(inp1, IDX), TT1
- VMOVPD i*32(inp2, IDX), TT4
- VMOVPD i*32(inp3, IDX), TT3
- TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
- vpshufb TMP, TT0, TT0
- vpshufb TMP, TT1, TT1
- vpshufb TMP, TT2, TT2
- vpshufb TMP, TT3, TT3
- ROUND_00_15 TT0,(i*4+0)
- ROUND_00_15 TT1,(i*4+1)
- ROUND_00_15 TT2,(i*4+2)
- ROUND_00_15 TT3,(i*4+3)
- i = (i+1)
-.endr
- add $128, IDX
-
- i = (i*4)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
- cmp $0xa00,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddq _digest(%rsp), a, a
- vpaddq _digest+1*SZ4(%rsp), b, b
- vpaddq _digest+2*SZ4(%rsp), c, c
- vpaddq _digest+3*SZ4(%rsp), d, d
- vpaddq _digest+4*SZ4(%rsp), e, e
- vpaddq _digest+5*SZ4(%rsp), f, f
- vpaddq _digest+6*SZ4(%rsp), g, g
- vpaddq _digest+7*SZ4(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
-
- # update input data pointers
- add IDX, inp0
- mov inp0, _data_ptr+0*PTR_SZ(STATE)
- add IDX, inp1
- mov inp1, _data_ptr+1*PTR_SZ(STATE)
- add IDX, inp2
- mov inp2, _data_ptr+2*PTR_SZ(STATE)
- add IDX, inp3
- mov inp3, _data_ptr+3*PTR_SZ(STATE)
-
- #;;;;;;;;;;;;;;;
- #; Postamble
- add $STACK_SPACE1, %rsp
- # restore callee-saved clobbered registers
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- # outer calling routine restores XMM and other GP registers
- ret
-ENDPROC(sha512_x4_avx2)
-
-.section .rodata.K512_4, "a", @progbits
-.align 64
-K512_4:
- .octa 0x428a2f98d728ae22428a2f98d728ae22,\
- 0x428a2f98d728ae22428a2f98d728ae22
- .octa 0x7137449123ef65cd7137449123ef65cd,\
- 0x7137449123ef65cd7137449123ef65cd
- .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
- 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
- .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
- 0xe9b5dba58189dbbce9b5dba58189dbbc
- .octa 0x3956c25bf348b5383956c25bf348b538,\
- 0x3956c25bf348b5383956c25bf348b538
- .octa 0x59f111f1b605d01959f111f1b605d019,\
- 0x59f111f1b605d01959f111f1b605d019
- .octa 0x923f82a4af194f9b923f82a4af194f9b,\
- 0x923f82a4af194f9b923f82a4af194f9b
- .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
- 0xab1c5ed5da6d8118ab1c5ed5da6d8118
- .octa 0xd807aa98a3030242d807aa98a3030242,\
- 0xd807aa98a3030242d807aa98a3030242
- .octa 0x12835b0145706fbe12835b0145706fbe,\
- 0x12835b0145706fbe12835b0145706fbe
- .octa 0x243185be4ee4b28c243185be4ee4b28c,\
- 0x243185be4ee4b28c243185be4ee4b28c
- .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
- 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
- .octa 0x72be5d74f27b896f72be5d74f27b896f,\
- 0x72be5d74f27b896f72be5d74f27b896f
- .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
- 0x80deb1fe3b1696b180deb1fe3b1696b1
- .octa 0x9bdc06a725c712359bdc06a725c71235,\
- 0x9bdc06a725c712359bdc06a725c71235
- .octa 0xc19bf174cf692694c19bf174cf692694,\
- 0xc19bf174cf692694c19bf174cf692694
- .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
- 0xe49b69c19ef14ad2e49b69c19ef14ad2
- .octa 0xefbe4786384f25e3efbe4786384f25e3,\
- 0xefbe4786384f25e3efbe4786384f25e3
- .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
- 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
- .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
- 0x240ca1cc77ac9c65240ca1cc77ac9c65
- .octa 0x2de92c6f592b02752de92c6f592b0275,\
- 0x2de92c6f592b02752de92c6f592b0275
- .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
- 0x4a7484aa6ea6e4834a7484aa6ea6e483
- .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
- 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
- .octa 0x76f988da831153b576f988da831153b5,\
- 0x76f988da831153b576f988da831153b5
- .octa 0x983e5152ee66dfab983e5152ee66dfab,\
- 0x983e5152ee66dfab983e5152ee66dfab
- .octa 0xa831c66d2db43210a831c66d2db43210,\
- 0xa831c66d2db43210a831c66d2db43210
- .octa 0xb00327c898fb213fb00327c898fb213f,\
- 0xb00327c898fb213fb00327c898fb213f
- .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
- 0xbf597fc7beef0ee4bf597fc7beef0ee4
- .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
- 0xc6e00bf33da88fc2c6e00bf33da88fc2
- .octa 0xd5a79147930aa725d5a79147930aa725,\
- 0xd5a79147930aa725d5a79147930aa725
- .octa 0x06ca6351e003826f06ca6351e003826f,\
- 0x06ca6351e003826f06ca6351e003826f
- .octa 0x142929670a0e6e70142929670a0e6e70,\
- 0x142929670a0e6e70142929670a0e6e70
- .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
- 0x27b70a8546d22ffc27b70a8546d22ffc
- .octa 0x2e1b21385c26c9262e1b21385c26c926,\
- 0x2e1b21385c26c9262e1b21385c26c926
- .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
- 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
- .octa 0x53380d139d95b3df53380d139d95b3df,\
- 0x53380d139d95b3df53380d139d95b3df
- .octa 0x650a73548baf63de650a73548baf63de,\
- 0x650a73548baf63de650a73548baf63de
- .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
- 0x766a0abb3c77b2a8766a0abb3c77b2a8
- .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
- 0x81c2c92e47edaee681c2c92e47edaee6
- .octa 0x92722c851482353b92722c851482353b,\
- 0x92722c851482353b92722c851482353b
- .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
- 0xa2bfe8a14cf10364a2bfe8a14cf10364
- .octa 0xa81a664bbc423001a81a664bbc423001,\
- 0xa81a664bbc423001a81a664bbc423001
- .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
- 0xc24b8b70d0f89791c24b8b70d0f89791
- .octa 0xc76c51a30654be30c76c51a30654be30,\
- 0xc76c51a30654be30c76c51a30654be30
- .octa 0xd192e819d6ef5218d192e819d6ef5218,\
- 0xd192e819d6ef5218d192e819d6ef5218
- .octa 0xd69906245565a910d69906245565a910,\
- 0xd69906245565a910d69906245565a910
- .octa 0xf40e35855771202af40e35855771202a,\
- 0xf40e35855771202af40e35855771202a
- .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
- 0x106aa07032bbd1b8106aa07032bbd1b8
- .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
- 0x19a4c116b8d2d0c819a4c116b8d2d0c8
- .octa 0x1e376c085141ab531e376c085141ab53,\
- 0x1e376c085141ab531e376c085141ab53
- .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
- 0x2748774cdf8eeb992748774cdf8eeb99
- .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
- 0x34b0bcb5e19b48a834b0bcb5e19b48a8
- .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
- 0x391c0cb3c5c95a63391c0cb3c5c95a63
- .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
- 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
- .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
- 0x5b9cca4f7763e3735b9cca4f7763e373
- .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
- 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
- .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
- 0x748f82ee5defb2fc748f82ee5defb2fc
- .octa 0x78a5636f43172f6078a5636f43172f60,\
- 0x78a5636f43172f6078a5636f43172f60
- .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
- 0x84c87814a1f0ab7284c87814a1f0ab72
- .octa 0x8cc702081a6439ec8cc702081a6439ec,\
- 0x8cc702081a6439ec8cc702081a6439ec
- .octa 0x90befffa23631e2890befffa23631e28,\
- 0x90befffa23631e2890befffa23631e28
- .octa 0xa4506cebde82bde9a4506cebde82bde9,\
- 0xa4506cebde82bde9a4506cebde82bde9
- .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
- 0xbef9a3f7b2c67915bef9a3f7b2c67915
- .octa 0xc67178f2e372532bc67178f2e372532b,\
- 0xc67178f2e372532bc67178f2e372532b
- .octa 0xca273eceea26619cca273eceea26619c,\
- 0xca273eceea26619cca273eceea26619c
- .octa 0xd186b8c721c0c207d186b8c721c0c207,\
- 0xd186b8c721c0c207d186b8c721c0c207
- .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
- 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
- .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
- 0xf57d4f7fee6ed178f57d4f7fee6ed178
- .octa 0x06f067aa72176fba06f067aa72176fba,\
- 0x06f067aa72176fba06f067aa72176fba
- .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
- 0x0a637dc5a2c898a60a637dc5a2c898a6
- .octa 0x113f9804bef90dae113f9804bef90dae,\
- 0x113f9804bef90dae113f9804bef90dae
- .octa 0x1b710b35131c471b1b710b35131c471b,\
- 0x1b710b35131c471b1b710b35131c471b
- .octa 0x28db77f523047d8428db77f523047d84,\
- 0x28db77f523047d8428db77f523047d84
- .octa 0x32caab7b40c7249332caab7b40c72493,\
- 0x32caab7b40c7249332caab7b40c72493
- .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
- 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
- .octa 0x431d67c49c100d4c431d67c49c100d4c,\
- 0x431d67c49c100d4c431d67c49c100d4c
- .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
- 0x4cc5d4becb3e42b64cc5d4becb3e42b6
- .octa 0x597f299cfc657e2a597f299cfc657e2a,\
- 0x597f299cfc657e2a597f299cfc657e2a
- .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
- 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
- .octa 0x6c44198c4a4758176c44198c4a475817,\
- 0x6c44198c4a4758176c44198c4a475817
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
- .octa 0x18191a1b1c1d1e1f1011121314151617
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 0fb9586..0ec4767 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
-config CRYPTO_MCRYPTD
- tristate "Software async multi-buffer crypto daemon"
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- select CRYPTO_WORKQUEUE
- help
- This is a generic software asynchronous crypto daemon that
- provides the kernel thread to assist multi-buffer crypto
- algorithms for submitting jobs and flushing jobs in multi-buffer
- crypto algorithms. Multi-buffer crypto algorithms are executed
- in the context of this kernel thread and drivers can post
- their crypto request asynchronously to be processed by this daemon.
-
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
@@ -848,54 +834,6 @@ config CRYPTO_SHA1_PPC_SPE
SHA-1 secure hash standard (DFIPS 180-4) implemented
using powerpc SPE SIMD instruction set.
-config CRYPTO_SHA1_MB
- tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA1
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA256_MB
- tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA512_MB
- tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA512
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
diff --git a/crypto/Makefile b/crypto/Makefile
index f6a234d..d719843 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -93,7 +93,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
-obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
deleted file mode 100644
index f141521..00000000
--- a/crypto/mcryptd.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * Software multibuffer async crypto daemon.
- *
- * Copyright (c) 2014 Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * Adapted from crypto daemon.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/aead.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sched/stat.h>
-#include <linux/slab.h>
-
-#define MCRYPTD_MAX_CPU_QLEN 100
-#define MCRYPTD_BATCH 9
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail);
-
-struct mcryptd_flush_list {
- struct list_head list;
- struct mutex lock;
-};
-
-static struct mcryptd_flush_list __percpu *mcryptd_flist;
-
-struct hashd_instance_ctx {
- struct crypto_ahash_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-static void mcryptd_queue_worker(struct work_struct *work);
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
-{
- struct mcryptd_flush_list *flist;
-
- if (!cstate->flusher_engaged) {
- /* put the flusher on the flush list */
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- mutex_lock(&flist->lock);
- list_add_tail(&cstate->flush_list, &flist->list);
- cstate->flusher_engaged = true;
- cstate->next_flush = jiffies + delay;
- queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
- &cstate->flush, delay);
- mutex_unlock(&flist->lock);
- }
-}
-EXPORT_SYMBOL(mcryptd_arm_flusher);
-
-static int mcryptd_init_queue(struct mcryptd_queue *queue,
- unsigned int max_cpu_qlen)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
- pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
- if (!queue->cpu_queue)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
- crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
- INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
- spin_lock_init(&cpu_queue->q_lock);
- }
- return 0;
-}
-
-static void mcryptd_fini_queue(struct mcryptd_queue *queue)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- BUG_ON(cpu_queue->queue.qlen);
- }
- free_percpu(queue->cpu_queue);
-}
-
-static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
- struct crypto_async_request *request,
- struct mcryptd_hash_request_ctx *rctx)
-{
- int cpu, err;
- struct mcryptd_cpu_queue *cpu_queue;
-
- cpu_queue = raw_cpu_ptr(queue->cpu_queue);
- spin_lock(&cpu_queue->q_lock);
- cpu = smp_processor_id();
- rctx->tag.cpu = smp_processor_id();
-
- err = crypto_enqueue_request(&cpu_queue->queue, request);
- pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
- cpu, cpu_queue, request);
- spin_unlock(&cpu_queue->q_lock);
- queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-
- return err;
-}
-
-/*
- * Try to opportunisticlly flush the partially completed jobs if
- * crypto daemon is the only task running.
- */
-static void mcryptd_opportunistic_flush(void)
-{
- struct mcryptd_flush_list *flist;
- struct mcryptd_alg_cstate *cstate;
-
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- while (single_task_running()) {
- mutex_lock(&flist->lock);
- cstate = list_first_entry_or_null(&flist->list,
- struct mcryptd_alg_cstate, flush_list);
- if (!cstate || !cstate->flusher_engaged) {
- mutex_unlock(&flist->lock);
- return;
- }
- list_del(&cstate->flush_list);
- cstate->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- cstate->alg_state->flusher(cstate);
- }
-}
-
-/*
- * Called in workqueue context, do one real cryption work (via
- * req->complete) and reschedule itself if there are more work to
- * do.
- */
-static void mcryptd_queue_worker(struct work_struct *work)
-{
- struct mcryptd_cpu_queue *cpu_queue;
- struct crypto_async_request *req, *backlog;
- int i;
-
- /*
- * Need to loop through more than once for multi-buffer to
- * be effective.
- */
-
- cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
- i = 0;
- while (i < MCRYPTD_BATCH || single_task_running()) {
-
- spin_lock_bh(&cpu_queue->q_lock);
- backlog = crypto_get_backlog(&cpu_queue->queue);
- req = crypto_dequeue_request(&cpu_queue->queue);
- spin_unlock_bh(&cpu_queue->q_lock);
-
- if (!req) {
- mcryptd_opportunistic_flush();
- return;
- }
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
- if (!cpu_queue->queue.qlen)
- return;
- ++i;
- }
- if (cpu_queue->queue.qlen)
- queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
-}
-
-void mcryptd_flusher(struct work_struct *__work)
-{
- struct mcryptd_alg_cstate *alg_cpu_state;
- struct mcryptd_alg_state *alg_state;
- struct mcryptd_flush_list *flist;
- int cpu;
-
- cpu = smp_processor_id();
- alg_cpu_state = container_of(to_delayed_work(__work),
- struct mcryptd_alg_cstate, flush);
- alg_state = alg_cpu_state->alg_state;
- if (alg_cpu_state->cpu != cpu)
- pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
- cpu, alg_cpu_state->cpu);
-
- if (alg_cpu_state->flusher_engaged) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- mutex_lock(&flist->lock);
- list_del(&alg_cpu_state->flush_list);
- alg_cpu_state->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- alg_state->flusher(alg_cpu_state);
- }
-}
-EXPORT_SYMBOL_GPL(mcryptd_flusher);
-
-static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-
- return ictx->queue;
-}
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
-{
- char *p;
- struct crypto_instance *inst;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_inst;
-
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
-
-out:
- return p;
-
-out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
-}
-
-static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return false;
-
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-
- if (*type & *mask & CRYPTO_ALG_INTERNAL)
- return true;
- else
- return false;
-}
-
-static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_ahash_spawn *spawn = &ictx->spawn;
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ahash *hash;
-
- hash = crypto_spawn_ahash(spawn);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
-
- ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mcryptd_hash_request_ctx) +
- crypto_ahash_reqsize(hash));
- return 0;
-}
-
-static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_ahash(ctx->child);
-}
-
-static int mcryptd_hash_setkey(struct crypto_ahash *parent,
- const u8 *key, unsigned int keylen)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_ahash *child = ctx->child;
- int err;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int mcryptd_hash_enqueue(struct ahash_request *req,
- crypto_completion_t complete)
-{
- int ret;
-
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mcryptd_queue *queue =
- mcryptd_get_queue(crypto_ahash_tfm(tfm));
-
- rctx->complete = req->base.complete;
- req->base.complete = complete;
-
- ret = mcryptd_enqueue_request(queue, &req->base, rctx);
-
- return ret;
-}
-
-static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_init_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_init);
-}
-
-static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_update(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_update_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_update);
-}
-
-static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_final(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_final_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_final);
-}
-
-static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
- rctx->out = req->result;
- err = crypto_ahash_finup(&rctx->areq);
-
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
-}
-
-static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
-}
-
-static int mcryptd_hash_export(struct ahash_request *req, void *out)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_export(&rctx->areq, out);
-}
-
-static int mcryptd_hash_import(struct ahash_request *req, const void *in)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_import(&rctx->areq, in);
-}
-
-static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
- struct mcryptd_queue *queue)
-{
- struct hashd_instance_ctx *ctx;
- struct ahash_instance *inst;
- struct hash_alg_common *halg;
- struct crypto_alg *alg;
- u32 type = 0;
- u32 mask = 0;
- int err;
-
- if (!mcryptd_check_internal(tb, &type, &mask))
- return -EINVAL;
-
- halg = ahash_attr_alg(tb[1], type, mask);
- if (IS_ERR(halg))
- return PTR_ERR(halg);
-
- alg = &halg->base;
- pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
- inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = ahash_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_ahash_spawn(&ctx->spawn, halg,
- ahash_crypto_instance(inst));
- if (err)
- goto out_free_inst;
-
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_OPTIONAL_KEY));
-
- inst->alg.halg.digestsize = halg->digestsize;
- inst->alg.halg.statesize = halg->statesize;
- inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
-
- inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
-
- inst->alg.init = mcryptd_hash_init_enqueue;
- inst->alg.update = mcryptd_hash_update_enqueue;
- inst->alg.final = mcryptd_hash_final_enqueue;
- inst->alg.finup = mcryptd_hash_finup_enqueue;
- inst->alg.export = mcryptd_hash_export;
- inst->alg.import = mcryptd_hash_import;
- if (crypto_hash_alg_has_setkey(halg))
- inst->alg.setkey = mcryptd_hash_setkey;
- inst->alg.digest = mcryptd_hash_digest_enqueue;
-
- err = ahash_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_ahash(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
-static struct mcryptd_queue mqueue;
-
-static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_DIGEST:
- return mcryptd_create_hash(tmpl, tb, &mqueue);
- break;
- }
-
- return -EINVAL;
-}
-
-static void mcryptd_free(struct crypto_instance *inst)
-{
- struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_ahash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
-}
-
-static struct crypto_template mcryptd_tmpl = {
- .name = "mcryptd",
- .create = mcryptd_create,
- .free = mcryptd_free,
- .module = THIS_MODULE,
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_ahash *tfm;
-
- if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- return __mcryptd_ahash_cast(tfm);
-}
-EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
-
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->areq;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
-
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
-{
- crypto_free_ahash(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
-
-static int __init mcryptd_init(void)
-{
- int err, cpu;
- struct mcryptd_flush_list *flist;
-
- mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
- for_each_possible_cpu(cpu) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- INIT_LIST_HEAD(&flist->list);
- mutex_init(&flist->lock);
- }
-
- err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
- if (err) {
- free_percpu(mcryptd_flist);
- return err;
- }
-
- err = crypto_register_template(&mcryptd_tmpl);
- if (err) {
- mcryptd_fini_queue(&mqueue);
- free_percpu(mcryptd_flist);
- }
-
- return err;
-}
-
-static void __exit mcryptd_exit(void)
-{
- mcryptd_fini_queue(&mqueue);
- crypto_unregister_template(&mcryptd_tmpl);
- free_percpu(mcryptd_flist);
-}
-
-subsys_initcall(mcryptd_init);
-module_exit(mcryptd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
-MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404f..00000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
--
1.8.3
[PATCH 01/55] ASoC: pcm: update FE/BE trigger order based on the command
by Yang Yingliang 16 Apr '20
From: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
[ Upstream commit acbf27746ecfa96b290b54cc7f05273482ea128a ]
Currently, the trigger orders SND_SOC_DPCM_TRIGGER_PRE/POST
determine the order in which FE DAI and BE DAI are triggered.
In the case of SND_SOC_DPCM_TRIGGER_PRE, the FE DAI is
triggered before the BE DAI and in the case of
SND_SOC_DPCM_TRIGGER_POST, the BE DAI is triggered before
the FE DAI. And this order remains the same irrespective of the
trigger command.
In the case of the SOF driver, during playback, the FW
expects the BE DAI to be triggered before the FE DAI during
the START trigger. The BE DAI trigger handles the starting of
Link DMA and so it must be started before the FE DAI is started
to prevent xruns during pause/release. This can be addressed
by setting the trigger order for the FE dai link to
SND_SOC_DPCM_TRIGGER_POST. But during the STOP trigger,
the FW expects the FE DAI to be triggered before the BE DAI.
Retaining the same order during the START and STOP commands
results in a FW error, as the DAI component in the FW is still
active.
The issue can be fixed by mirroring the trigger order of the
FE and BE DAIs during the START and STOP triggers. So, with the
trigger order set to SND_SOC_DPCM_TRIGGER_PRE, the FE DAI will be
triggered first during the SNDRV_PCM_TRIGGER_START/RESUME/PAUSE_RELEASE
commands and the BE DAI will be triggered first during the
STOP/SUSPEND/PAUSE_PUSH commands. Conversely, with the trigger order
set to SND_SOC_DPCM_TRIGGER_POST, the BE DAI will be triggered
first during the SNDRV_PCM_TRIGGER_START/RESUME/PAUSE_RELEASE commands
and the FE DAI will be triggered first during the
SNDRV_PCM_TRIGGER_STOP/SUSPEND/PAUSE_PUSH commands.
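For quick reference, a minimal sketch of the mirrored ordering this
implements (illustrative only; the helper and its name are invented here
and are not part of the patch):

#include <sound/pcm.h>
#include <sound/soc-dpcm.h>

/* Returns true when the FE side should be triggered before the BE side,
 * mirroring the two switch statements added in the diff below.
 */
static bool demo_fe_triggers_first(enum snd_soc_dpcm_trigger order, int cmd)
{
	bool start_like = cmd == SNDRV_PCM_TRIGGER_START ||
			  cmd == SNDRV_PCM_TRIGGER_RESUME ||
			  cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE;

	/* PRE: FE first on start-like commands; POST is the mirror image. */
	return (order == SND_SOC_DPCM_TRIGGER_PRE) ? start_like : !start_like;
}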
Signed-off-by: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart(a)linux.intel.com>
Link: https://lore.kernel.org/r/20191104224812.3393-2-ranjani.sridharan@linux.int…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
sound/soc/soc-pcm.c | 95 ++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 68 insertions(+), 27 deletions(-)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 53fefa7..f7d4a77 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2341,42 +2341,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
+ int cmd, bool fe_first)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret;
+
+ /* call trigger on the frontend before the backend. */
+ if (fe_first) {
+ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ return ret;
+ }
+
+ /* call trigger on the frontend after the backend. */
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+
+ return ret;
+}
+
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
- int stream = substream->stream, ret;
+ int stream = substream->stream;
+ int ret = 0;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
switch (trigger) {
case SND_SOC_DPCM_TRIGGER_PRE:
- /* call trigger on the frontend before the backend. */
-
- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_POST:
- /* call trigger on the frontend after the backend. */
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
@@ -2385,10 +2424,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
- }
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@@ -2397,6 +2432,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
goto out;
}
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
+ cmd, ret);
+ goto out;
+ }
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
--
1.8.3
[PATCH 001/195] Revert "drm/sun4i: dsi: Change the start delay calculation"
by Yang Yingliang 16 Apr '20
From: Icenowy Zheng <icenowy(a)aosc.io>
[ Upstream commit a00d17e0a71ae2e4fdaac46e1c12785d3346c3f2 ]
This reverts commit da676c6aa6413d59ab0a80c97bbc273025e640b2.
The original commit adds a start parameter to the calculation of the
start delay according to some old BSP versions from Allwinner. However,
there are two ways to add this delay -- add it in the DSI controller or
add it in the TCON. Adding it in both controllers won't work.
The code before this commit is picked from newer versions of the BSP
kernel, which carry a comment for the 1 that says "put start_delay to
tcon". Checking sun4i_tcon0_mode_set_cpu() in the sun4i_tcon driver
shows that it already adds this delay, so we shouldn't add the delay
again in the DSI controller, otherwise the timing won't match.
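As a worked example (hypothetical 640x480 timing, illustrative only),
the two formulas diverge like this:

/* Hypothetical mode: vtotal = 525, vdisplay = 480, vsync_end = 490.
 *
 * Reverted calculation:
 *   start = clamp(525 - 480 - 10, 8, 100) = 35
 *   delay = 525 - (490 - 480) + 35 = 550  -> exceeds vtotal, wraps to 25
 *
 * Restored calculation:
 *   delay = 525 - (490 - 480) + 1 = 516   -> within vtotal, no wrap
 */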
Signed-off-by: Icenowy Zheng <icenowy(a)aosc.io>
Reviewed-by: Jagan Teki <jagan(a)amarulasolutions.com>
Signed-off-by: Maxime Ripard <mripard(a)kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20191001080253.6135-2-icenowy…
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 97a0573..79eb11c 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -357,8 +357,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
--
1.8.3
From: Al Viro <viro(a)zeniv.linux.org.uk>
commit 6404674acd596de41fd3ad5f267b4525494a891a upstream.
Brown paperbag time: fetching ->i_uid/->i_mode really should've been
done from nd->inode. I even suggested that, but the reason for that has
slipped through the cracks and I went for dir->d_inode instead - it made
for a more "obvious" patch.
Analysis:
- at the entry into do_last() and all the way to step_into(): dir (aka
nd->path.dentry) is known not to have been freed; so's nd->inode and
it's equal to dir->d_inode unless we are already doomed to -ECHILD.
inode of the file to get opened is not known.
- after step_into(): inode of the file to get opened is known; dir
might be pointing to freed memory/be negative/etc.
- at the call of may_create_in_sticky(): guaranteed to be out of RCU
mode; inode of the file to get opened is known and pinned; dir might
be garbage.
The last was the reason for the original patch. Except that at the
do_last() entry we can be in RCU mode and it is possible that
nd->path.dentry->d_inode has already changed under us.
In that case we are going to fail with -ECHILD, but we need to be
careful; nd->inode is pointing to valid struct inode and it's the same
as nd->path.dentry->d_inode in "won't fail with -ECHILD" case, so we
should use that.
Reported-by: "Rantala, Tommi T. (Nokia - FI/Espoo)" <tommi.t.rantala(a)nokia.com>
Reported-by: syzbot+190005201ced78a74ad6(a)syzkaller.appspotmail.com
Wearing-brown-paperbag: Al Viro <viro(a)zeniv.linux.org.uk>
Cc: stable(a)kernel.org
Fixes: d0cb50185ae9 ("do_last(): fetch directory ->i_mode and ->i_uid before it's too late")
Signed-off-by: Al Viro <viro(a)zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/namei.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index 1dd68b3..18ddae1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3266,8 +3266,8 @@ static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
- kuid_t dir_uid = dir->d_inode->i_uid;
- umode_t dir_mode = dir->d_inode->i_mode;
+ kuid_t dir_uid = nd->inode->i_uid;
+ umode_t dir_mode = nd->inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
--
1.8.3
[PATCH 001/137] can, slip: Protect tty->disc_data in write_wakeup and close with RCU
by Yang Yingliang 16 Apr '20
From: Richard Palethorpe <rpalethorpe(a)suse.com>
[ Upstream commit 0ace17d56824165c7f4c68785d6b58971db954dd ]
write_wakeup can happen in parallel with close/hangup, where tty->disc_data
is set to NULL and the netdevice is freed, thus also freeing
disc_data. write_wakeup accesses disc_data, so we must prevent close from
freeing the netdev while write_wakeup has a non-NULL view of
tty->disc_data.
We also need to make sure that accesses to disc_data are atomic, which can
all be done with RCU.
This problem was found by Syzkaller on SLCAN, but the same issue is
reproducible with the SLIP line discipline using an LTP test based on the
Syzkaller reproducer.
A fix which didn't use RCU was posted by Hillf Danton.
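A minimal sketch of the RCU publish/unpublish pattern the fix applies
(names are illustrative; the real hunks follow below):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct tx_work;
};

/* Reader: only touch ->disc_data under the RCU read lock. */
static void demo_write_wakeup(struct tty_struct *tty)
{
	struct demo_priv *sl;

	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}

/* Updater: unpublish the pointer, wait out readers, then tear down. */
static void demo_close(struct tty_struct *tty, struct demo_priv *sl)
{
	rcu_assign_pointer(tty->disc_data, NULL);
	synchronize_rcu();		/* no reader can still obtain sl */
	flush_work(&sl->tx_work);	/* pending work can no longer requeue */
	kfree(sl);
}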
Fixes: 661f7fda21b1 ("slip: Fix deadlock in write_wakeup")
Fixes: a8e83b17536a ("slcan: Port write_wakeup deadlock fix from slip")
Reported-by: syzbot+017e491ae13c0068598a(a)syzkaller.appspotmail.com
Signed-off-by: Richard Palethorpe <rpalethorpe(a)suse.com>
Cc: Wolfgang Grandegger <wg(a)grandegger.com>
Cc: Marc Kleine-Budde <mkl(a)pengutronix.de>
Cc: "David S. Miller" <davem(a)davemloft.net>
Cc: Tyler Hall <tylerwhall(a)gmail.com>
Cc: linux-can(a)vger.kernel.org
Cc: netdev(a)vger.kernel.org
Cc: linux-kernel(a)vger.kernel.org
Cc: syzkaller(a)googlegroups.com
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/can/slcan.c | 12 ++++++++++--
drivers/net/slip/slip.c | 12 ++++++++++--
2 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index cf0769a..b2e5bca 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -343,9 +343,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@@ -640,10 +647,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 77207f9..93f303e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
static void sl_tx_timeout(struct net_device *dev)
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
--
1.8.3

[PATCH 1/3] mm/memory_hotplug: simplify and fix check_hotplug_memory_range()
by Yang Yingliang 16 Apr '20
From: David Hildenbrand <david(a)redhat.com>
mainline inclusion
from mainline-5.3-rc1
commit cec3ebd083d4e8d161d0b18894c78e3311bcd026
category: bugfix
bugzilla: 29418
CVE: NA
-------------------------------------------------
Patch series "mm/memory_hotplug: Factor out memory block device handling", v3.
We only want memory block devices for memory to be onlined/offlined
(add/remove from the buddy). This is required so user space can
online/offline memory and kdump gets notified about newly onlined
memory.
Let's factor out creation/removal of memory block devices. This helps
to further clean up arch_add_memory()/arch_remove_memory() and to make
implementation of new features easier - especially sub-section memory
hot add from Dan.
Anshuman Khandual is currently working on arch_remove_memory(). I added
a temporary solution via "arm64/mm: Add temporary arch_remove_memory()
implementation", which is sufficient as a first step in the context of
this series. (We don't clean up page tables in case anything goes wrong
already.)
Did a quick sanity test with DIMM plug/unplug, making sure all devices
and sysfs links properly get added/removed. Compile tested on s390x and
x86-64.
This patch (of 11):
By converting start and size to page granularity, we actually ignore
unaligned parts within a page instead of properly bailing out with an
error.
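A worked example of the truncation (illustrative numbers, assuming
PAGE_SHIFT == 12 and a 128 MiB memory block size):

    u64 start = 128ULL << 20;           /* block aligned              */
    u64 size  = (128ULL << 20) + 0x200; /* 512 stray bytes            */

    u64 nr_pages = size >> PAGE_SHIFT;  /* == 0x8000: the 0x200 bytes */
                                        /* vanish in the shift        */

    /* old check: IS_ALIGNED(nr_pages, block_nr_pages) -> true, range  */
    /* wrongly accepted                                                */
    /* new check: IS_ALIGNED(size, memory_block_size_bytes()) -> false,*/
    /* rejected with -EINVAL as intended                               */

Checking start and size directly in bytes, as the hunk below does, keeps
the sub-page remainder visible to the alignment test.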
Link: http://lkml.kernel.org/r/20190527111152.16324-2-david@redhat.com
Signed-off-by: David Hildenbrand <david(a)redhat.com>
Reviewed-by: Dan Williams <dan.j.williams(a)intel.com>
Reviewed-by: Wei Yang <richardw.yang(a)linux.intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin(a)soleen.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Qian Cai <cai(a)lca.pw>
Cc: Arun KS <arunks(a)codeaurora.org>
Cc: Mathieu Malaterre <malat(a)debian.org>
Cc: Alex Deucher <alexander.deucher(a)amd.com>
Cc: Andrew Banman <andrew.banman(a)hpe.com>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Anshuman Khandual <anshuman.khandual(a)arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel(a)linaro.org>
Cc: Baoquan He <bhe(a)redhat.com>
Cc: Benjamin Herrenschmidt <benh(a)kernel.crashing.org>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Catalin Marinas <catalin.marinas(a)arm.com>
Cc: Chintan Pandya <cpandya(a)codeaurora.org>
Cc: Christophe Leroy <christophe.leroy(a)c-s.fr>
Cc: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: Dave Hansen <dave.hansen(a)linux.intel.com>
Cc: "David S. Miller" <davem(a)davemloft.net>
Cc: Fenghua Yu <fenghua.yu(a)intel.com>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Cc: Heiko Carstens <heiko.carstens(a)de.ibm.com>
Cc: "H. Peter Anvin" <hpa(a)zytor.com>
Cc: Ingo Molnar <mingo(a)kernel.org>
Cc: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
Cc: Joonsoo Kim <iamjoonsoo.kim(a)lge.com>
Cc: Jun Yao <yaojun8558363(a)gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov(a)linux.intel.com>
Cc: Logan Gunthorpe <logang(a)deltatee.com>
Cc: Mark Brown <broonie(a)kernel.org>
Cc: Mark Rutland <mark.rutland(a)arm.com>
Cc: Masahiro Yamada <yamada.masahiro(a)socionext.com>
Cc: Michael Ellerman <mpe(a)ellerman.id.au>
Cc: Mike Rapoport <rppt(a)linux.vnet.ibm.com>
Cc: "mike.travis(a)hpe.com" <mike.travis(a)hpe.com>
Cc: Nicholas Piggin <npiggin(a)gmail.com>
Cc: Paul Mackerras <paulus(a)samba.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: "Rafael J. Wysocki" <rafael(a)kernel.org>
Cc: Rich Felker <dalias(a)libc.org>
Cc: Rob Herring <robh(a)kernel.org>
Cc: Robin Murphy <robin.murphy(a)arm.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Tony Luck <tony.luck(a)intel.com>
Cc: Vasily Gorbik <gor(a)linux.ibm.com>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: Yoshinori Sato <ysato(a)users.sourceforge.jp>
Cc: Yu Zhao <yuzhao(a)google.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
mm/memory_hotplug.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 8a6ad9b..bfd148d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1059,16 +1059,11 @@ int try_online_node(int nid)
static int check_hotplug_memory_range(u64 start, u64 size)
{
- unsigned long block_sz = memory_block_size_bytes();
- u64 block_nr_pages = block_sz >> PAGE_SHIFT;
- u64 nr_pages = size >> PAGE_SHIFT;
- u64 start_pfn = PFN_DOWN(start);
-
/* memory range must be block size aligned */
- if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
- !IS_ALIGNED(nr_pages, block_nr_pages)) {
+ if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())) {
pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
- block_sz, start, size);
+ memory_block_size_bytes(), start, size);
return -EINVAL;
}
--
1.8.3