From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
In this patch, we add DFX statistics for I/O operations, including send/
recv/send_fail/send_busy counters. We also allow an overtime_threshold to
be defined so that tasks exceeding it are counted as timed out.
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Mingqiang Ling <lingmingqiang(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 16 ++++++
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 89 ++++++++++++++++++++++++-----
drivers/crypto/hisilicon/hpre/hpre_main.c | 55 ++++++++++++++++++
3 files changed, 146 insertions(+), 14 deletions(-)
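
(Not part of the patch: a minimal user-space sketch of the same microsecond
conversion that hpre_is_bd_timeout() below performs, with clock_gettime()
standing in for ktime_get_ts64(); the threshold value is made up.)

#include <stdio.h>
#include <time.h>

#define DFX_SEC_TO_US   1000000
#define DFX_US_TO_NS    1000

static long long elapsed_us(const struct timespec *start,
                            const struct timespec *end)
{
        return (end->tv_sec - start->tv_sec) * (long long)DFX_SEC_TO_US +
               (end->tv_nsec - start->tv_nsec) / DFX_US_TO_NS;
}

int main(void)
{
        struct timespec req_time, reply_time;
        long long overtime_thrhld = 100;        /* microseconds, example value only */

        clock_gettime(CLOCK_MONOTONIC, &req_time);      /* at BD submit */
        /* ... hardware round trip would happen here ... */
        clock_gettime(CLOCK_MONOTONIC, &reply_time);    /* in the completion callback */

        if (elapsed_us(&req_time, &reply_time) > overtime_thrhld)
                printf("request counted as over threshold\n");
        return 0;
}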
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 42b2f2a..203eb2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,16 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,12 +44,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 7610e13..b68b30c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -68,6 +69,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +92,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +122,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +133,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -308,12 +316,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -336,30 +348,67 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
@@ -435,6 +484,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@@ -443,7 +515,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -464,11 +535,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -646,7 +715,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -676,11 +744,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -698,7 +763,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -735,11 +799,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f727158..2ede8d78 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -169,6 +169,15 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld"
+};
+
#ifdef CONFIG_CRYPTO_QM_UACCE
static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
@@ -588,6 +597,33 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
+ return -EINVAL;
+ }
+
+ atomic64_set(&dfx_item->value, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -691,6 +727,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
@@ -709,6 +761,9 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
--
1.8.3
From: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
When I backported 52918ed5fcf0 ("KVM: SVM: Override default MMIO mask if
memory encryption is enabled") to 4.19 (which resulted in commit
a4e761c9f63a ("KVM: SVM: Override default MMIO mask if memory encryption
is enabled")), I messed up the call to kvm_mmu_set_mmio_spte_mask().
Fix that here now.
Reported-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Cc: Sean Christopherson <sean.j.christopherson(a)intel.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/x86/kvm/svm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3f0565e..cc8f3b41 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1336,7 +1336,7 @@ static __init void svm_adjust_mmio_mask(void)
*/
mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
- kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
}
static __init int svm_hardware_setup(void)
--
1.8.3
[PATCH 1/5] Revert "dm crypt: fix benbi IV constructor crash if used in authenticated mode"
by Yang Yingliang 17 Apr '20
From: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA
-------------------------
The next two patches to be reverted will conflict with this patch, so revert
this patch first and then merge the original patch.
This reverts commit 808f3768deec5e1a5c9d2a4a2d8593fbbaf3e4cc.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/md/dm-crypt.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d451f98..aa7f741 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -563,14 +563,8 @@ static int crypt_iv_essiv_gen(struct geniv_ctx *ctx,
static int crypt_iv_benbi_ctr(struct geniv_ctx *ctx)
{
- unsigned bs;
- int log;
-
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &ctx->cipher_flags))
- bs = crypto_aead_blocksize(ctx->tfms.tfms_aead[0]);
- else
- bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
- log = ilog2(bs);
+ unsigned int bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
+ int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
* to get the cipher block count, we use this shift in _gen */
--
1.8.3
[PATCH] arm64: clear_page: Add new implementation of clear_page() by STNP
by Yang Yingliang 17 Apr '20
From: Wei Li <liwei391(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 31400
CVE: NA
---------------------------
Currently, clear_page() clears the page through 'dc zva', but in most cases the
page is not used immediately afterwards, so the cache it populates is wasted.
Add an optimized implementation of clear_page() using 'stnp' to improve
performance. It can be enabled with the boot cmdline option 'mm.use_clearpage_stnp'.
In the hugetlb clear test, we gained about 53.7% performance improvement:
Set mm.use_clearpage_stnp = 0 | Set mm.use_clearpage_stnp = 1
[root@localhost liwei]# ./a.out 50 20 | [root@localhost liwei]# ./a.out 50 20
size is 50 Gib, test times is 20 | size is 50 Gib, test times is 20
test_time[0] : use 8.438046 sec | test_time[0] : use 3.722682 sec
test_time[1] : use 8.028493 sec | test_time[1] : use 3.640274 sec
test_time[2] : use 8.646547 sec | test_time[2] : use 4.095052 sec
test_time[3] : use 8.122490 sec | test_time[3] : use 3.998446 sec
test_time[4] : use 8.053038 sec | test_time[4] : use 4.084259 sec
test_time[5] : use 8.843512 sec | test_time[5] : use 3.933871 sec
test_time[6] : use 8.308906 sec | test_time[6] : use 3.934334 sec
test_time[7] : use 8.093817 sec | test_time[7] : use 3.869142 sec
test_time[8] : use 8.303504 sec | test_time[8] : use 3.902916 sec
test_time[9] : use 8.178336 sec | test_time[9] : use 3.541885 sec
test_time[10] : use 8.003625 sec | test_time[10] : use 3.595554 sec
test_time[11] : use 8.163807 sec | test_time[11] : use 3.583813 sec
test_time[12] : use 8.267464 sec | test_time[12] : use 3.863033 sec
test_time[13] : use 8.055326 sec | test_time[13] : use 3.770953 sec
test_time[14] : use 8.246986 sec | test_time[14] : use 3.808006 sec
test_time[15] : use 8.546992 sec | test_time[15] : use 3.653194 sec
test_time[16] : use 8.727256 sec | test_time[16] : use 3.722395 sec
test_time[17] : use 8.288951 sec | test_time[17] : use 3.683508 sec
test_time[18] : use 8.019322 sec | test_time[18] : use 4.253087 sec
test_time[19] : use 8.250685 sec | test_time[19] : use 4.082845 sec
hugetlb test end! | hugetlb test end!
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/cpucaps.h | 3 ++-
arch/arm64/kernel/cpufeature.c | 34 ++++++++++++++++++++++++++++++++++
arch/arm64/lib/clear_page.S | 21 +++++++++++++++++++++
3 files changed, 57 insertions(+), 1 deletion(-)
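
(Not part of the patch: a rough user-space reconstruction of what a hugetlb
clear test such as the quoted "./a.out 50 20" run could look like. This is an
assumption about the test, not the original program; it presumes 2 MiB huge
pages and requires them to be reserved beforehand.)

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <time.h>

int main(int argc, char **argv)
{
        size_t gib = argc > 1 ? strtoul(argv[1], NULL, 0) : 1;
        int times  = argc > 2 ? atoi(argv[2]) : 1;
        size_t len = gib << 30;

        printf("size is %zu Gib, test times is %d\n", gib, times);
        for (int i = 0; i < times; i++) {
                struct timespec t0, t1;
                char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
                if (p == MAP_FAILED) {
                        perror("mmap"); /* enough huge pages must be reserved first */
                        return 1;
                }
                clock_gettime(CLOCK_MONOTONIC, &t0);
                /* First touch of each (assumed 2 MiB) huge page faults it in,
                 * which is where the kernel's page clearing path is exercised. */
                for (size_t off = 0; off < len; off += 2UL << 20)
                        p[off] = 0;
                clock_gettime(CLOCK_MONOTONIC, &t1);
                printf("test_time[%d] : use %f sec\n", i,
                       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);
                munmap(p, len);
        }
        printf("hugetlb test end!\n");
        return 0;
}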
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index a9090f2..3cd169f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
#define ARM64_WORKAROUND_1463225 35
#define ARM64_HAS_CRC32 36
#define ARM64_SSBS 37
+#define ARM64_CLEARPAGE_STNP 38
-#define ARM64_NCAPS 38
+#define ARM64_NCAPS 39
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b1f621c..8b84a47 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1232,6 +1232,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
}
#endif
+static bool use_clearpage_stnp;
+
+static int __init early_use_clearpage_stnp(char *p)
+{
+ return strtobool(p, &use_clearpage_stnp);
+}
+early_param("mm.use_clearpage_stnp", early_use_clearpage_stnp);
+
+static bool has_mor_nontemporal(const struct arm64_cpu_capabilities *entry)
+{
+ /*
+ * List of CPUs which have memory ordering ruled non-temporal
+ * load and store.
+ */
+ static const struct midr_range cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ {},
+ };
+
+ return is_midr_in_range_list(read_cpuid_id(), cpus);
+}
+
+static bool can_clearpage_use_stnp(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return use_clearpage_stnp && has_mor_nontemporal(entry);
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1467,6 +1495,12 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
.cpu_enable = cpu_enable_ssbs,
},
#endif
+ {
+ .desc = "Clear Page by STNP",
+ .capability = ARM64_CLEARPAGE_STNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = can_clearpage_use_stnp,
+ },
{},
};
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index ef08e90..9aa1de1 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -18,6 +18,25 @@
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
+#include <asm/alternative.h>
+
+/*
+ * Clear page @dest
+ *
+ * Parameters:
+ * x0 - dest
+ */
+ENTRY(clear_page_stnp)
+ .align 6
+1: stnp xzr, xzr, [x0]
+ stnp xzr, xzr, [x0, #0x10]
+ stnp xzr, xzr, [x0, #0x20]
+ stnp xzr, xzr, [x0, #0x30]
+ add x0, x0, #0x40
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+ENDPROC(clear_page_stnp)
/*
* Clear page @dest
@@ -26,6 +45,8 @@
* x0 - dest
*/
ENTRY(clear_page)
+ ALTERNATIVE("nop", "b clear_page_stnp", ARM64_CLEARPAGE_STNP)
+
mrs x1, dczid_el0
and w1, w1, #0xf
mov x2, #4
--
1.8.3
From: Tao Jihua <taojihua4(a)huawei.com>
driver inclusion
category: Bugfix
bugzilla: NA
This modification is mainly to optimize mtr management
and solve mtr addressing bug:When mtt_ba_pg_sz = 0,
hem-> start / step = 1, which eventually results in an
additional BA_BYTE_LEN added to the offset
Signed-off-by: Tao Jihua <taojihua4(a)huawei.com>
Reviewed-by: Hu Chunzhi <huchunzhi(a)huawei.com>
Reviewed-by: Wang Lin <wanglin137(a)huawei.com>
Reviewed-by: Zhao Weibo <zhaoweibo3(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hem.c | 43 ++++++++++++++++----------------
1 file changed, 21 insertions(+), 22 deletions(-)
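
(Not part of the patch: an illustration only, with hypothetical numbers, of the
offset arithmetic fixed in hem_list_alloc_root_bt() below. The fix makes the
offset relative to the region start instead of using the absolute hem start.)

#include <stdio.h>

int main(void)
{
        int ba_byte_len = 8;    /* assumed size of one base-address entry */
        int r_offset  = 4096;   /* region start (r->offset) */
        int hem_start = 4096;   /* hem->start of the first mid-level bt */
        int step      = 4096;   /* entries covered by one BA at this level */

        int old_off = hem_start / step * ba_byte_len;              /* 8: one BA too far */
        int new_off = (hem_start - r_offset) / step * ba_byte_len; /* 0: first BA slot */

        printf("old offset = %d, new offset = %d\n", old_off, new_off);
        return 0;
}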
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 510c008..1911470 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1165,13 +1165,6 @@ struct roce_hem_item {
int end; /* end buf offset in this hem */
};
-#define hem_list_for_each(pos, n, head) \
- list_for_each_entry_safe(pos, n, head, list)
-
-#define hem_list_del_item(hem) list_del(&hem->list)
-#define hem_list_add_item(hem, head) list_add(&hem->list, head)
-#define hem_list_link_item(hem, head) list_add(&hem->sibling, head)
-
static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
int start, int end,
int count, bool exist_bt,
@@ -1216,8 +1209,8 @@ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
{
struct roce_hem_item *hem, *temp_hem;
- hem_list_for_each(hem, temp_hem, head) {
- hem_list_del_item(hem);
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
hem_list_free_item(hr_dev, hem, exist_bt);
}
}
@@ -1249,7 +1242,7 @@ static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
struct roce_hem_item *hem, *temp_hem;
struct roce_hem_item *found = NULL;
- hem_list_for_each(hem, temp_hem, ba_list) {
+ list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
if (hem_list_page_is_in_range(hem, page_offset)) {
found = hem;
break;
@@ -1391,9 +1384,9 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
hem_ptrs[level] = cur;
- hem_list_add_item(cur, &temp_list[level]);
+ list_add(&cur->list, &temp_list[level]);
if (hem_list_is_bottom_bt(hopnum, level))
- hem_list_link_item(cur, &temp_list[0]);
+ list_add(&cur->sibling, &temp_list[0]);
/* link bt to parent bt */
if (level > 1) {
@@ -1430,6 +1423,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
void *cpu_base;
u64 phy_base;
int ret = 0;
+ int ba_num;
int offset;
int total;
int step;
@@ -1440,15 +1434,19 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (root_hem)
return 0;
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&temp_root);
- total = r->offset;
+ offset = r->offset;
/* indicate to last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
- unit, true, 0);
+ root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
if (!root_hem)
return -ENOMEM;
- hem_list_add_item(root_hem, &temp_root);
+ list_add(&root_hem->list, &temp_root);
hem_list->root_ba = root_hem->dma_addr;
@@ -1457,7 +1455,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
- for (i = 0; i < region_cnt && total < unit; i++) {
+ for (i = 0; i < region_cnt && total < ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1478,8 +1476,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
- hem_list_add_item(hem, &temp_list[i]);
- hem_list_link_item(hem, &temp_btm);
+ list_add(&hem->list, &temp_list[i]);
+ list_add(&hem->sibling, &temp_btm);
total += r->count;
} else {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
@@ -1488,9 +1486,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
/* if exist mid bt, link L1 to L0 */
- hem_list_for_each(hem, temp_hem,
- &hem_list->mid_bt[i][1]) {
- offset = hem->start / step * BA_BYTE_LEN;
+ list_for_each_entry_safe(hem, temp_hem,
+ &hem_list->mid_bt[i][1], list) {
+ offset = ((hem->start - r->offset) / step) *
+ BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
--
1.8.3
From: Joe Perches <joe(a)perches.com>
[ Upstream commit 20faba848752901de23a4d45a1174d64d2069dde ]
Arguments are supposed to be ordered high then low.
Signed-off-by: Joe Perches <joe(a)perches.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Marc Zyngier <marc.zyngier(a)arm.com>
Link: https://lkml.kernel.org/r/ab5deb4fc3cd604cb620054770b7d00016d736bc.15627348…
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
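
(Not part of the patch: a small standalone demo of why the argument order
matters. The GENMASK_ULL() copy below only mirrors the shape of the kernel
macro; with the arguments swapped, the mask evaluates to 0, so the
WARN_ON_ONCE() in the patch below could never trigger.)

#include <stdio.h>

#define BITS_PER_LONG_LONG 64
#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
        /* Correct order, bits 15..0 set: */
        printf("GENMASK_ULL(15, 0) = %#llx\n", GENMASK_ULL(15, 0));
        /* Swapped order collapses to 0, so a check against it never fires: */
        printf("GENMASK_ULL(0, 15) = %#llx\n", GENMASK_ULL(0, 15));
        return 0;
}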
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4a9c14f..860f3ef 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -215,7 +215,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
static struct its_collection *valid_col(struct its_collection *col)
{
- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
return NULL;
return col;
--
1.8.3
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
1. We delete sec_usr_if.h, move the definitions of the SEC hardware structures
into sec_crypto.h, and normalize the two structure types.
2. In sec_main.c, we remove fusion_limit/fusion_time, because this part of the
logic is not used in the end. We also simplify the debugfs logic by not checking
some return codes, because they do not affect driver loading.
The probe flow is also optimized, including adding sec_iommu_used_check,
modifying sec_probe_init, implementing sec_qm_pre_init, and so on.
3. In sec.h, we define struct sec_ctx, which holds the queue/cipher/
request related state.
4. In sec_crypto.c, we encapsulate independent interfaces such as init/
uninit, map/unmap, callback, resource alloc/free, encrypt/decrypt,
hardware descriptor filling and key setting, which removes the fusion logic and
makes it easier to add new algorithms. Meanwhile, we remove DES algorithm support
because of its weak keys.
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 3 +-
drivers/crypto/hisilicon/sec2/sec.h | 166 ++-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 1770 +++++++++++-----------------
drivers/crypto/hisilicon/sec2/sec_crypto.h | 237 +++-
drivers/crypto/hisilicon/sec2/sec_main.c | 541 ++++-----
drivers/crypto/hisilicon/sec2/sec_usr_if.h | 179 ---
6 files changed, 1246 insertions(+), 1650 deletions(-)
delete mode 100644 drivers/crypto/hisilicon/sec2/sec_usr_if.h
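
(Not part of the patch: the rework routes every request through a table of
function pointers, struct sec_req_op in the diff below. A generic, purely
illustrative sketch of that ops-table pattern, with made-up names:)

#include <stdio.h>

struct req;

struct req_ops {
        int  (*buf_map)(struct req *r);
        int  (*bd_fill)(struct req *r);
        int  (*bd_send)(struct req *r);
        void (*callback)(struct req *r, int err);
};

struct req {
        const struct req_ops *ops;
        int id;
};

static int process(struct req *r)
{
        int ret = r->ops->buf_map(r);
        if (ret)
                return ret;
        ret = r->ops->bd_fill(r);
        if (ret)
                return ret;
        return r->ops->bd_send(r);      /* completion later invokes r->ops->callback */
}

static int skcipher_map(struct req *r)  { printf("map %d\n", r->id);  return 0; }
static int skcipher_fill(struct req *r) { printf("fill %d\n", r->id); return 0; }
static int skcipher_send(struct req *r) { printf("send %d\n", r->id); return 0; }
static void skcipher_done(struct req *r, int err) { printf("done %d err=%d\n", r->id, err); }

static const struct req_ops skcipher_ops = {
        .buf_map  = skcipher_map,
        .bd_fill  = skcipher_fill,
        .bd_send  = skcipher_send,
        .callback = skcipher_done,
};

int main(void)
{
        struct req r = { .ops = &skcipher_ops, .id = 1 };
        return process(&r);
}

Adding an algorithm then means supplying a new ops table rather than touching
the shared send/receive path, which is what makes the driver easier to extend.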
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3429e7..8b49902 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1389,6 +1389,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -1598,7 +1599,7 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
- /* wait for increase used count in qp send and last poll qp finish */
+ /* waiting for increase used count in qp send and last poll qp finish */
udelay(WAIT_PERIOD);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index f85dd06..e3b581a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -1,19 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_H
-#define HISI_SEC_H
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
#include <linux/list.h>
+
#include "../qm.h"
-#include "sec_usr_if.h"
+#include "sec_crypto.h"
+
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+};
+
+/* Cipher request of SEC private */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ struct sec_cipher_req c_req;
+
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ bool fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Call back for the request
+ * @process: Main processing logic of Skcipher
+ */
+struct sec_req_op {
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context which cipher's relatives */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
-#undef pr_fmt
-#define pr_fmt(fmt) "hisi_sec: " fmt
+/* SEC queue context which defines queue's relatives */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req *req_list[QM_Q_DEPTH];
+ struct idr req_idr;
+ struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
+/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+ /* Threshold for fake busy, trigger to return -EBUSY to user */
+ u32 fake_req_limit;
+
+ /* Currrent cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Currrent cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
-#define FUSION_LIMIT_DEF 1
-#define FUSION_LIMIT_MAX 64
-#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
+ enum sec_alg_type alg_type;
+ bool pbuf_supported;
+ bool use_pbuf;
+ struct sec_cipher_ctx c_ctx;
+};
enum sec_endian {
SEC_LE = 0,
@@ -21,32 +126,37 @@ enum sec_endian {
SEC_64BE
};
-struct hisi_sec_ctrl;
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
-struct hisi_sec_dfx {
- u64 send_cnt;
- u64 send_by_tmout;
- u64 send_by_full;
- u64 recv_cnt;
- u64 get_task_cnt;
- u64 put_task_cnt;
- u64 gran_task_cnt;
- u64 thread_cnt;
- u64 fake_busy_cnt;
- u64 busy_comp_cnt;
+struct sec_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
-struct hisi_sec {
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
struct hisi_qm qm;
- struct hisi_sec_dfx sec_dfx;
- struct hisi_sec_ctrl *ctrl;
- int ctx_q_num;
- int fusion_limit;
- int fusion_tmout_nsec;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ bool iommu_used;
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-struct hisi_sec *find_sec_device(int node);
-
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0643955..52448d0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1,384 +1,329 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#include <linux/crypto.h>
-#include <linux/hrtimer.h>
-#include <linux/dma-mapping.h>
-#include <linux/ktime.h>
-
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include "sec.h"
#include "sec_crypto.h"
-static atomic_t sec_active_devs;
-
-#define SEC_ASYNC
-
-#define SEC_INVLD_REQ_ID (-1)
-#define SEC_PRIORITY 4001
-#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
-#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
-#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
-#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
-
-#define BUF_MAP_PER_SGL 64
-#define SEC_FUSION_BD
-
-enum C_ALG {
- C_ALG_DES = 0x0,
- C_ALG_3DES = 0x1,
- C_ALG_AES = 0x2,
- C_ALG_SM4 = 0x3,
-};
-
-enum C_MODE {
- C_MODE_ECB = 0x0,
- C_MODE_CBC = 0x1,
- C_MODE_CTR = 0x4,
- C_MODE_CCM = 0x5,
- C_MODE_GCM = 0x6,
- C_MODE_XTS = 0x7,
- C_MODE_CBC_CS = 0x9,
-};
-
-enum CKEY_LEN {
- CKEY_LEN_128_BIT = 0x0,
- CKEY_LEN_192_BIT = 0x1,
- CKEY_LEN_256_BIT = 0x2,
- CKEY_LEN_DES = 0x1,
- CKEY_LEN_3DES_3KEY = 0x1,
- CKEY_LEN_3DES_2KEY = 0x3,
-};
-
-enum SEC_BD_TYPE {
- BD_TYPE1 = 0x1,
- BD_TYPE2 = 0x2,
-};
-
-enum SEC_CIPHER_TYPE {
- SEC_CIPHER_ENC = 0x1,
- SEC_CIPHER_DEC = 0x2,
-};
-
-enum SEC_ADDR_TYPE {
- PBUF = 0x0,
- SGL = 0x1,
- PRP = 0x2,
-};
-
-enum SEC_CI_GEN {
- CI_GEN_BY_ADDR = 0x0,
- CI_GEN_BY_LBA = 0X3,
-};
-
-enum SEC_SCENE {
- SCENE_IPSEC = 0x0,
- SCENE_STORAGE = 0x5,
-};
-
-enum {
- SEC_NO_FUSION = 0x0,
- SEC_IV_FUSION = 0x1,
- SEC_FUSION_BUTT
-};
-
-enum SEC_REQ_OPS_TYPE {
- SEC_OPS_SKCIPHER_ALG = 0x0,
- SEC_OPS_MULTI_IV = 0x1,
- SEC_OPS_BUTT
-};
-
-struct cipher_res {
- struct skcipher_request_ctx **sk_reqs;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct scatterlist *src;
- struct scatterlist *dst;
-};
-
-struct hisi_sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
- struct hisi_acc_hw_sgl *c_out;
- dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct skcipher_request *sk_req;
- struct scatterlist *src;
- struct scatterlist *dst;
- u32 c_len;
- u32 gran_num;
- u64 lba;
- bool encrypt;
-};
-
-struct hisi_sec_ctx;
-struct hisi_sec_qp_ctx;
-
-struct hisi_sec_req {
- struct hisi_sec_sqe sec_sqe;
- struct hisi_sec_ctx *ctx;
- struct hisi_sec_qp_ctx *qp_ctx;
- void **priv;
- struct hisi_sec_cipher_req c_req;
- ktime_t st_time;
- int err_type;
- int req_id;
- int req_cnt;
- int fusion_num;
- int fake_busy;
-};
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit operational relative MACRO */
+#define SEC_DE_OFFSET 1
+#define SEC_CI_GEN_OFFSET 6
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
+
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-struct hisi_sec_req_op {
- int fusion_type;
- int (*get_res)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*queue_alloc)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*queue_free)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*buf_map)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*buf_unmap)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*do_transfer)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_fill)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_send)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*callback)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
-};
-
-struct hisi_sec_cipher_ctx {
- u8 *c_key;
- dma_addr_t c_key_dma;
- sector_t iv_offset;
- u32 c_gran_size;
- u8 c_mode;
- u8 c_alg;
- u8 c_key_len;
-};
-
-struct hisi_sec_qp_ctx {
- struct hisi_qp *qp;
- struct hisi_sec_req **req_list;
- struct hisi_sec_req *fusion_req;
- unsigned long *req_bitmap;
- void *priv_req_res;
- struct hisi_sec_ctx *ctx;
- struct mutex req_lock;
- atomic_t req_cnt;
- struct hisi_sec_sqe *sqe_list;
- struct hisi_acc_sgl_pool *c_in_pool;
- struct hisi_acc_sgl_pool *c_out_pool;
- int fusion_num;
- int fusion_limit;
-};
+static atomic_t sec_active_devs;
-struct hisi_sec_ctx {
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec *sec;
- struct device *dev;
- struct hisi_sec_req_op *req_op;
- struct hisi_qp **qps;
- struct hrtimer timer;
- struct work_struct work;
- atomic_t thread_cnt;
- int req_fake_limit;
- int req_limit;
- int q_num;
- int enc_q_num;
- atomic_t enc_qid;
- atomic_t dec_qid;
- struct hisi_sec_cipher_ctx c_ctx;
- int fusion_tmout_nsec;
- int fusion_limit;
- u64 enc_fusion_num;
- u64 dec_fusion_num;
- bool is_fusion;
-};
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
-#define DES_WEAK_KEY_NUM 4
-u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
- 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
+ return atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
-static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
- struct hisi_sec_qp_ctx *qp_ctx)
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
- req_id = find_first_zero_bit(qp_ctx->req_bitmap, ctx->req_limit);
- if (req_id >= ctx->req_limit || req_id < 0) {
- dev_err(ctx->dev, "no free req id\n");
- return -ENOBUFS;
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (unlikely(req_id < 0)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
}
- set_bit(req_id, qp_ctx->req_bitmap);
- qp_ctx->req_list[req_id] = req;
- req->req_id = req_id;
req->qp_ctx = qp_ctx;
-
- return 0;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
}
-static void hisi_sec_free_req_id(struct hisi_sec_qp_ctx *qp_ctx, int req_id)
+static void sec_free_req_id(struct sec_req *req)
{
- if (req_id < 0 || req_id >= qp_ctx->ctx->req_limit) {
- pr_err("invalid req_id[%d]\n", req_id);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
mutex_lock(&qp_ctx->req_lock);
- clear_bit(req_id, qp_ctx->req_bitmap);
- atomic_dec(&qp_ctx->req_cnt);
+ idr_remove(&qp_ctx->req_idr, req_id);
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_request_transfer(struct hisi_sec_ctx *, struct hisi_sec_req *);
-static int sec_request_send(struct hisi_sec_ctx *, struct hisi_sec_req *);
-
-void qp_ctx_work_process(struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct hisi_sec_req *req;
- struct hisi_sec_ctx *ctx;
- ktime_t cur_time = ktime_get();
- int ret;
-
- mutex_lock(&qp_ctx->req_lock);
-
- req = qp_ctx->fusion_req;
- if (req == NULL) {
- mutex_unlock(&qp_ctx->req_lock);
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
+ u16 done, flag;
+ int err = 0;
+ u8 type;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (unlikely(type != SEC_BD_TYPE2)) {
+ pr_err("err bd type [%d]\n", type);
return;
}
- ctx = req->ctx;
- if (ctx == NULL || req->fusion_num == qp_ctx->fusion_limit) {
- mutex_unlock(&qp_ctx->req_lock);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic_inc(&qp->qp_status.used);
return;
}
- if (cur_time - qp_ctx->fusion_req->st_time < ctx->fusion_tmout_nsec) {
- mutex_unlock(&qp_ctx->req_lock);
- return;
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG))) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
}
- qp_ctx->fusion_req = NULL;
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- ret = sec_request_transfer(ctx, req);
- if (ret)
- goto err_free_req;
-
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_tmout, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(ctx->dev, "[%s][%d] ret[%d]\n", __func__,
- __LINE__, ret);
- goto err_unmap_req;
- }
+ if (unlikely(ret == -EBUSY))
+ return -ENOBUFS;
- return;
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
+ return ret;
}
-void ctx_work_process(struct work_struct *work)
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
int i;
- ctx = container_of(work, struct hisi_sec_ctx, work);
- for (i = 0; i < ctx->q_num; i++)
- qp_ctx_work_process(&ctx->qp_ctx[i]);
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
}
-static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
- ktime_t tim;
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
- ctx = container_of(timer, struct hisi_sec_ctx, timer);
- tim = ktime_set(0, ctx->fusion_tmout_nsec);
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
- if (ctx->sec->qm.wq)
- queue_work(ctx->sec->qm.wq, &ctx->work);
- else
- schedule_work(&ctx->work);
+/*
+ * To improve performance, pbuffer is used for
+ * small packets (< 576Bytes) as IOMMU translation using.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
- hrtimer_forward(timer, timer->base->get_time(), tim);
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, &res->pbuf_dma,
+ GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
- return HRTIMER_RESTART;
+ /*
+ * SEC_PBUF_PKG contains data pbuf, iv and
+ * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
+ * Every PAGE contains six SEC_PBUF_PKG
+ * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
+ * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
+ * for the SEC_TOTAL_PBUF_SZ
+ */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
}
-static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int req_type)
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct device *dev = ctx->dev;
- struct hisi_qp *qp;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
int ret;
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
+ }
+ return 0;
+alloc_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
+}
+
+static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
- qp->req_type = req_type;
+ qp->req_type = 0;
qp->qp_ctx = qp_ctx;
-#ifdef SEC_ASYNC
- qp->req_cb = hisi_sec_req_cb;
-#endif
+ qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
- qp_ctx->fusion_num = 0;
- qp_ctx->fusion_req = NULL;
- qp_ctx->fusion_limit = ctx->fusion_limit;
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->req_cnt, 0);
-
- qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
- GFP_ATOMIC);
- if (!qp_ctx->req_bitmap)
- return -ENOMEM;
-
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list) {
- ret = -ENOMEM;
- goto err_free_req_bitmap;
- }
-
- qp_ctx->sqe_list = kcalloc(ctx->fusion_limit,
- sizeof(struct hisi_sec_sqe), GFP_KERNEL);
- if (!qp_ctx->sqe_list) {
- ret = -ENOMEM;
- goto err_free_req_list;
- }
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
- ret = PTR_ERR(qp_ctx->c_in_pool);
- goto err_free_sqe_list;
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
- ret = PTR_ERR(qp_ctx->c_out_pool);
+ dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
- ret = ctx->req_op->queue_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -389,304 +334,153 @@ static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_sqe_list:
- kfree(qp_ctx->sqe_list);
-err_free_req_list:
- kfree(qp_ctx->req_list);
-err_free_req_bitmap:
- kfree(qp_ctx->req_bitmap);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
return ret;
}
-static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = ctx->dev;
+ struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
+
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
- kfree(qp_ctx->req_bitmap);
- kfree(qp_ctx->req_list);
- kfree(qp_ctx->sqe_list);
-}
-
-static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
-{
- if (!ctx || qlen < 0)
- return -EINVAL;
-
- ctx->req_limit = qlen;
- ctx->req_fake_limit = qlen / 2;
- atomic_set(&ctx->thread_cnt, 0);
- atomic_set(&ctx->enc_qid, 0);
- atomic_set(&ctx->dec_qid, ctx->enc_q_num);
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0) {
- ktime_t tim = ktime_set(0, ctx->fusion_tmout_nsec);
-
- hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->timer.function = hrtimer_handler;
- hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
- INIT_WORK(&ctx->work, ctx_work_process);
- }
-
- return 0;
-}
-
-static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
- struct hisi_sec *sec)
-{
- if (ctx->is_fusion) {
- ctx->fusion_tmout_nsec = sec->fusion_tmout_nsec;
- ctx->fusion_limit = sec->fusion_limit;
- } else {
- ctx->fusion_tmout_nsec = 0;
- ctx->fusion_limit = 1;
- }
+ idr_destroy(&qp_ctx->req_idr);
}
-static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- struct hisi_sec *sec;
+ struct sec_dev *sec;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
-
ctx->qps = sec_create_qps();
if (!ctx->qps) {
pr_err("Can not create sec qps!\n");
return -ENODEV;
}
- sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
- ctx->dev = &sec->qm.pdev->dev;
-
- ctx->q_num = sec->ctx_q_num;
-
- ctx->enc_q_num = ctx->q_num / 2;
- ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx),
- GFP_KERNEL);
- if (!ctx->qp_ctx) {
- dev_err(ctx->dev, "failed to alloc qp_ctx");
+ if (ctx->sec->iommu_used)
+ ctx->pbuf_supported = true;
+ else
+ ctx->pbuf_supported = false;
+ ctx->use_pbuf = false;
+
+ /* Half of queue depth is taken as fake requests limit in the queue. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
return -ENOMEM;
- }
-
- hisi_sec_get_fusion_param(ctx, sec);
- for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(ctx, i, 0);
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
-
- c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(ctx->dev,
- SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL);
-
- if (!ctx->c_ctx.c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
- return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
-
+ return 0;
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+
return ret;
}
-static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- int i = 0;
-
- c_ctx = &ctx->c_ctx;
-
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0)
- hrtimer_cancel(&ctx->timer);
-
- if (c_ctx->c_key) {
- memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key,
- c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
- for (i = 0; i < ctx->q_num; i++)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, ctx->q_num);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-
-struct hisi_sec_req_op sec_req_ops_tbl[] = {
- {
- .fusion_type = SEC_NO_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_base,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }, {
- .fusion_type = SEC_IV_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_multi_iv,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }
-};
-
-static int hisi_sec_cipher_ctx_init_alg(struct crypto_skcipher *tfm)
+static int sec_cipher_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_SKCIPHER_ALG];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
- return hisi_sec_cipher_ctx_init(tfm);
+ return 0;
}
-static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm)
+static void sec_cipher_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_MULTI_IV];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- return hisi_sec_cipher_ctx_init(tfm);
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
- struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_sec_req *req;
- struct hisi_sec_dfx *dfx;
- u32 req_id;
-
- if (sec_sqe->type == 1) {
- req_id = sec_sqe->type1.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type1.error_type;
- if (req->err_type || sec_sqe->type1.done != 0x1 ||
- sec_sqe->type1.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type1.done,
- sec_sqe->type1.flag);
- }
- } else if (sec_sqe->type == 2) {
- req_id = sec_sqe->type2.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type2.error_type;
- if (req->err_type || sec_sqe->type2.done != 0x1 ||
- sec_sqe->type2.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type2.done,
- sec_sqe->type2.flag);
- }
- } else {
- dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type);
- return;
- }
-
- dfx = &req->ctx->sec->sec_dfx;
-
- req->ctx->req_op->buf_unmap(req->ctx, req);
- req->ctx->req_op->callback(req->ctx, req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- __sync_add_and_fetch(&dfx->recv_cnt, 1);
-}
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
-static int sec_des_weak_key(const u64 *key, const u32 keylen)
-{
- int i;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < DES_WEAK_KEY_NUM; i++)
- if (*key == des_weak_key[i])
- return 1;
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
}
-static int sec_skcipher_des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const u8 *key)
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
- if (keylen != DES_KEY_SIZE)
- return -EINVAL;
-
- if (sec_des_weak_key((const u64 *)key, keylen))
- return -EKEYREJECTED;
-
- c_ctx->c_key_len = CKEY_LEN_DES;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- return 0;
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_2KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
break;
case SEC_DES3_3KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_3KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
break;
default:
return -EINVAL;
@@ -695,32 +489,35 @@ static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
return 0;
}
-static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
switch (keylen) {
case SEC_XTS_MIN_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case SEC_XTS_MAX_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: xts mode key error!\n");
return -EINVAL;
}
} else {
switch (keylen) {
case AES_KEYSIZE_128:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case AES_KEYSIZE_192:
- c_ctx->c_key_len = CKEY_LEN_192_BIT;
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
break;
case AES_KEYSIZE_256:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: aes key error!\n");
return -EINVAL;
}
}
@@ -729,38 +526,40 @@ static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
- const u32 keylen, const enum C_ALG c_alg, const enum C_MODE c_mode)
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
int ret;
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
return ret;
+ }
}
c_ctx->c_alg = c_alg;
c_ctx->c_mode = c_mode;
switch (c_alg) {
- case C_ALG_DES:
- ret = sec_skcipher_des_setkey(c_ctx, keylen, key);
- break;
- case C_ALG_3DES:
+ case SEC_CALG_3DES:
ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
break;
- case C_ALG_AES:
- case C_ALG_SM4:
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
return -EINVAL;
}
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
return ret;
+ }
memcpy(c_ctx->c_key, key, keylen);
@@ -769,639 +568,423 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
- u32 keylen)\
+ u32 keylen) \
{ \
return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
}
-GEN_SEC_SETKEY_FUNC(aes_ecb, C_ALG_AES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(aes_cbc, C_ALG_AES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_cbc, C_ALG_SM4, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(des_ecb, C_ALG_DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(des_cbc, C_ALG_DES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(3des_ecb, C_ALG_3DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(3des_cbc, C_ALG_3DES, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(aes_xts, C_ALG_AES, C_MODE_XTS)
-GEN_SEC_SETKEY_FUNC(sm4_xts, C_ALG_SM4, C_MODE_XTS)
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-static int hisi_sec_get_async_ret(int ret, int req_cnt, int req_fake_limit)
-{
- if (ret == 0) {
- if (req_cnt >= req_fake_limit)
- ret = -EBUSY;
- else
- ret = -EINPROGRESS;
- } else {
- if (ret == -EBUSY)
- ret = -ENOBUFS;
- }
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
- return ret;
-}
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
int req_id = req->req_id;
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
- req->priv = (void **)c_res[req_id].sk_reqs;
- c_req->src = c_res[req_id].src;
- c_req->dst = c_res[req_id].dst;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
-{
- struct cipher_res *c_res;
- int req_num = ctx->fusion_limit;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
- int buf_map_num = QM_Q_DEPTH * ctx->fusion_limit;
- struct device *dev = ctx->dev;
- int i, ret;
-
- c_res = kcalloc(QM_Q_DEPTH, sizeof(struct cipher_res), GFP_KERNEL);
- if (!c_res)
- return -ENOMEM;
-
- qp_ctx->priv_req_res = (void *)c_res;
-
- c_res[0].sk_reqs = kcalloc(alloc_num,
- sizeof(struct skcipher_request_ctx *), GFP_KERNEL);
- if (!c_res[0].sk_reqs) {
- ret = -ENOMEM;
- goto err_free_c_res;
- }
-
- c_res[0].c_ivin = dma_alloc_coherent(dev,
- SEC_IV_SIZE * alloc_num, &c_res[0].c_ivin_dma, GFP_KERNEL);
- if (!c_res[0].c_ivin) {
- ret = -ENOMEM;
- goto err_free_sk_reqs;
- }
+ copy_size = c_req->c_len;
- c_res[0].src = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].src) {
- ret = -ENOMEM;
- goto err_free_c_ivin;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
}
- c_res[0].dst = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].dst) {
- ret = -ENOMEM;
- goto err_free_src;
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
}
- for (i = 1; i < QM_Q_DEPTH; i++) {
- c_res[i].sk_reqs = c_res[0].sk_reqs + i * req_num;
- c_res[i].c_ivin = c_res[0].c_ivin
- + i * req_num * SEC_IV_SIZE;
- c_res[i].c_ivin_dma = c_res[0].c_ivin_dma
- + i * req_num * SEC_IV_SIZE;
- c_res[i].src = c_res[0].src + i * req_num;
- c_res[i].dst = c_res[0].dst + i * req_num;
- }
+ c_req->c_out_dma = c_req->c_in_dma;
return 0;
-
-err_free_src:
- kfree(c_res[0].src);
-err_free_c_ivin:
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
-err_free_sk_reqs:
- kfree(c_res[0].sk_reqs);
-err_free_c_res:
- kfree(c_res);
-
- return ret;
}
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
{
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
- struct device *dev = ctx->dev;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
- kfree(c_res[0].dst);
- kfree(c_res[0].src);
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
- kfree(c_res[0].sk_reqs);
- kfree(c_res);
+ copy_size = c_req->c_len;
- return 0;
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
}
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
- struct skcipher_request *sk_next;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int src_nents, src_nents_sum, copyed_src_nents;
- int dst_nents, dst_nents_sum, copyed_dst_nents;
- int i, ret, buf_map_limit;
-
- src_nents_sum = 0;
- dst_nents_sum = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- if (sk_next == NULL) {
- dev_err(ctx->dev, "nullptr at [%d]\n", i);
- return -EFAULT;
- }
- src_nents_sum += sg_nents(sk_next->src);
- dst_nents_sum += sg_nents(sk_next->dst);
- if (sk_next->src == sk_next->dst && i > 0) {
- dev_err(ctx->dev, "err: src == dst\n");
- return -EFAULT;
- }
- }
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
- buf_map_limit = FUSION_LIMIT_MAX;
- if (src_nents_sum > buf_map_limit || dst_nents_sum > buf_map_limit) {
- dev_err(ctx->dev, "src[%d] or dst[%d] bigger than %d\n",
- src_nents_sum, dst_nents_sum, buf_map_limit);
- return -ENOBUFS;
- }
-
- copyed_src_nents = 0;
- copyed_dst_nents = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- src_nents = sg_nents(sk_next->src);
- dst_nents = sg_nents(sk_next->dst);
-
- if (i != req->fusion_num - 1) {
- sg_unmark_end(&sk_next->src[src_nents - 1]);
- sg_unmark_end(&sk_next->dst[dst_nents - 1]);
- }
-
- memcpy(c_req->src + copyed_src_nents, sk_next->src,
- src_nents * sizeof(struct scatterlist));
- memcpy(c_req->dst + copyed_dst_nents, sk_next->dst,
- dst_nents * sizeof(struct scatterlist));
+ if (ctx->use_pbuf)
+ return sec_cipher_pbuf_map(ctx, req, src);
- copyed_src_nents += src_nents;
- copyed_dst_nents += dst_nents;
- }
-
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->src,
- qp_ctx->c_in_pool, req->req_id, &c_req->c_in_dma);
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
- if (IS_ERR(c_req->c_in))
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(c_req->c_in);
+ }
- if (c_req->dst == c_req->src) {
+ if (dst == src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
- c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->dst,
- qp_ctx->c_out_pool, req->req_id, &c_req->c_out_dma);
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
if (IS_ERR(c_req->c_out)) {
- ret = PTR_ERR(c_req->c_out);
- goto err_unmap_src;
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
}
}
return 0;
-
-err_unmap_src:
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
-
- return ret;
}
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
-
- if (c_req->dst != c_req->src)
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
- hisi_acc_sg_buf_unmap(dev, c_req->dst, c_req->c_out);
+ if (ctx->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- return 0;
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req =
- (struct skcipher_request *)req->priv[0];
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct skcipher_request *sk_next;
- int i, iv_size;
-
- c_req->c_len = sk_req->cryptlen;
-
- iv_size = crypto_skcipher_ivsize(atfm);
- if (iv_size > SEC_IV_SIZE)
- return -EINVAL;
+ struct skcipher_request *sq = req->c_req.sk_req;
- memcpy(c_req->c_ivin, sk_req->iv, iv_size);
-
- if (ctx->is_fusion) {
- for (i = 1; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- memcpy(c_req->c_ivin + i * iv_size, sk_next->iv,
- iv_size);
- }
-
- c_req->gran_num = req->fusion_num;
- c_ctx->c_gran_size = sk_req->cryptlen;
- }
-
- return 0;
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
+ struct skcipher_request *sq = req->c_req.sk_req;
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type1.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type1.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
-
- sec_sqe->type1.c_mode = c_ctx->c_mode;
- sec_sqe->type1.c_alg = c_ctx->c_alg;
- sec_sqe->type1.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE1;
- sec_sqe->scene = SCENE_STORAGE;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
-
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
-
- if (c_ctx->c_mode == C_MODE_XTS)
- sec_sqe->type1.ci_gen = CI_GEN_BY_LBA;
-
- sec_sqe->type1.cipher_gran_size = c_ctx->c_gran_size;
- sec_sqe->type1.gran_num = c_req->gran_num;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, c_req->gran_num);
- sec_sqe->type1.block_size = c_req->c_len;
-
- sec_sqe->type1.lba_l = lower_32_bits(c_req->lba);
- sec_sqe->type1.lba_h = upper_32_bits(c_req->lba);
-
- sec_sqe->type1.tag = req->req_id;
-
- return 0;
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
int ret;
- ret = hisi_sec_skcipher_bd_fill_storage(ctx, req);
- if (ret)
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (unlikely(ret))
return ret;
- req->sec_sqe.type1.ci_gen = CI_GEN_BY_ADDR;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
-
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type2.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
+ ctx->req_op->do_transfer(ctx, req);
- sec_sqe->type2.c_mode = c_ctx->c_mode;
- sec_sqe->type2.c_alg = c_ctx->c_alg;
- sec_sqe->type2.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE2;
- sec_sqe->scene = SCENE_IPSEC;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (unlikely(ret))
+ goto unmap_req_buf;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, 1);
+ return ret;
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
- sec_sqe->type2.c_len = c_req->c_len;
- sec_sqe->type2.tag = req->req_id;
+ return ret;
+}
- return 0;
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
}
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_cnt = req->req_cnt;
- int ret;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_alg_res *res = &req->qp_ctx->res[req->req_id];
- mutex_lock(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ret == 0)
- ctx->sec->sec_dfx.send_cnt++;
- mutex_unlock(&qp_ctx->req_lock);
+ if (ctx->use_pbuf) {
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ } else {
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ }
- return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
-static void hisi_sec_skcipher_complete(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req, int err_code)
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
- struct skcipher_request **sk_reqs =
- (struct skcipher_request **)req->priv;
- int i, req_fusion_num;
-
- if (ctx->is_fusion == SEC_NO_FUSION)
- req_fusion_num = 1;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+ u8 de = 0;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
else
- req_fusion_num = req->fusion_num;
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
- for (i = 0; i < req_fusion_num; i++)
- sk_reqs[i]->base.complete(&sk_reqs[i]->base, err_code);
-
- /* free sk_reqs if this request is completed */
- if (err_code != -EINPROGRESS)
- __sync_add_and_fetch(&ctx->sec->sec_dfx.put_task_cnt,
- req_fusion_num);
+ if (ctx->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
else
- __sync_add_and_fetch(&ctx->sec->sec_dfx.busy_comp_cnt,
- req_fusion_num);
-}
-
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_id = req->req_id;
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
- hisi_sec_skcipher_complete(ctx, req, -EINPROGRESS);
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
- hisi_sec_skcipher_complete(ctx, req, req->err_type);
+ /* Just set DST address type */
+ if (ctx->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
- hisi_sec_free_req_id(qp_ctx, req_id);
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
-static int sec_get_issue_id_range(atomic_t *qid, int start, int end)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
- int issue_id;
- int issue_len = end - start;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ unsigned int cryptlen;
+ size_t sz;
+ u8 *iv;
+
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
- issue_id = (atomic_inc_return(qid) - start) % issue_len + start;
- if (issue_id % issue_len == 0 && atomic_read(qid) > issue_len)
- atomic_sub(issue_len, qid);
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
- return issue_id;
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static inline int sec_get_issue_id(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
- int issue_id;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- if (req->c_req.encrypt == 1)
- issue_id = sec_get_issue_id_range(&ctx->enc_qid, 0,
- ctx->enc_q_num);
- else
- issue_id = sec_get_issue_id_range(&ctx->dec_qid, ctx->enc_q_num,
- ctx->q_num);
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
- return issue_id;
-}
+	/* Output the IV after CBC-mode encryption */
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
-static inline void hisi_sec_inc_thread_cnt(struct hisi_sec_ctx *ctx)
-{
- int thread_cnt = atomic_inc_return(&ctx->thread_cnt);
+ if (req->fake_busy)
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- if (thread_cnt > ctx->sec->sec_dfx.thread_cnt)
- ctx->sec->sec_dfx.thread_cnt = thread_cnt;
+ sk_req->base.complete(&sk_req->base, err);
}
-static struct hisi_sec_req *sec_request_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *in_req, int *fusion_send, int *fake_busy)
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec_req *req;
- int issue_id, ret;
-
- __sync_add_and_fetch(&ctx->sec->sec_dfx.get_task_cnt, 1);
-
- issue_id = sec_get_issue_id(ctx, in_req);
- hisi_sec_inc_thread_cnt(ctx);
-
- qp_ctx = &ctx->qp_ctx[issue_id];
-
- mutex_lock(&qp_ctx->req_lock);
-
- if (in_req->c_req.sk_req->src == in_req->c_req.sk_req->dst) {
- *fusion_send = 1;
- } else if (qp_ctx->fusion_req &&
- qp_ctx->fusion_req->fusion_num < qp_ctx->fusion_limit) {
- req = qp_ctx->fusion_req;
-
- *fake_busy = req->fake_busy;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt,
- *fake_busy);
-
- req->priv[req->fusion_num] = in_req->c_req.sk_req;
- req->fusion_num++;
- in_req->fusion_num = req->fusion_num;
- if (req->fusion_num == qp_ctx->fusion_limit) {
- *fusion_send = 1;
- qp_ctx->fusion_req = NULL;
- }
- mutex_unlock(&qp_ctx->req_lock);
- return req;
- }
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- req = in_req;
-
- if (hisi_sec_alloc_req_id(req, qp_ctx)) {
- mutex_unlock(&qp_ctx->req_lock);
- return NULL;
- }
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_free_queue_id(ctx, req);
+}
- req->fake_busy = 0;
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int queue_id;
- req->req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
- if (req->req_cnt >= ctx->req_fake_limit) {
- req->fake_busy = 1;
- *fake_busy = 1;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt, 1);
- }
+ /* To load balance */
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- dev_err(ctx->dev, "req_op get_res failed\n");
- mutex_unlock(&qp_ctx->req_lock);
- goto err_free_req_id;
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
+ return req->req_id;
}
- if (ctx->fusion_limit <= 1 || ctx->fusion_tmout_nsec == 0)
- *fusion_send = 1;
-
- if (ctx->is_fusion && *fusion_send == 0)
- qp_ctx->fusion_req = req;
-
- req->fusion_num = 1;
-
- req->priv[0] = in_req->c_req.sk_req;
- req->st_time = ktime_get();
-
- mutex_unlock(&qp_ctx->req_lock);
-
- return req;
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = true;
+ else
+ req->fake_busy = false;
-err_free_req_id:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- return NULL;
+ return 0;
}
-static int sec_request_transfer(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
- ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ ret = sec_request_init(ctx, req);
+ if (unlikely(ret))
return ret;
- ret = ctx->req_op->do_transfer(ctx, req);
- if (ret)
- goto unmap_req_buf;
+ ret = sec_request_transfer(ctx, req);
+ if (unlikely(ret))
+ goto err_uninit_req;
- memset(&req->sec_sqe, 0, sizeof(struct hisi_sec_sqe));
- ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
- goto unmap_req_buf;
+	/* Output the IV for CBC-mode decryption */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req, ctx->alg_type);
- return 0;
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "send sec request failed!\n");
+ goto err_send_req;
+ }
-unmap_req_buf:
- ctx->req_op->buf_unmap(ctx, req);
return ret;
-}
-
-static int sec_request_send(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req)
-{
- int ret;
- ret = ctx->req_op->bd_send(ctx, req);
+err_send_req:
+	/* On failure, restore the IV from the user request */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ ctx->c_ctx.ivsize);
+ }
- if (ret == 0 || ret == -EBUSY || ret == -EINPROGRESS)
- atomic_dec(&ctx->thread_cnt);
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
return ret;
}
-static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
+static const struct sec_req_op sec_skcipher_req_ops = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_req *req;
- int fusion_send = 0;
- int fake_busy = 0;
- int ret;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- in_req->fusion_num = 1;
+ ctx->req_op = &sec_skcipher_req_ops;
- req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
+ return sec_skcipher_init(tfm);
+}
- if (!req) {
- dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n");
- return -ENOMEM;
- }
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_uninit(tfm);
+}
- if (ctx->is_fusion && fusion_send == 0)
- return fake_busy ? -EBUSY : -EINPROGRESS;
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- ret = sec_request_transfer(ctx, req);
- if (ret) {
- dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret);
- goto err_free_req;
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
}
+ sreq->c_req.c_len = sk_req->cryptlen;
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret);
- goto err_unmap_req;
- }
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ ctx->use_pbuf = true;
- return ret;
+ if (c_alg == SEC_CALG_3DES) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(req->qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
- return ret;
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
- struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
- return -EINVAL;
+ if (!sk_req->cryptlen)
+ return 0;
- req->c_req.sk_req = sk_req;
+ req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
- req->ctx = ctx;
+ req->ctx = ctx;
+
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
- return sec_io_proc(ctx, req);
+ return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@@ -1415,7 +998,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
- sec_max_key_size, hisi_sec_cipher_ctx_init_func, blk_size, iv_size)\
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
@@ -1423,12 +1006,11 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC,\
.cra_blocksize = blk_size,\
- .cra_ctxsize = sizeof(struct hisi_sec_ctx),\
- .cra_alignmask = 0,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
},\
- .init = hisi_sec_cipher_ctx_init_func,\
- .exit = hisi_sec_cipher_ctx_exit,\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_skcipher_decrypt,\
.encrypt = sec_skcipher_encrypt,\
@@ -1437,75 +1019,55 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.ivsize = iv_size,\
},
-#define SEC_SKCIPHER_NORMAL_ALG(name, key_func, min_key_size, \
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_alg, blk_size, iv_size)
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-#define SEC_SKCIPHER_FUSION_ALG(name, key_func, min_key_size, \
- max_key_size, blk_size, iv_size) \
- SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_multi_iv, blk_size, iv_size)
-
-static struct skcipher_alg sec_normal_algs[] = {
- SEC_SKCIPHER_NORMAL_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des)", sec_setkey_des_ecb,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des)", sec_setkey_des_cbc,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, DES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
- DES3_EDE_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
-};
+static struct skcipher_alg sec_skciphers[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-static struct skcipher_alg sec_fusion_algs[] = {
- SEC_SKCIPHER_FUSION_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_FUSION_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
};
-int hisi_sec_register_to_crypto(int fusion_limit)
+int sec_register_to_crypto(void)
{
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_add_return(1, &sec_active_devs) == 1)
+ return crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return 0;
}
-void hisi_sec_unregister_from_crypto(int fusion_limit)
+void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_sub_return(1, &sec_active_devs) == 0)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
}
-
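
(Illustrative aside, not part of the patch.) The reworked sec_crypto.c above funnels every skcipher request through sec_process(): request init picks a queue and a request id, transfer maps the buffers and fills the BD, and bd_send queues it to hardware, with -EBUSY/-EINPROGRESS as the only expected return codes. The standalone C sketch below shows the "fake busy" backpressure pattern that sec_request_init() and sec_skcipher_callback() implement; the demo_* names are stand-ins, and the counters are plain ints here rather than the driver's atomics.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_qp_ctx {
	int pending_reqs;    /* atomic_t in the real driver */
	int fake_req_limit;  /* e.g. a fraction of the queue depth */
};

struct demo_req {
	bool fake_busy;
};

/* Submission: once the queue is crowded, flag the request as "fake busy". */
static int demo_request_init(struct demo_qp_ctx *qp, struct demo_req *req)
{
	req->fake_busy = (++qp->pending_reqs >= qp->fake_req_limit);

	/* The BD is still sent to hardware; only the return code differs. */
	return req->fake_busy ? -EBUSY : -EINPROGRESS;
}

/* Completion: a fake-busy request is completed twice, first with
 * -EINPROGRESS and then with the real status, mirroring
 * sec_skcipher_callback(). */
static void demo_callback(struct demo_qp_ctx *qp, struct demo_req *req, int err)
{
	qp->pending_reqs--;
	if (req->fake_busy)
		printf("  complete(-EINPROGRESS)\n");
	printf("  complete(%d)\n", err);
}

int main(void)
{
	struct demo_qp_ctx qp = { .pending_reqs = 0, .fake_req_limit = 2 };
	struct demo_req a = { 0 }, b = { 0 };

	printf("submit a -> %d\n", demo_request_init(&qp, &a)); /* -EINPROGRESS */
	printf("submit b -> %d\n", demo_request_init(&qp, &b)); /* -EBUSY */
	demo_callback(&qp, &a, 0);
	demo_callback(&qp, &b, 0);

	return 0;
}

The double completion matters because the crypto API treats -EBUSY from the driver as "queued but back off": the request must still be signalled -EINPROGRESS before its final status is delivered.
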
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index bffbeba..221257e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -1,13 +1,238 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_CRYPTO_H
-#define HISI_SEC_CRYPTO_H
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
-#define SEC_IV_SIZE 24
-#define SEC_MAX_KEY_SIZE 64
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AUTH_KEY_SIZE 64
-int hisi_sec_register_to_crypto(int fusion_limit);
-void hisi_sec_unregister_from_crypto(int fusion_limit);
+#define SEC_COMM_SCENE 0
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+enum sec_ci_gen {
+ SEC_CI_GEN_BY_ADDR = 0x0,
+ SEC_CI_GEN_BY_LBA = 0X3,
+};
+
+enum sec_scene {
+ SEC_SCENE_IPSEC = 0x1,
+ SEC_SCENE_STORAGE = 0x5,
+};
+
+enum sec_work_mode {
+ SEC_NO_FUSION = 0x0,
+ SEC_IV_FUSION = 0x1,
+ SEC_FUSION_BUTT
+};
+
+enum sec_req_ops_type {
+ SEC_OPS_SKCIPHER_ALG = 0x0,
+ SEC_OPS_DMCRYPT = 0x1,
+ SEC_OPS_MULTI_IV = 0x2,
+ SEC_OPS_BUTT
+};
+
+struct sec_sqe_type2 {
+ /*
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+	 * flag: 7~10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+	 * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+	 * src_addr_type: bit 7, continued in sdm_addr_type bits 0~1
+ */
+ __u8 sds_sa_type;
+
+ /*
+	 * src_addr_type: 0~1 bits, not used now;
+	 * set this field if PRP is supported, otherwise set it to zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
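
(Again an illustrative aside, not part of the patch.) The bit-layout comments in struct sec_sqe / sec_sqe_type2 above are what sec_skcipher_bd_fill() relies on when it ORs fields into icvw_kmode, type_cipher_auth and sds_sa_type. The standalone sketch below packs an AES-256-XTS encrypt BD the same way; the DEMO_*_OFFSET values are derived from those comments and are assumptions for illustration, not definitions copied from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Offsets derived from the bit-layout comments above. */
#define DEMO_CMODE_OFFSET   12  /* icvw_kmode: c_mode, bits 12~15 */
#define DEMO_CKEY_OFFSET     9  /* icvw_kmode: c_key_len, bits 9~11 */
#define DEMO_CIPHER_OFFSET   4  /* type_cipher_auth: cipher, bits 4~5 */
#define DEMO_SCENE_OFFSET    3  /* sds_sa_type: scene, bits 3~6 */
#define DEMO_DE_OFFSET       1  /* sds_sa_type: de, bits 1~2 */
#define DEMO_SRC_SGL_OFFSET  7  /* sds_sa_type: src_addr_type, bit 7 */

int main(void)
{
	/* AES-256-XTS encryption, SGL src/dst at different addresses. */
	uint16_t icvw_kmode =
		(uint16_t)(0x7 << DEMO_CMODE_OFFSET) |  /* SEC_CMODE_XTS */
		(uint16_t)(0x2 << DEMO_CKEY_OFFSET);    /* SEC_CKEY_256BIT */
	uint8_t type_cipher_auth =
		0x2 |                                   /* SEC_BD_TYPE2 */
		(0x1 << DEMO_CIPHER_OFFSET);            /* SEC_CIPHER_ENC */
	uint8_t sds_sa_type =
		(0x1 << DEMO_DE_OFFSET) |               /* src != dst */
		(0x0 << DEMO_SCENE_OFFSET) |            /* SEC_COMM_SCENE */
		(uint8_t)(0x1 << DEMO_SRC_SGL_OFFSET);  /* SEC_SGL */

	printf("icvw_kmode=0x%04x type_cipher_auth=0x%02x sds_sa_type=0x%02x\n",
	       icvw_kmode, type_cipher_auth, sds_sa_type);

	return 0;
}

The real driver additionally converts the multi-byte fields to little-endian with cpu_to_le16()/cpu_to_le32() before handing the BD to hardware.
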
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b4e5d57f..b3340c0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1,33 +1,30 @@
// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2018-2019 HiSilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
-#include <linux/uacce.h>
+
#include "sec.h"
-#include "sec_crypto.h"
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
+
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 64
@@ -35,8 +32,6 @@
#define SEC_CTX_Q_NUM_DEF 24
#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
-#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
@@ -44,13 +39,13 @@
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
-#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
-#define SEC_CORE_ECC_INFO 0x301C14
-#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
-#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFFFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
@@ -64,6 +59,7 @@
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
@@ -71,26 +67,24 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
+#define SEC_SAA_EN_REG 0x0270
#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-struct hisi_sec_hw_error {
+struct sec_hw_error {
u32 int_msk;
const char *msg;
};
@@ -98,9 +92,8 @@ struct hisi_sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
-static struct workqueue_struct *sec_wq;
-static const struct hisi_sec_hw_error sec_hw_error[] = {
+static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
@@ -113,36 +106,13 @@ struct hisi_sec_hw_error {
{ /* sentinel */ }
};
-enum ctrl_debug_file_index {
- SEC_CURRENT_QM,
- SEC_CLEAR_ENABLE,
- SEC_DEBUG_FILE_NUM,
-};
-
-static const char * const ctrl_debug_file_name[] = {
+static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-struct ctrl_debug_file {
- enum ctrl_debug_file_index index;
- spinlock_t lock;
- struct hisi_sec_ctrl *ctrl;
-};
-
-/*
- * One SEC controller has one PF and multiple VFs, some global configurations
- * which PF has need this structure.
- *
- * Just relevant for PF.
- */
-struct hisi_sec_ctrl {
- struct hisi_sec *hisi_sec;
- struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
-};
-
static struct debugfs_reg32 sec_dfx_regs[] = {
- {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
{"SEC_BD_LATENCY_MAX ", 0x301608},
@@ -262,71 +232,12 @@ static int vfs_num_set(const char *val, const struct kernel_param *kp)
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
-{
- u32 fusion_limit;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_limit);
- if (ret)
- return ret;
-
- if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
- pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
- FUSION_LIMIT_MAX);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_limit_ops = {
- .set = sec_fusion_limit_set,
- .get = param_get_int,
-};
-static u32 fusion_limit = FUSION_LIMIT_DEF;
-
-module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
-
-static int sec_fusion_tmout_ns_set(const char *val,
- const struct kernel_param *kp)
-{
- u32 fusion_tmout_nsec;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_tmout_nsec);
- if (ret)
- return ret;
-
- if (fusion_tmout_nsec > NSEC_PER_SEC) {
- pr_err("fusion_tmout_nsec[%u] is too large", fusion_tmout_nsec);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_time_ops = {
- .set = sec_fusion_tmout_ns_set,
- .get = param_get_int,
-};
-static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
-module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
-MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
-
-static const struct pci_device_id hisi_sec_dev_ids[] = {
+static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
{
@@ -390,9 +301,9 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
@@ -436,6 +347,7 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
return sec_engine_init(qm);
}
+/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
/* clear current_qm */
@@ -497,23 +409,16 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
+static u32 sec_current_qm_read(struct sec_debug_file *file)
{
- struct hisi_sec *hisi_sec = file->ctrl->hisi_sec;
-
- return &hisi_sec->qm;
-}
-
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 vfq_num;
u32 tmp;
@@ -521,17 +426,17 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
+ if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num) {
+
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
(qm->vfs_num - 1) * vfq_num;
- } else {
+ else
qm->debug.curr_qm_qp_num = vfq_num;
- }
}
writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
@@ -548,33 +453,33 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return 0;
}
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
SEC_CTRL_CNT_CLR_CE_BIT;
}
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 tmp;
if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
u32 val;
int ret;
@@ -583,10 +488,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- val = current_qm_read(file);
+ val = sec_current_qm_read(file);
break;
case SEC_CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = sec_clear_enable_read(file);
break;
default:
spin_unlock_irq(&file->lock);
@@ -599,10 +504,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
unsigned long val;
int len, ret;
@@ -626,12 +531,12 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- ret = current_qm_write(file, val);
+ ret = sec_current_qm_write(file, val);
if (ret)
goto err_input;
break;
case SEC_CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = sec_clear_enable_write(file, val);
if (ret)
goto err_input;
break;
@@ -649,30 +554,30 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return ret;
}
-static const struct file_operations ctrl_debug_fops = {
+static const struct file_operations sec_dbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_qm *qm)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
+static int sec_core_debug_init(struct hisi_qm *qm)
+{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &sec->sec_dfx;
+ struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
- char buf[SEC_DBGFS_VAL_MAX_LEN];
- int ret;
+ struct dentry *tmp_d;
- ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx");
- if (ret < 0)
- return -ENOENT;
-
- tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -682,123 +587,69 @@ static int hisi_sec_core_debug_init(struct hisi_qm *qm)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- tmp = debugfs_create_u64("send_by_tmout", 0444, tmp_d,
- &dfx->send_by_tmout);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_by_full", 0444, tmp_d,
- &dfx->send_by_full);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("get_task_cnt", 0444, tmp_d,
- &dfx->get_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("put_task_cnt", 0444, tmp_d,
- &dfx->put_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("gran_task_cnt", 0444, tmp_d,
- &dfx->gran_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("thread_cnt", 0444, tmp_d, &dfx->thread_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("fake_busy_cnt", 0444,
- tmp_d, &dfx->fake_busy_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- tmp = debugfs_create_u64("busy_comp_cnt", 0444, tmp_d,
- &dfx->busy_comp_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
+static int sec_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
- struct dentry *tmp;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->ctrl->files[i].lock);
- sec->ctrl->files[i].ctrl = sec->ctrl;
- sec->ctrl->files[i].index = i;
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
qm->debug.debug_root,
- sec->ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ sec->debug.files + i,
+ &sec_dbg_fops);
}
- return hisi_sec_core_debug_init(qm);
+ return sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_qm *qm)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), sec_debugfs_root);
- if (!dev_d)
- return -ENOENT;
-
- qm->debug.debug_root = dev_d;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = hisi_sec_ctrl_debug_init(qm);
+ ret = sec_debug_init(qm);
if (ret)
goto failed_to_create;
}
return 0;
- failed_to_create:
+failed_to_create:
debugfs_remove_recursive(sec_debugfs_root);
+
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
debugfs_remove_recursive(qm->debug.debug_root);
-
- if (qm->fun_type == QM_HW_PF) {
- sec_debug_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- }
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- const struct hisi_sec_hw_error *errs = sec_hw_error;
+ const struct sec_hw_error *errs = sec_hw_errors;
struct device *dev = &qm->pdev->dev;
u32 err_val;
@@ -809,7 +660,7 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
err_val = readl(qm->io_base +
- SEC_CORE_ECC_INFO);
+ SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
}
@@ -837,19 +688,10 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
+static int sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
- struct hisi_sec_ctrl *ctrl;
int ret;
- ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- hisi_sec->ctrl = ctrl;
- ctrl->hisi_sec = hisi_sec;
-
switch (qm->ver) {
case QM_HW_V1:
qm->ctrl_q_num = SEC_QUEUE_NUM_V1;
@@ -868,7 +710,7 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
qm->err_ini.err_info.ce = QM_BASE_CE;
qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
+ QM_ACC_WB_NOT_READY_TIMEOUT;
qm->err_ini.err_info.fe = 0;
qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
qm->err_ini.err_info.acpi_rst = "SRST";
@@ -884,42 +726,32 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
return ret;
hisi_qm_dev_err_init(qm);
- qm->err_ini.open_axi_master_ooo(qm);
sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int sec_probe_init(struct hisi_qm *qm)
{
int ret;
-#ifdef CONFIG_CRYPTO_QM_UACCE
- qm->algs = "sec\ncipher\ndigest\n";
- qm->uacce_mode = uacce_mode;
-#endif
- qm->pdev = pdev;
- ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
- if (ret)
- return ret;
- qm->sqe_size = SEC_SQE_SIZE;
- qm->dev_name = sec_name;
- qm->qm_list = &sec_devices;
- qm->wq = sec_wq;
-
- return 0;
-}
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
-static int hisi_sec_probe_init(struct hisi_qm *qm)
-{
if (qm->fun_type == QM_HW_PF) {
- return hisi_sec_pf_probe_init(qm);
+ ret = sec_pf_probe_init(qm);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
* so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
* to trigger only one VF in v1 hardware.
- *
* v2 hardware has no such problem.
*/
if (qm->ver == QM_HW_V1) {
@@ -927,41 +759,92 @@ static int hisi_sec_probe_init(struct hisi_qm *qm)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
+ } else {
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
+}
+
+static void sec_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ hisi_qm_dev_err_uninit(qm);
+ destroy_workqueue(qm->wq);
+}
+
+static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &sec_devices;
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+
+ return 0;
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+ dev_info(dev, "SMMU Opened! the iommu type:= %d!\n",
+ domain->type);
+ }
}
-static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_sec *hisi_sec;
+ struct sec_dev *sec;
struct hisi_qm *qm;
int ret;
- hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
- if (!hisi_sec)
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
return -ENOMEM;
- qm = &hisi_sec->qm;
+ qm = &sec->qm;
qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- ret = hisi_sec_qm_pre_init(qm, pdev);
+ ret = sec_qm_pre_init(qm, pdev);
if (ret)
return ret;
- hisi_sec->ctx_q_num = ctx_q_num;
- hisi_sec->fusion_limit = fusion_limit;
- hisi_sec->fusion_tmout_nsec = fusion_time;
-
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
ret = hisi_qm_init(qm);
if (ret) {
pci_err(pdev, "Failed to init qm (%d)!\n", ret);
return ret;
}
- ret = hisi_sec_probe_init(qm);
+ ret = sec_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
@@ -970,18 +853,18 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret) {
pci_err(pdev, "Failed to start qm (%d)!\n", ret);
- goto err_qm_uninit;
+ goto err_probe_uninit;
}
- ret = hisi_sec_debugfs_init(qm);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
hisi_qm_add_to_list(qm, &sec_devices);
- ret = hisi_sec_register_to_crypto(fusion_limit);
+ ret = sec_register_to_crypto();
if (ret < 0) {
- pci_err(pdev, "Failed to register driver to crypto!\n");
+ pr_err("Failed to register driver to crypto!\n");
goto err_remove_from_list;
}
@@ -994,121 +877,115 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_crypto_unregister:
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_unregister_from_crypto();
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
+err_probe_uninit:
+ sec_probe_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}
-static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
- if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, &sec_devices);
- else
+ if (num_vfs)
return hisi_qm_sriov_enable(pdev, num_vfs);
+ else
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
}
-static void hisi_sec_remove(struct pci_dev *pdev)
+static void sec_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_qm_remove_wait_delay(qm, &sec_devices);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ sec_unregister_from_crypto();
+
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
(void)hisi_qm_sriov_disable(pdev, NULL);
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_debugfs_exit(qm);
- hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_qm_dev_err_uninit(qm);
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(qm);
hisi_qm_uninit(qm);
}
-static const struct pci_error_handlers hisi_sec_err_handler = {
+static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
- .slot_reset = hisi_qm_dev_slot_reset,
- .reset_prepare = hisi_qm_reset_prepare,
- .reset_done = hisi_qm_reset_done,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
-static struct pci_driver hisi_sec_pci_driver = {
+static struct pci_driver sec_pci_driver = {
.name = "hisi_sec2",
- .id_table = hisi_sec_dev_ids,
- .probe = hisi_sec_probe,
- .remove = hisi_sec_remove,
- .sriov_configure = hisi_sec_sriov_configure,
- .err_handler = &hisi_sec_err_handler,
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
.shutdown = hisi_qm_dev_shutdown,
};
-static void hisi_sec_register_debugfs(void)
+static void sec_register_debugfs(void)
{
if (!debugfs_initialized())
return;
sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
- if (IS_ERR_OR_NULL(sec_debugfs_root))
- sec_debugfs_root = NULL;
}
-static void hisi_sec_unregister_debugfs(void)
+static void sec_unregister_debugfs(void)
{
debugfs_remove_recursive(sec_debugfs_root);
}
-static int __init hisi_sec_init(void)
+static int __init sec_init(void)
{
int ret;
- sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
-
- if (!sec_wq) {
- pr_err("Fallied to alloc workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&sec_devices.list);
mutex_init(&sec_devices.lock);
sec_devices.check = NULL;
+ sec_register_debugfs();
- hisi_sec_register_debugfs();
-
- ret = pci_register_driver(&hisi_sec_pci_driver);
+ ret = pci_register_driver(&sec_pci_driver);
if (ret < 0) {
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ sec_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
+ return ret;
}
- return ret;
+ return 0;
}
-static void __exit hisi_sec_exit(void)
+static void __exit sec_exit(void)
{
- pci_unregister_driver(&hisi_sec_pci_driver);
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
}
-module_init(hisi_sec_init);
-module_exit(hisi_sec_exit);
+module_init(sec_init);
+module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo(a)huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang(a)huawei.com>");
MODULE_AUTHOR("Zhang Wei <zhangwei375(a)huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h
deleted file mode 100644
index 7c76e19..00000000
--- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Copyright (c) 2018-2019 HiSilicon Limited. */
-
-#ifndef HISI_SEC_USR_IF_H
-#define HISI_SEC_USR_IF_H
-
-struct hisi_sec_sqe_type1 {
- __u32 rsvd2:6;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 rsvd1:7;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 rsvd0:10;
- __u32 inveld:1;
-
- __u32 mac_len:6;
- __u32 a_key_len:5;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 auth_gran_size:24;
- __u32:8;
- __u32 cipher_gran_size:24;
- __u32:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 gran_num:16;
- __u32 rsvd5:16;
- __u32 src_skip_data_len:24;
- __u32 rsvd6:8;
- __u32 dst_skip_data_len:24;
- __u32 rsvd7:8;
- __u32 tag:16;
- __u32 rsvd8:16;
- __u32 gen_page_pad_ctrl:4;
- __u32 gen_grd_ctrl:4;
- __u32 gen_ver_ctrl:4;
- __u32 gen_app_ctrl:4;
- __u32 gen_ver_val:8;
- __u32 gen_app_val:8;
- __u32 private_info;
- __u32 gen_ref_ctrl:4;
- __u32 page_pad_type:2;
- __u32 rsvd9:2;
- __u32 chk_grd_ctrl:4;
- __u32 chk_ref_ctrl:4;
- __u32 block_size:16;
- __u32 lba_l;
- __u32 lba_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 dif_check:3;
- __u32 rsvd10:2;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 dw29;
- __u32 dw30;
- __u32 dw31;
-};
-
-struct hisi_sec_sqe_type2 {
- __u32 nonce_len:4;
- __u32 huk:1;
- __u32 key_s:1;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 a_pad:2;
- __u32 c_s:2;
- __u32 rsvd1:2;
- __u32 rhf:1;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 write_frame_len:3;
- __u32 cal_iv_addr_en:1;
- __u32 tls_up:1;
- __u32 rsvd0:5;
- __u32 inveld:1;
- __u32 mac_len:5;
- __u32 a_key_len:6;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 a_len:24;
- __u32 iv_offset_l:8;
- __u32 c_len:24;
- __u32 iv_offset_h:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 cs_ip_header_offset:16;
- __u32 cs_udp_header_offset:16;
- __u32 pass_word_len:16;
- __u32 dk_len:16;
- __u32 salt3:8;
- __u32 salt2:8;
- __u32 salt1:8;
- __u32 salt0:8;
- __u32 tag:16;
- __u32 rsvd5:16;
- __u32 c_pad_type:4;
- __u32 c_pad_len:8;
- __u32 c_pad_data_type:4;
- __u32 c_pad_len_field:2;
- __u32 rsvd6:14;
- __u32 long_a_data_len_l;
- __u32 long_a_data_len_h;
- __u32 a_ivin_addr_l;
- __u32 a_ivin_addr_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 rsvd10:5;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 mac_i3:8;
- __u32 mac_i2:8;
- __u32 mac_i1:8;
- __u32 mac_i0:8;
- __u32 check_sum_i:16;
- __u32 tls_pad_len_i:8;
- __u32 rsvd12:8;
- __u32 counter;
-};
-
-struct hisi_sec_sqe {
- __u32 type:4;
- __u32 cipher:2;
- __u32 auth:2;
- __u32 seq:1;
- __u32 de:2;
- __u32 scene:4;
- __u32 src_addr_type:3;
- __u32 dst_addr_type:3;
- __u32 mac_addr_type:3;
- __u32 rsvd0:8;
- union {
- struct hisi_sec_sqe_type1 type1;
- struct hisi_sec_sqe_type2 type2;
- };
-};
-
-#endif
--
1.8.3
[PATCH] arm64: kprobes: Recover pstate.D in single-step exception handler
by Yang Yingliang 17 Apr '20
From: Masami Hiramatsu <mhiramat(a)kernel.org>
mainline inclusion
from mainline-5.3-rc3
commit b3980e48528c
category: bugfix
bugzilla: 20080
CVE: NA
-------------------------------------------------
kprobes manipulates the interrupted PSTATE for single step and
doesn't restore it. Thus, if we put a kprobe where pstate.D
(debug) is masked, the mask will be cleared after the kprobe hits.
Moreover, in the most complicated case, this can lead to a kernel
crash with the message below when a nested kprobe hits.
[ 152.118921] Unexpected kernel single-step exception at EL1
When the 1st kprobe hits, do_debug_exception() will be called.
At this point, the debug exception (= pstate.D) must be masked (= 1).
But if another kprobe hits before the single-step of the first kprobe
(e.g. inside a user pre_handler), it unmasks the debug exception
(pstate.D = 0) and returns.
Then, when the 1st kprobe sets up single-step, it saves the current
DAIF, masks DAIF, enables single-step, and restores DAIF.
However, since the "D" flag in DAIF was cleared by the 2nd kprobe, the
single-step exception happens soon after DAIF is restored.
This was introduced by commit 7419333fa15e ("arm64: kprobe:
Always clear pstate.D in breakpoint exception handler").
To solve this issue, store all DAIF bits and restore them
after single stepping.
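For reference, the fix can be summarized with the following simplified
sketch (the names mirror the hunks in the diff below; this is an
illustration of the save/restore pattern, not the exact upstream code):
/* Save every DAIF bit before single-stepping and put them all back
 * afterwards, so a nested kprobe cannot leave pstate.D cleared.
 */
#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
static void kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
				       struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK; /* remember D, A, I, F */
	regs->pstate |= PSR_I_BIT;                     /* keep IRQs masked */
	regs->pstate &= ~PSR_D_BIT;                    /* allow the step exception */
}
static void kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
					  struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;          /* clear all four bits... */
	regs->pstate |= kcb->saved_irqflag;  /* ...then restore the saved ones */
}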
Reported-by: Naresh Kamboju <naresh.kamboju(a)linaro.org>
Fixes: 7419333fa15e ("arm64: kprobe: Always clear pstate.D in breakpoint exception handler")
Reviewed-by: James Morse <james.morse(a)arm.com>
Tested-by: James Morse <james.morse(a)arm.com>
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Signed-off-by: Will Deacon <will(a)kernel.org>
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/daifflags.h | 1 +
arch/arm64/kernel/probes/kprobes.c | 40 ++++++--------------------------------
2 files changed, 7 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 3441ca0..1230923 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -24,6 +24,7 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2d63df1..fe9d207 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -180,33 +181,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -217,17 +191,17 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -264,8 +238,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
--
1.8.3
[PATCH 1/2] qm: optimize the maximum number of VF and delete invalid addr
by Yang Yingliang 17 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we optimize the way the maximum number of VFs is set,
which is designed for compatibility with future hardware revisions.
Then we remove the invalid address parameter definition and assignment.
Meanwhile, the return-code checks of the debugfs-related functions are
dropped, because their failure does not affect the main function of the
driver.
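As a rough illustration of the VF-limit change (names follow the qm.c
hunk below; this is a simplified sketch, not the exact driver code):
	int total_vfs = pci_sriov_get_totalvfs(pdev);   /* limit reported by the device */
	int num_vfs   = min_t(int, max_vfs, total_vfs); /* replaces the hard-coded QM_MAX_VFS_NUM cap */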
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 1 -
drivers/crypto/hisilicon/hpre/hpre_main.c | 54 ++++++++++++++-----------------
drivers/crypto/hisilicon/qm.c | 22 ++++---------
drivers/crypto/hisilicon/qm.h | 6 ++--
drivers/crypto/hisilicon/rde/rde_main.c | 2 ++
drivers/crypto/hisilicon/zip/zip_main.c | 2 ++
6 files changed, 38 insertions(+), 49 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 3ac02ef..42b2f2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -18,7 +18,6 @@ enum {
HPRE_CLUSTERS_NUM,
};
-
enum hpre_ctrl_dbgfs_file {
HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 4dc0d3e..f727158 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -435,8 +435,7 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
vfq_num = (qm->ctrl_q_num - qm->qp_num) / num_vfs;
if (val == num_vfs) {
qm->debug.curr_qm_qp_num =
- qm->ctrl_q_num - qm->qp_num -
- (num_vfs - 1) * vfq_num;
+ qm->ctrl_q_num - qm->qp_num - (num_vfs - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -592,7 +591,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
struct hpre *hpre;
if (dir) {
@@ -609,10 +608,9 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -623,7 +621,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -633,11 +630,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
- regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
@@ -648,7 +641,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
@@ -657,8 +650,6 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
return -EINVAL;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -668,9 +659,8 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -705,14 +695,10 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
- struct dentry *dir;
int ret;
- dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
- qm->debug.debug_root = dir;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -730,6 +716,11 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
return ret;
}
+static void hpre_debugfs_exit(struct hisi_qm *qm)
+{
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
int ret;
@@ -929,7 +920,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
- debugfs_remove_recursive(qm->debug.debug_root);
+
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
@@ -967,19 +959,23 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
static int __init hpre_init(void)
{
int ret;
INIT_LIST_HEAD(&hpre_devices.list);
mutex_init(&hpre_devices.lock);
- hpre_devices.check = NULL;
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -989,7 +985,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 6a5337a..d3429e7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1428,6 +1428,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
qp->c_flag << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
+
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -2330,6 +2331,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
+
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2384,13 +2386,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
-#ifdef CONFIG_CRYPTO_QM_UACCE
- /* get reserved dma memory */
- qm->reserve = qm->qdma.va + off;
- qm->reserve_dma = qm->qdma.dma + off;
- off += PAGE_SIZE;
-#endif
-
ret = qm_eq_aeq_ctx_cfg(qm);
if (ret)
return ret;
@@ -2681,7 +2676,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2697,12 +2692,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -3038,7 +3028,9 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
+ int total_vfs;
+ total_vfs = pci_sriov_get_totalvfs(pdev);
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev,
@@ -3046,7 +3038,7 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
return 0;
}
- num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ num_vfs = min_t(int, max_vfs, total_vfs);
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 36e888f..79e29ee 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -19,7 +19,7 @@
#define QNUM_V1 4096
#define QNUM_V2 1024
-#define QM_MAX_VFS_NUM 63
+#define QM_MAX_VFS_NUM_V2 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -322,9 +322,7 @@ struct hisi_qm {
resource_size_t size;
struct uacce uacce;
const char *algs;
- void *reserve;
int uacce_mode;
- dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
struct work_struct work;
@@ -423,7 +421,7 @@ static inline int vf_num_set(const char *val, const struct kernel_param *kp)
if (ret < 0)
return ret;
- if (n > QM_MAX_VFS_NUM)
+ if (n > QM_MAX_VFS_NUM_V2)
return -ERANGE;
return param_set_int(val, kp);
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 318d4a0..946532f 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -20,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
+
#include "rde.h"
#define HRDE_QUEUE_NUM_V1 4096
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 54681dc..83e2869 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -858,8 +858,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
hisi_qm_remove_wait_delay(qm, &zip_devices);
+#endif
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, NULL);
--
1.8.3
[PATCH 1/4] Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues"
by Yang Yingliang 17 Apr '20
From: Mike Snitzer <snitzer(a)redhat.com>
mainline inclusion
from mainline-5.5-rc1
commit f612b2132db529feac4f965f28a1b9258ea7c22b
category: bugfix
bugzilla: 25149
CVE: NA
---------------------------
This reverts commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Revert required hand-patching due to subsequent changes that were
applied since commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Requires: ed0302e83098d ("dm crypt: make workqueue names device-specific")
Cc: stable(a)vger.kernel.org
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=199857
Reported-by: Vito Caputo <vcaputo(a)pengaru.com>
Signed-off-by: Mike Snitzer <snitzer(a)redhat.com>
Signed-off-by: Sun Ke <sunke32(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/md/dm-crypt.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f68b9bd..d451f98 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3996,17 +3996,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
--
1.8.3