If snprintf returns 0, (ret - 1) becomes negative and is used as an array index; return an explicit error instead. Also, the int return value should be cast to unsigned int (__u32) rather than size_t.
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
---
 v1/wd.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/v1/wd.c b/v1/wd.c
index e0a86dc..3839304 100644
--- a/v1/wd.c
+++ b/v1/wd.c
@@ -81,7 +81,7 @@ static int get_raw_attr(const char *dev_root, const char *attr,
 			dev_root, attr);
 	if (size <= 0) {
 		WD_ERR("get %s/%s path fail!\n", dev_root, attr);
-		return size;
+		return -WD_EINVAL;
 	}

 	ptrRet = realpath(attr_file, attr_path);
@@ -140,7 +140,7 @@ static int get_str_attr(struct dev_info *dinfo, const char *attr, char *buf,
 		return ret;
 	}

-	if ((size_t)ret == buf_sz)
+	if ((__u32)ret == buf_sz)
 		ret = ret - 1;

 	buf[ret] = '\0';
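For review context, a minimal standalone sketch of the pattern the fix enforces; build_attr_path() and PATH_STR_SIZE are hypothetical stand-ins for the wd.c helper and its buffer size, and -22 stands in for -WD_EINVAL:

#include <stdio.h>

#define PATH_STR_SIZE 256	/* hypothetical size, stands in for the wd.c constant */

/* Mirrors the fixed logic: snprintf may return 0 (empty result) or a
 * negative value (encoding error). Propagating the raw value lets a
 * caller treat 0 as success or compute a negative index like buf[ret - 1],
 * so a sentinel error code is returned instead.
 */
static int build_attr_path(char *attr_file, const char *dev_root,
			   const char *attr)
{
	int size = snprintf(attr_file, PATH_STR_SIZE, "%s/%s", dev_root, attr);

	if (size <= 0)
		return -22;	/* stand-in for -WD_EINVAL */

	return size;		/* strictly positive: size - 1 is a safe index */
}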
Remove the unused chip-number suffixes from the ChangeLog descriptions and the stale kp930 comment in the SEC driver header.
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
---
 ChangeLog              | 6 +++---
 v1/drv/hisi_sec_udrv.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 0cf3051..5824120 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -2,15 +2,15 @@ UADK 2.3.11, Released Jul 9th, 2021

 Feature list:

-	Support SVA on ZIP/SEC/HPRE accelerator on kunpeng930.
+	Support SVA on ZIP/SEC/HPRE accelerator on kunpeng.

 	Support of user space algorithm API layer of Warpdrive.

 	Offload Warpdrive WCRYPTO asymmetric algorithms with HPRE DH/RSA
-	on kunpeng920/930.
+	on kunpeng.

 	Offload elliptic curve EC algorithms SM2/ECDH/ECDSA/X25519/X448 on
-	kunpeng930 HPRE in user space.
+	kunpeng HPRE in user space.

 	Offload Warpdrive WCRYPTO symmetric algorithms AES/SM4/SM3/AEAD
 	.etc algorithms.
diff --git a/v1/drv/hisi_sec_udrv.h b/v1/drv/hisi_sec_udrv.h
index 9c0da47..af96a76 100644
--- a/v1/drv/hisi_sec_udrv.h
+++ b/v1/drv/hisi_sec_udrv.h
@@ -326,7 +326,7 @@ struct bd3_tls_type_back {
 	__u32 pad_len_1p3_back:16;
 };
-/* the kp930 sence */
+/* the hw v2 scene */
 struct hisi_sec_bd3_sqe {
 	__u32 type:4;
 	__u32 inveld:1;
In the digest storage scene, the addresses filled in by users are DMA addresses, which are passed to WD through the opdata priv field. WD only needs to parse these addresses and fill them into the hardware BD; the IOVA map is not required for address translation.
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
---
 v1/drv/hisi_sec_udrv.c | 162 ++++++++++++++++++++++++++++++++---------
 v1/wd_digest.c         |   2 +-
 2 files changed, 128 insertions(+), 36 deletions(-)

diff --git a/v1/drv/hisi_sec_udrv.c b/v1/drv/hisi_sec_udrv.c
index 4d5ede5..f42022b 100644
--- a/v1/drv/hisi_sec_udrv.c
+++ b/v1/drv/hisi_sec_udrv.c
@@ -1088,7 +1088,7 @@ static int fill_digest_bd1_addr(struct wd_queue *q,
 		}
 	}
-	/* for storage scene, data address using physical address */
+	/* for bd1 udata scene, in/out addresses do not need to be mapped. */
 	phy = (uintptr_t)msg->in;
 	sqe->type1.data_src_addr_l = (__u32)(phy & QM_L32BITS_MASK);
 	sqe->type1.data_src_addr_h = HI_U32(phy);
@@ -1100,8 +1100,7 @@
 	return WD_SUCCESS;
 }
-static void fill_digest_bd1_udata(struct hisi_sec_sqe *sqe,
-		struct wd_sec_udata *udata)
+static void fill_digest_bd1_udata(struct hisi_sec_sqe *sqe, struct wd_sec_udata *udata)
 {
 	sqe->type1.gran_num = udata->gran_num;
 	sqe->type1.src_skip_data_len = udata->src_offset;
@@ -1167,17 +1166,11 @@ static int set_hmac_mode(struct wcrypto_digest_msg *msg,
 	return 0;
 }

-static int fill_digest_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
-		struct wcrypto_digest_msg *msg, struct wcrypto_digest_tag *tag)
+static int fill_digest_bd2_addr(struct wd_queue *q, struct wcrypto_digest_msg *msg,
+		struct hisi_sec_sqe *sqe)
 {
-	int ret = -WD_ENOMEM;
 	uintptr_t phy;
-
-	sqe->type = BD_TYPE2;
-	sqe->scene = SCENE_IPSEC;
-
-	sqe->auth = AUTH_MAC_CALCULATE;
-	sqe->type2.a_len = msg->in_bytes;
+	int ret;

 	phy = (uintptr_t)drv_iova_map(q, msg->in, msg->in_bytes);
 	if (unlikely(!phy)) {
@@ -1199,28 +1192,107 @@ static int fill_digest_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 	if (ret)
 		goto map_key_error;
+	return WD_SUCCESS;
+
+map_key_error:
+	unmap_addr(q, msg->out, msg->out_bytes, sqe->type2.mac_addr_l,
+		sqe->type2.mac_addr_h, msg->data_fmt);
+map_out_error:
+	phy = DMA_ADDR(sqe->type2.data_src_addr_h, sqe->type2.data_src_addr_l);
+	drv_iova_unmap(q, msg->in, (void *)(uintptr_t)phy, msg->in_bytes);
+	return ret;
+}
+
+static void fill_digest_bd2_udata_inner(struct wcrypto_digest_msg *msg,
+		struct hisi_sec_sqe *sqe)
+{
+	uintptr_t phy;
+
+	/* for bd2 udata scene, addresses do not need to be mapped. */
+	phy = (uintptr_t)msg->in;
+	sqe->type2.data_src_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.data_src_addr_h = HI_U32(phy);
+	phy = (uintptr_t)msg->out;
+	sqe->type2.mac_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.mac_addr_h = HI_U32(phy);
+
+	if (msg->mode == WCRYPTO_DIGEST_HMAC) {
+		sqe->type2.a_key_len = msg->key_bytes / SEC_SQE_LEN_RATE;
+		phy = (uintptr_t)msg->key;
+		sqe->type2.a_key_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+		sqe->type2.a_key_addr_h = HI_U32(phy);
+	}
+}
+
+static int fill_digest_bd2_common(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_digest_msg *msg, struct wcrypto_digest_tag *tag)
+{
+	int ret;
+
+	sqe->type = BD_TYPE2;
+	sqe->scene = SCENE_IPSEC;
+
+	sqe->auth = AUTH_MAC_CALCULATE;
+	sqe->type2.a_len = msg->in_bytes;
+
 	ret = fill_digest_bd2_alg(msg, sqe);
 	if (ret != WD_SUCCESS) {
 		WD_ERR("fill_digest_bd2_alg fail!\n");
-		goto map_alg_error;
+		return ret;
 	}
-	qm_fill_digest_long_bd(msg, sqe);
 	if (tag)
 		sqe->type2.tag = tag->wcrypto_tag.ctx_id;

 	return ret;
+}
+
+static int fill_digest_bd2_udata(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_digest_msg *msg, struct wcrypto_digest_tag *tag)
+{
+	int ret;
+
+	ret = fill_digest_bd2_common(q, sqe, msg, tag);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	fill_digest_bd2_udata_inner(msg, sqe);
+	qm_fill_digest_long_bd(msg, sqe);
+
+	return WD_SUCCESS;
+}
+
+static int fill_digest_bd_udata(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_digest_msg *msg, struct wcrypto_digest_tag *tag)
+{
+	struct wd_sec_udata *udata = tag->priv;
+
+	if (udata->key) {
+		msg->key = udata->key;
+		msg->key_bytes = udata->key_bytes;
+	}
+
+	if (udata->gran_num)
+		return fill_digest_bd1(q, sqe, msg, tag);
+
+	return fill_digest_bd2_udata(q, sqe, msg, tag);
+}
+
+static int fill_digest_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_digest_msg *msg, struct wcrypto_digest_tag *tag)
+{
+	int ret;
+
+	ret = fill_digest_bd2_common(q, sqe, msg, tag);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	ret = fill_digest_bd2_addr(q, msg, sqe);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	qm_fill_digest_long_bd(msg, sqe);

-map_alg_error:
-	unmap_addr(q, msg->key, msg->key_bytes, sqe->type2.a_key_addr_l,
-		sqe->type2.a_key_addr_h, msg->data_fmt);
-map_key_error:
-	unmap_addr(q, msg->out, msg->out_bytes, sqe->type2.mac_addr_l,
-		sqe->type2.mac_addr_h, msg->data_fmt);
-map_out_error:
-	phy = DMA_ADDR(sqe->type2.data_src_addr_h,
-		sqe->type2.data_src_addr_l);
-	drv_iova_unmap(q, msg->in, (void *)(uintptr_t)phy, msg->in_bytes);
 	return ret;
 }
@@ -1231,12 +1303,12 @@ map_out_error:
  */
 int qm_fill_digest_sqe(void *message, struct qm_queue_info *info, __u16 i)
 {
-	struct hisi_sec_sqe *sqe;
 	struct wcrypto_digest_msg *msg = message;
-	struct wd_queue *q = info->q;
 	struct wcrypto_digest_tag *tag = (void *)(uintptr_t)msg->usr_data;
-	int ret;
+	struct wd_queue *q = info->q;
+	struct hisi_sec_sqe *sqe;
 	uintptr_t temp;
+	int ret;

 	temp = (uintptr_t)info->sq_base + i * info->sqe_size;
 	sqe = (struct hisi_sec_sqe *)temp;
@@ -1246,7 +1318,7 @@ int qm_fill_digest_sqe(void *message, struct qm_queue_info *info, __u16 i)
 	fill_bd_addr_type(msg->data_fmt, sqe);

 	if (tag->priv)
-		ret = fill_digest_bd1(q, sqe, msg, tag);
+		ret = fill_digest_bd_udata(q, sqe, msg, tag);
 	else
 		ret = fill_digest_bd2(q, sqe, msg, tag);
@@ -1406,19 +1478,25 @@ int qm_fill_digest_bd3_sqe(void *message, struct qm_queue_info *info, __u16 i)
 {
 	struct wcrypto_digest_msg *msg = message;
 	struct wcrypto_digest_tag *tag = (void *)(uintptr_t)msg->usr_data;
-	struct hisi_sec_bd3_sqe *sqe;
 	struct wd_queue *q = info->q;
+	struct hisi_sec_bd3_sqe *sqe;
+	struct hisi_sec_sqe *sqe2;
 	uintptr_t temp;
 	int ret;

 	temp = (uintptr_t)info->sq_base + i * info->sqe_size;
-	sqe = (struct hisi_sec_bd3_sqe *)temp;
-
-	memset(sqe, 0, sizeof(struct hisi_sec_bd3_sqe));
-
-	fill_bd3_addr_type(msg->data_fmt, sqe);
-
-	ret = fill_digest_bd3(q, sqe, msg, tag);
+	if (tag->priv) {
+		sqe2 = (struct hisi_sec_sqe *)temp;
+		memset(sqe2, 0, sizeof(struct hisi_sec_sqe));
+		fill_bd_addr_type(msg->data_fmt, sqe2);
+		ret = fill_digest_bd_udata(q, sqe2, msg, tag);
+	} else {
+		sqe = (struct hisi_sec_bd3_sqe *)temp;
+		memset(sqe, 0, sizeof(struct hisi_sec_bd3_sqe));
+		fill_bd3_addr_type(msg->data_fmt, sqe);
+		ret = fill_digest_bd3(q, sqe, msg, tag);
+	}
 	if (ret != WD_SUCCESS)
 		return ret;
@@ -1660,6 +1738,7 @@ static void parse_digest_bd1(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 static void parse_digest_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 		struct wcrypto_digest_msg *digest_msg)
 {
+	struct wcrypto_digest_tag *tag;
 	__u64 dma_addr;

 	if (sqe->type2.done != SEC_HW_TASK_DONE
@@ -1670,6 +1749,10 @@ static void parse_digest_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 	} else
 		digest_msg->result = WD_SUCCESS;

+	tag = (void *)(uintptr_t)digest_msg->usr_data;
+	if (tag->priv)
+		return;
+
 	dma_addr = DMA_ADDR(sqe->type2.data_src_addr_h,
 		sqe->type2.data_src_addr_l);
 	drv_iova_unmap(q, digest_msg->in, (void *)(uintptr_t)dma_addr,
@@ -2283,6 +2366,7 @@ int qm_parse_digest_bd3_sqe(void *msg, const struct qm_queue_info *info,
 {
 	struct wcrypto_digest_msg *digest_msg = info->req_cache[i];
 	struct hisi_sec_bd3_sqe *sqe = msg;
+	struct hisi_sec_sqe *sqe2 = msg;
 	struct wd_queue *q = info->q;

 	if (unlikely(!digest_msg)) {
@@ -2294,6 +2378,14 @@ int qm_parse_digest_bd3_sqe(void *msg, const struct qm_queue_info *info,
 		if (usr && sqe->tag_l != usr)
 			return 0;
 		parse_digest_bd3(q, sqe, digest_msg);
+	} else if (sqe->type == BD_TYPE2) {
+		if (usr && sqe2->type2.tag != usr)
+			return 0;
+		parse_digest_bd2(q, sqe2, digest_msg);
+	} else if (sqe->type == BD_TYPE1) {
+		if (usr && sqe2->type1.tag != usr)
+			return 0;
+		parse_digest_bd1(q, sqe2, digest_msg);
 	} else {
 		WD_ERR("SEC Digest BD Type error\n");
 		digest_msg->result = WD_IN_EPARA;
diff --git a/v1/wd_digest.c b/v1/wd_digest.c
index 86a7751..809aecd 100644
--- a/v1/wd_digest.c
+++ b/v1/wd_digest.c
@@ -64,7 +64,7 @@ static void del_ctx_key(struct wcrypto_digest_ctx *ctx)
 	 * want to clear the SGL buffer, we can only use 'wd_sgl_cp_from_pbuf'
 	 * whose 'pbuf' is all zero.
 	 */
-	if (ctx->key) {
+	if (ctx->key && ctx->key_bytes) {
 		if (ctx->setup.data_fmt == WD_FLAT_BUF)
 			memset(ctx->key, 0, MAX_HMAC_KEY_SIZE);
 		else if (ctx->setup.data_fmt == WD_SGL_BUF)
In the aead storage scene, the addresses filled in by users are DMA addresses, which are passed to WD through the opdata priv field. WD only needs to parse these addresses and fill them into the hardware BD; the IOVA map is not required for address translation.
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
---
 v1/drv/hisi_sec_udrv.c | 142 ++++++++++++++++++++++++++++++++++++-----
 v1/wd_aead.c           |  10 +--
 v1/wd_util.h           |  13 ++++
 3 files changed, 142 insertions(+), 23 deletions(-)

diff --git a/v1/drv/hisi_sec_udrv.c b/v1/drv/hisi_sec_udrv.c
index f42022b..32bded4 100644
--- a/v1/drv/hisi_sec_udrv.c
+++ b/v1/drv/hisi_sec_udrv.c
@@ -64,6 +64,11 @@ static int g_hmac_a_alg[WCRYPTO_MAX_DIGEST_TYPE] = {
 	A_ALG_HMAC_SHA512, A_ALG_HMAC_SHA512_224, A_ALG_HMAC_SHA512_256
 };

+static void parse_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_aead_msg *msg);
+static int fill_aead_bd_udata(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_aead_msg *msg, struct wcrypto_aead_tag *tag);
+
 #ifdef DEBUG_LOG
 static void sec_dump_bd(unsigned char *bd, unsigned int len)
 {
@@ -2224,6 +2229,7 @@ int qm_fill_aead_bd3_sqe(void *message, struct qm_queue_info *info, __u16 i)
 	struct wcrypto_aead_tag *tag = (void *)(uintptr_t)msg->usr_data;
 	struct wd_queue *q = info->q;
 	struct hisi_sec_bd3_sqe *sqe;
+	struct hisi_sec_sqe *sqe2;
 	uintptr_t temp;
 	int ret;
@@ -2235,13 +2241,17 @@ int qm_fill_aead_bd3_sqe(void *message, struct qm_queue_info *info, __u16 i)
 	}

 	temp = (uintptr_t)info->sq_base + i * info->sqe_size;
-	sqe = (struct hisi_sec_bd3_sqe *)temp;
-
-	memset(sqe, 0, sizeof(struct hisi_sec_bd3_sqe));
-
-	fill_bd3_addr_type(msg->data_fmt, sqe);
-
-	ret = fill_aead_bd3(q, sqe, msg, tag);
+	if (tag->priv) {
+		sqe2 = (struct hisi_sec_sqe *)temp;
+		memset(sqe2, 0, sizeof(struct hisi_sec_sqe));
+		fill_bd_addr_type(msg->data_fmt, sqe2);
+		ret = fill_aead_bd_udata(q, sqe2, msg, tag);
+	} else {
+		sqe = (struct hisi_sec_bd3_sqe *)temp;
+		memset(sqe, 0, sizeof(struct hisi_sec_bd3_sqe));
+		fill_bd3_addr_type(msg->data_fmt, sqe);
+		ret = fill_aead_bd3(q, sqe, msg, tag);
+	}
 	if (ret != WD_SUCCESS)
 		return ret;

@@ -2313,6 +2323,7 @@ int qm_parse_aead_bd3_sqe(void *msg, const struct qm_queue_info *info,
 {
 	struct wcrypto_aead_msg *aead_msg = info->req_cache[i];
 	struct hisi_sec_bd3_sqe *sqe = msg;
+	struct hisi_sec_sqe *sqe2 = msg;
 	struct wd_queue *q = info->q;

 	if (unlikely(!aead_msg)) {
@@ -2324,6 +2335,10 @@ int qm_parse_aead_bd3_sqe(void *msg, const struct qm_queue_info *info,
 		if (usr && sqe->tag_l != usr)
 			return 0;
 		parse_aead_bd3(q, sqe, aead_msg);
+	} else if (sqe->type == BD_TYPE2) {
+		if (usr && sqe2->type2.tag != usr)
+			return 0;
+		parse_aead_bd2(q, sqe2, aead_msg);
 	} else {
 		WD_ERR("SEC BD Type error\n");
 		aead_msg->result = WD_IN_EPARA;
@@ -2664,7 +2679,45 @@ static int aead_param_len_check(struct wcrypto_aead_msg *msg)
 	return 0;
 }
-static int fill_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+static int fill_aead_bd2_udata_inner(struct wcrypto_aead_msg *msg,
+		struct hisi_sec_sqe *sqe, struct wd_aead_udata *udata)
+{
+	uintptr_t phy;
+
+	sqe->type2.auth_src_offset = udata->src_offset;
+	sqe->type2.cipher_src_offset = udata->src_offset + msg->assoc_bytes;
+	/* for bd2 udata scene, addresses do not need to be mapped. */
+	phy = (uintptr_t)msg->in;
+	sqe->type2.data_src_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.data_src_addr_h = HI_U32(phy);
+	phy = (uintptr_t)msg->out;
+	sqe->type2.data_dst_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.data_dst_addr_h = HI_U32(phy);
+	phy = (uintptr_t)msg->iv;
+	sqe->type2.c_ivin_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.c_ivin_addr_h = HI_U32(phy);
+
+	phy = (uintptr_t)udata->ckey;
+	sqe->type2.c_key_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.c_key_addr_h = HI_U32(phy);
+	phy = (uintptr_t)udata->mac;
+	sqe->type2.mac_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+	sqe->type2.mac_addr_h = HI_U32(phy);
+	if (msg->cmode == WCRYPTO_CIPHER_CCM || msg->cmode == WCRYPTO_CIPHER_GCM) {
+		if (udata->aiv) {
+			phy = (uintptr_t)udata->aiv;
+			sqe->type2.a_ivin_addr_l = (__u32)(phy & QM_L32BITS_MASK);
+			sqe->type2.a_ivin_addr_h = HI_U32(phy);
+		} else {
+			WD_ERR("Invalid aiv addr in CCM/GCM mode!\n");
+			return -WD_EINVAL;
+		}
+	}
+
+	return WD_SUCCESS;
+}
+
+static int fill_aead_bd2_common(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 		struct wcrypto_aead_msg *msg, struct wcrypto_aead_tag *tag)
 {
 	int ret;
@@ -2686,26 +2739,77 @@ static int fill_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 	ret = fill_aead_bd2_alg(msg, sqe);
 	if (ret != WD_SUCCESS) {
-		WD_ERR("fill_cipher_bd2_alg fail!\n");
+		WD_ERR("fill_aead_bd2_alg fail!\n");
 		return ret;
 	}

 	ret = fill_aead_bd2_mode(msg, sqe);
 	if (ret != WD_SUCCESS) {
-		WD_ERR("fill_cipher_bd2_mode fail!\n");
+		WD_ERR("fill_aead_bd2_mode fail!\n");
 		return ret;
 	}

-	ret = fill_aead_bd2_addr(q, msg, sqe);
-	if (ret != WD_SUCCESS)
-		return ret;
-
 	if (tag)
 		sqe->type2.tag = tag->wcrypto_tag.ctx_id;

 	return ret;
 }
+static int fill_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_aead_msg *msg, struct wcrypto_aead_tag *tag)
+{
+	int ret;
+
+	ret = fill_aead_bd2_common(q, sqe, msg, tag);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	return fill_aead_bd2_addr(q, msg, sqe);
+}
+
+static int init_msg_with_udata(struct wcrypto_aead_msg *req, struct wd_aead_udata *udata)
+{
+	if (!udata->ckey || !udata->mac) {
+		WD_ERR("invalid udata para!\n");
+		return -WD_EINVAL;
+	}
+
+	if (req->cmode == WCRYPTO_CIPHER_CCM || req->cmode == WCRYPTO_CIPHER_GCM) {
+		req->ckey_bytes = udata->ckey_bytes;
+		req->auth_bytes = udata->mac_bytes;
+	} else {
+		WD_ERR("invalid cmode para!\n");
+		return -WD_EINVAL;
+	}
+
+	return WD_SUCCESS;
+}
+
+static int fill_aead_bd2_udata(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_aead_msg *msg, struct wcrypto_aead_tag *tag)
+{
+	int ret;
+
+	ret = fill_aead_bd2_common(q, sqe, msg, tag);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	return fill_aead_bd2_udata_inner(msg, sqe, (struct wd_aead_udata *)tag->priv);
+}
+
+static int fill_aead_bd_udata(struct wd_queue *q, struct hisi_sec_sqe *sqe,
+		struct wcrypto_aead_msg *msg, struct wcrypto_aead_tag *tag)
+{
+	struct wd_aead_udata *udata = tag->priv;
+	int ret;
+
+	ret = init_msg_with_udata(msg, udata);
+	if (ret != WD_SUCCESS)
+		return ret;
+
+	return fill_aead_bd2_udata(q, sqe, msg, tag);
+}
+
 int qm_fill_aead_sqe(void *message, struct qm_queue_info *info, __u16 i)
 {
 	struct wcrypto_aead_msg *msg = message;
@@ -2727,7 +2831,10 @@ int qm_fill_aead_sqe(void *message, struct qm_queue_info *info, __u16 i)
 	memset(sqe, 0, sizeof(struct hisi_sec_sqe));

-	ret = fill_aead_bd2(q, sqe, msg, tag);
+	if (tag->priv)
+		ret = fill_aead_bd_udata(q, sqe, msg, tag);
+	else
+		ret = fill_aead_bd2(q, sqe, msg, tag);
 	if (ret != WD_SUCCESS)
 		return ret;
@@ -2743,6 +2850,7 @@ int qm_fill_aead_sqe(void *message, struct qm_queue_info *info, __u16 i)
 static void parse_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 		struct wcrypto_aead_msg *msg)
 {
+	struct wcrypto_aead_tag *tag;
 	__u8 mac[64] = { 0 };
 	__u64 dma_addr;
 	int ret;
@@ -2756,6 +2864,10 @@ static void parse_aead_bd2(struct wd_queue *q, struct hisi_sec_sqe *sqe,
 		msg->result = WD_SUCCESS;
 	}

+	tag = (void *)(uintptr_t)msg->usr_data;
+	if (tag->priv)
+		return;
+
 	/*
 	 * We obtain a memory from IV SGL as a temporary address space for MAC,
 	 * After the encryption is completed, copy the data from this temporary
diff --git a/v1/wd_aead.c b/v1/wd_aead.c
index 7abb8a0..f81f327 100644
--- a/v1/wd_aead.c
+++ b/v1/wd_aead.c
@@ -79,14 +79,14 @@ static void del_ctx_key(struct wcrypto_aead_ctx *ctx)
 	 * want to clear the SGL buffer, we can only use 'wd_sgl_cp_from_pbuf'
 	 * whose 'pbuf' is all zero.
 	 */
-	if (ctx->ckey) {
+	if (ctx->ckey && ctx->ckey_bytes) {
 		if (ctx->setup.data_fmt == WD_FLAT_BUF)
 			memset(ctx->ckey, 0, MAX_CIPHER_KEY_SIZE);
 		else if (ctx->setup.data_fmt == WD_SGL_BUF)
 			wd_sgl_cp_from_pbuf(ctx->ckey, 0, tmp, MAX_CIPHER_KEY_SIZE);
 	}
-	if (ctx->akey) {
+	if (ctx->akey && ctx->akey_bytes) {
 		if (ctx->setup.data_fmt == WD_FLAT_BUF)
 			memset(ctx->akey, 0, MAX_AEAD_KEY_SIZE);
 		else if (ctx->setup.data_fmt == WD_SGL_BUF)
@@ -445,7 +445,6 @@ static int aead_requests_init(struct wcrypto_aead_msg **req,
 		struct wcrypto_aead_op_data **op,
 		struct wcrypto_aead_ctx *ctx, __u32 num)
 {
-	struct wd_sec_udata *udata;
 	int ret;
 	__u32 i;

@@ -471,11 +470,6 @@ static int aead_requests_init(struct wcrypto_aead_msg **req,
 		req[i]->out_bytes = op[i]->out_bytes;
 		req[i]->assoc_bytes = op[i]->assoc_size;
 		req[i]->auth_bytes = ctx->auth_size;
-		udata = op[i]->priv;
-		if (udata && udata->key) {
-			req[i]->ckey = udata->key;
-			req[i]->ckey_bytes = udata->key_bytes;
-		}

 		req[i]->aiv = ctx->setup.br.alloc(ctx->setup.br.usr,
 				MAX_AEAD_KEY_SIZE);
diff --git a/v1/wd_util.h b/v1/wd_util.h
index fc4586c..617af3b 100644
--- a/v1/wd_util.h
+++ b/v1/wd_util.h
@@ -175,6 +175,19 @@ struct wd_sec_udata {
 	__u8 *key;
 };

+struct wd_aead_udata {
+	__u32 src_offset;
+	__u32 dst_offset;
+	__u16 ckey_bytes;
+	__u16 akey_bytes;
+	__u16 aiv_bytes;
+	__u16 mac_bytes;
+	__u8 *ckey;
+	__u8 *akey;
+	__u8 *aiv;
+	__u8 *mac;
+};
+
 /* Digest tag format of Warpdrive */
 struct wcrypto_digest_tag {
 	struct wcrypto_cb_tag wcrypto_tag;
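As with the digest patch, a hedged usage sketch of the new AEAD udata path: only the struct wd_aead_udata fields and the priv hand-off come from this patch; do_aead_with_udata() and the *_dma parameters are illustrative assumptions:

/* Hedged usage sketch, not part of the patch: every *_dma parameter is
 * assumed to be a DMA address owned by the storage stack, and ctx is an
 * AEAD ctx from wcrypto_create_aead_ctx() set up for CCM or GCM.
 */
static int do_aead_with_udata(void *ctx, __u64 src_dma, __u64 dst_dma,
			      __u64 iv_dma, __u64 ckey_dma, __u64 mac_dma,
			      __u64 aiv_dma, __u32 in_len)
{
	struct wcrypto_aead_op_data opdata = {0};
	struct wd_aead_udata udata = {0};

	udata.ckey = (__u8 *)(uintptr_t)ckey_dma;	/* required, checked by init_msg_with_udata() */
	udata.ckey_bytes = 16;
	udata.mac = (__u8 *)(uintptr_t)mac_dma;		/* required */
	udata.mac_bytes = 16;
	udata.aiv = (__u8 *)(uintptr_t)aiv_dma;		/* must be set for CCM/GCM */

	opdata.in = (void *)(uintptr_t)src_dma;		/* filled into the BD as-is */
	opdata.out = (void *)(uintptr_t)dst_dma;
	opdata.iv = (void *)(uintptr_t)iv_dma;
	opdata.in_bytes = in_len;
	opdata.priv = &udata;				/* non-NULL priv selects fill_aead_bd_udata() */

	return wcrypto_do_aead(ctx, &opdata, NULL);
}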