From: wangzengliang wangzengliang2@huawei.com
Signed-off-by: wangzengliang wangzengliang2@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_cipher.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index b506c22..adcde01 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -67,6 +67,7 @@ struct cipher_priv_ctx { /* Crypto small packet offload threshold */ size_t switch_threshold; bool update_iv; + struct sched_params sched_param; };
struct cipher_info { @@ -690,11 +691,26 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) return 1; }
+static int uadk_e_cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int numa_node, void *ptr) +{ + struct cipher_priv_ctx *priv = + (struct cipher_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + + if (unlikely(!priv)) { + fprintf(stderr, "cipher priv ctx is NULL!\n"); + return 0; + } + + priv->sched_param.numa_id = numa_node; + priv->setup.sched_param = (void *)&(priv->sched_param); + return 1; +} + static void uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct cipher_priv_ctx *priv) { __u32 cipher_counts = ARRAY_SIZE(cipher_info_table); - struct sched_params params = {0}; - int nid, ret; + struct sched_params *para; + int nid, ret, type; __u32 i;
priv->req.iv_bytes = EVP_CIPHER_CTX_iv_length(ctx); @@ -715,14 +731,17 @@ static void uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct cipher_priv_ctx *priv) * the cipher algorithm does not distinguish between * encryption and decryption queues */ - params.type = priv->req.op_type; + type = priv->req.op_type; ret = uadk_e_is_env_enabled("cipher"); if (ret) - params.type = 0; + type = 0;
/* Use the default numa parameters */ - params.numa_id = -1; - priv->setup.sched_param = ¶ms; + if (priv->setup.sched_param != &priv->sched_param) + uadk_e_cipher_ctrl(ctx, 0, -1, NULL); + + para = (struct sched_params *)priv->setup.sched_param; + para->type = type;
if (!priv->sess) { nid = EVP_CIPHER_CTX_nid(ctx); @@ -820,6 +839,7 @@ do { \ !EVP_CIPHER_meth_set_init(uadk_##name, uadk_e_cipher_init) || \ !EVP_CIPHER_meth_set_do_cipher(uadk_##name, uadk_e_do_cipher) || \ !EVP_CIPHER_meth_set_cleanup(uadk_##name, uadk_e_cipher_cleanup) || \ + !EVP_CIPHER_meth_set_ctrl(uadk_##name, uadk_e_cipher_ctrl) || \ !EVP_CIPHER_meth_set_set_asn1_params(uadk_##name, EVP_CIPHER_set_asn1_iv) || \ !EVP_CIPHER_meth_set_get_asn1_params(uadk_##name, EVP_CIPHER_get_asn1_iv)) \ return 0; \
From: wangzengliang wangzengliang2@huawei.com
Signed-off-by: wangzengliang wangzengliang2@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_digest.c | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index 9a583b4..1cb78fa 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -100,6 +100,7 @@ struct digest_priv_ctx { uint32_t app_datasize; bool is_stream_copy; size_t total_data_len; + struct sched_params sched_param; };
struct digest_info { @@ -529,12 +530,25 @@ static void digest_priv_ctx_reset(struct digest_priv_ctx *priv) priv->is_stream_copy = false; }
+static int uadk_e_digest_ctrl(EVP_MD_CTX *ctx, int cmd, int numa_node, void *p2) +{ + struct digest_priv_ctx *priv = + (struct digest_priv_ctx *)EVP_MD_CTX_md_data(ctx); + + if (unlikely(!priv)) { + fprintf(stderr, "digest priv ctx is NULL!\n"); + return 0; + } + priv->sched_param.numa_id = numa_node; + priv->setup.sched_param = (void *)&(priv->sched_param); + return 1; +} + static int uadk_e_digest_init(EVP_MD_CTX *ctx) { struct digest_priv_ctx *priv = (struct digest_priv_ctx *) EVP_MD_CTX_md_data(ctx); __u32 digest_counts = ARRAY_SIZE(digest_info_table); - struct sched_params params = {0}; __u32 i; int ret;
@@ -568,8 +582,9 @@ static int uadk_e_digest_init(EVP_MD_CTX *ctx) }
/* Use the default numa parameters */ - params.numa_id = -1; - priv->setup.sched_param = ¶ms; + if (priv->setup.sched_param != &priv->sched_param) + uadk_e_digest_ctrl(ctx, 0, -1, NULL); + if (!priv->sess) { priv->sess = wd_digest_alloc_sess(&priv->setup); if (unlikely(!priv->sess)) @@ -952,7 +967,7 @@ free_sess:
#define UADK_DIGEST_DESCR(name, pkey_type, md_size, flags, \ - block_size, ctx_size, init, update, final, cleanup, copy) \ + block_size, ctx_size, init, update, final, cleanup, copy, ctrl) \ do { \ uadk_##name = EVP_MD_meth_new(NID_##name, NID_##pkey_type); \ if (uadk_##name == 0 || \ @@ -964,7 +979,8 @@ do { \ !EVP_MD_meth_set_update(uadk_##name, update) || \ !EVP_MD_meth_set_final(uadk_##name, final) || \ !EVP_MD_meth_set_cleanup(uadk_##name, cleanup) || \ - !EVP_MD_meth_set_copy(uadk_##name, copy)) \ + !EVP_MD_meth_set_copy(uadk_##name, copy) || \ + !EVP_MD_meth_set_ctrl(uadk_##name, ctrl)) \ return 0; \ } while (0)
@@ -980,43 +996,43 @@ int uadk_e_bind_digest(ENGINE *e) sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sm3, sm3WithRSAEncryption, SM3_DIGEST_LENGTH, 0, SM3_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sha1, sha1WithRSAEncryption, SHA_DIGEST_LENGTH, EVP_MD_FLAG_FIPS, SHA1_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sha224, sha224WithRSAEncryption, SHA224_DIGEST_LENGTH, EVP_MD_FLAG_FIPS, SHA224_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sha256, sha256WithRSAEncryption, SHA256_DIGEST_LENGTH, EVP_MD_FLAG_FIPS, SHA256_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sha384, sha384WithRSAEncryption, SHA384_DIGEST_LENGTH, EVP_MD_FLAG_FIPS, SHA384_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl); UADK_DIGEST_DESCR(sha512, sha512WithRSAEncryption, SHA512_DIGEST_LENGTH, EVP_MD_FLAG_FIPS, SHA512_CBLOCK, sizeof(EVP_MD *) + sizeof(struct digest_priv_ctx), uadk_e_digest_init, uadk_e_digest_update, uadk_e_digest_final, 
uadk_e_digest_cleanup, - uadk_e_digest_copy); + uadk_e_digest_copy, uadk_e_digest_ctrl);
return ENGINE_set_digests(e, uadk_engine_digests); }
From: Wenkai Lin linwenkai6@hisilicon.com
The current aead stream mode requires that all data lengths at the update stage be 16-byte aligned and that a 0-byte tail packet be sent at the final stage, which means the total data length must be 16-byte aligned. This patch fixes it by changing the time when the final packet is sent: if the data at the update stage is not 16-byte aligned, a final packet is sent immediately and no operation is performed at the final stage; if the data at the update stage is 16-byte aligned, the final packet is kept for the final stage. This change requires users to split data based on 16-byte alignment and then send the last non-16-byte-aligned data.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_aead.c | 76 +++++++++++++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 31 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 2a5c024..6c2d66e 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -446,6 +446,26 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void } }
+static int do_aead_sync_inner(struct aead_priv_ctx *priv, unsigned char *out, + unsigned char *in, size_t inlen, enum wd_aead_msg_state state) +{ + int ret; + + priv->req.msg_state = state; + priv->req.src = (__u8 *)in; + priv->req.dst = out; + priv->req.in_bytes = inlen; + priv->req.state = 0; + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (unlikely(ret < 0 || priv->req.state)) { + fprintf(stderr, "do aead task failed, msg state: %d, ret: %d, state: %u!\n", + state, ret, priv->req.state); + return RET_FAIL; + } + + return inlen; +} + static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *out, const unsigned char *in, size_t inlen) { @@ -459,14 +479,9 @@ static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *ou return 1; }
- priv->req.src = (unsigned char *)in; - priv->req.msg_state = AEAD_MSG_FIRST; - - ret = wd_do_aead_sync(priv->sess, &priv->req); - if (unlikely(ret < 0)) { - fprintf(stderr, "do aead first operation failed, ret: %d!\n", ret); + ret = do_aead_sync_inner(priv, out, in, inlen, AEAD_MSG_FIRST); + if (unlikely(ret < 0)) return RET_FAIL; - }
return 1; } @@ -474,23 +489,29 @@ static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *ou static int do_aead_sync(struct aead_priv_ctx *priv, unsigned char *out, const unsigned char *in, size_t inlen) { + size_t nblocks, nbytes; + __u8 tail; int ret;
/* Due to a hardware limitation, zero-length aad using block mode. */ - if (priv->req.assoc_bytes) - priv->req.msg_state = AEAD_MSG_MIDDLE; - else - priv->req.msg_state = AEAD_MSG_BLOCK; + if (!priv->req.assoc_bytes) + return do_aead_sync_inner(priv, out, in, inlen, AEAD_MSG_BLOCK);
- priv->req.src = (unsigned char *)in; - priv->req.dst = out; - priv->req.in_bytes = inlen; - priv->req.state = 0; - ret = wd_do_aead_sync(priv->sess, &priv->req); - if (ret < 0 || priv->req.state) { - fprintf(stderr, "do aead update operation failed, ret: %d, state: %u!\n", - ret, priv->req.state); - return RET_FAIL; + tail = inlen % AES_BLOCK_SIZE; + nblocks = inlen / AES_BLOCK_SIZE; + nbytes = inlen - tail; + + /* If the data length is not 16-byte aligned, it is split according to the protocol. */ + if (nblocks) { + ret = do_aead_sync_inner(priv, out, in, nbytes, AEAD_MSG_MIDDLE); + if (ret < 0) + return ret; + } + + if (tail) { + ret = do_aead_sync_inner(priv, out + nbytes, in + nbytes, tail, AEAD_MSG_END); + if (ret < 0) + return ret; }
return inlen; @@ -653,20 +674,13 @@ static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *pr
enc = EVP_CIPHER_CTX_encrypting(ctx);
- if (ASYNC_get_current_job() || !priv->req.assoc_bytes) + if (ASYNC_get_current_job() || !priv->req.assoc_bytes || + priv->req.msg_state == AEAD_MSG_END) goto out;
- priv->req.msg_state = AEAD_MSG_END; - priv->req.src = NULL; - priv->req.in_bytes = 0; - priv->req.dst = out; - priv->req.state = 0; - ret = wd_do_aead_sync(priv->sess, &priv->req); - if (ret < 0 || priv->req.state) { - fprintf(stderr, "do aead final operation failed, ret: %d, state: %u!\n", - ret, priv->req.state); + ret = do_aead_sync_inner(priv, out, in, inlen, AEAD_MSG_END); + if (unlikely(ret < 0)) return RET_FAIL; - }
out: if (enc)
From: Wenkai Lin linwenkai6@hisilicon.com
When the aad length is not 0 and the cipher length is 0, the stream does not enter the update stage and the mac is not set; fix it by copying the mac before the first or update stage.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_aead.c | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 6c2d66e..dafaf92 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -51,6 +51,7 @@ struct aead_priv_ctx { unsigned char iv[AES_GCM_BLOCK_SIZE]; unsigned char mac[AES_GCM_TAG_LEN]; int taglen; + bool is_req_tag_set; };
struct aead_engine { @@ -341,6 +342,7 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, priv->req.mac = priv->mac; priv->req.mac_bytes = AES_GCM_TAG_LEN; priv->taglen = 0; + priv->is_req_tag_set = false; priv->data = NULL;
if (enc) @@ -621,22 +623,11 @@ free_cb_param: return ret; }
-static int uadk_e_do_aes_gcm_update(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv, - unsigned char *out, const unsigned char *in, size_t inlen) +static int uadk_e_do_aes_gcm_update(struct aead_priv_ctx *priv, unsigned char *out, + const unsigned char *in, size_t inlen) { - unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); struct async_op *op; - int ret, enc; - - enc = EVP_CIPHER_CTX_encrypting(ctx); - if (!enc) { - if (priv->taglen == AES_GCM_TAG_LEN) { - memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); - } else { - fprintf(stderr, "invalid: aead gcm mac length only support 16B.\n"); - return RET_FAIL; - } - } + int ret;
if (ASYNC_get_current_job()) { op = malloc(sizeof(struct async_op)); @@ -685,6 +676,8 @@ static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *pr out: if (enc) memcpy(ctx_buf, priv->req.mac, AES_GCM_TAG_LEN); + else + priv->is_req_tag_set = false;
return 0; } @@ -693,6 +686,8 @@ static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inlen) { struct aead_priv_ctx *priv; + unsigned char *ctx_buf; + int enc;
priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(!priv)) { @@ -701,10 +696,22 @@ static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, }
if (in) { + enc = EVP_CIPHER_CTX_encrypting(ctx); + if (!enc && !priv->is_req_tag_set) { + if (likely(priv->taglen == AES_GCM_TAG_LEN)) { + ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + priv->is_req_tag_set = true; + } else { + fprintf(stderr, "invalid: aead gcm mac length only support 16B.\n"); + return RET_FAIL; + } + } + if (out == NULL) return uadk_e_do_aes_gcm_first(priv, out, in, inlen);
- return uadk_e_do_aes_gcm_update(ctx, priv, out, in, inlen); + return uadk_e_do_aes_gcm_update(priv, out, in, inlen); }
return uadk_e_do_aes_gcm_final(ctx, priv, out, NULL, 0);
From: Wenkai Lin linwenkai6@hisilicon.com
First, use a stream mode to mark whether a stream is async, to avoid calling ASYNC_get_current_job every time. Then, since the async job may not be obtained at the init stage, make sure that the data memory is allocated.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_aead.c | 71 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 23 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index dafaf92..9ea491d 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -44,6 +44,12 @@ /* The max data length is 16M-512B */ #define AEAD_BLOCK_SIZE 0xFFFE00
+enum stream_mode { + UNINIT_STREAM, + ASYNC_STREAM, + SYNC_STREAM +}; + struct aead_priv_ctx { handle_t sess; struct wd_aead_req req; @@ -52,6 +58,7 @@ struct aead_priv_ctx { unsigned char mac[AES_GCM_TAG_LEN]; int taglen; bool is_req_tag_set; + enum stream_mode mode; };
struct aead_engine { @@ -289,13 +296,11 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey goto out; }
- if (ASYNC_get_current_job()) { - /* Memory needs to be reserved for both input and output. */ - priv->data = malloc(AEAD_BLOCK_SIZE << 1); - if (unlikely(!priv->data)) { - fprintf(stderr, "uadk engine failed to alloc data!\n"); - goto out; - } + /* Memory needs to be reserved for both input and output. */ + priv->data = malloc(AEAD_BLOCK_SIZE << 1); + if (unlikely(!priv->data)) { + fprintf(stderr, "uadk engine failed to alloc data!\n"); + goto out; } }
@@ -343,6 +348,7 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, priv->req.mac_bytes = AES_GCM_TAG_LEN; priv->taglen = 0; priv->is_req_tag_set = false; + priv->mode = UNINIT_STREAM; priv->data = NULL;
if (enc) @@ -476,7 +482,7 @@ static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *ou priv->req.assoc_bytes = inlen;
/* Asynchronous jobs use the block mode. */ - if (ASYNC_get_current_job()) { + if (priv->mode == ASYNC_STREAM) { memcpy(priv->data, in, inlen); return 1; } @@ -629,7 +635,7 @@ static int uadk_e_do_aes_gcm_update(struct aead_priv_ctx *priv, unsigned char *o struct async_op *op; int ret;
- if (ASYNC_get_current_job()) { + if (priv->mode == ASYNC_STREAM) { op = malloc(sizeof(struct async_op)); if (unlikely(!op)) return RET_FAIL; @@ -665,7 +671,7 @@ static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *pr
enc = EVP_CIPHER_CTX_encrypting(ctx);
- if (ASYNC_get_current_job() || !priv->req.assoc_bytes || + if (priv->mode == ASYNC_STREAM || !priv->req.assoc_bytes || priv->req.msg_state == AEAD_MSG_END) goto out;
@@ -679,6 +685,34 @@ out: else priv->is_req_tag_set = false;
+ priv->mode = UNINIT_STREAM; + return 0; +} + +static int do_aes_gcm_prepare(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv) +{ + unsigned char *ctx_buf; + int enc; + + if (priv->mode == UNINIT_STREAM) { + if (ASYNC_get_current_job()) + priv->mode = ASYNC_STREAM; + else + priv->mode = SYNC_STREAM; + } + + enc = EVP_CIPHER_CTX_encrypting(ctx); + if (!enc && !priv->is_req_tag_set) { + if (likely(priv->taglen == AES_GCM_TAG_LEN)) { + ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + priv->is_req_tag_set = true; + } else { + fprintf(stderr, "invalid: aead gcm mac length only support 16B.\n"); + return RET_FAIL; + } + } + return 0; }
@@ -686,8 +720,7 @@ static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inlen) { struct aead_priv_ctx *priv; - unsigned char *ctx_buf; - int enc; + int ret;
priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(!priv)) { @@ -696,17 +729,9 @@ static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, }
if (in) { - enc = EVP_CIPHER_CTX_encrypting(ctx); - if (!enc && !priv->is_req_tag_set) { - if (likely(priv->taglen == AES_GCM_TAG_LEN)) { - ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); - memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); - priv->is_req_tag_set = true; - } else { - fprintf(stderr, "invalid: aead gcm mac length only support 16B.\n"); - return RET_FAIL; - } - } + ret = do_aes_gcm_prepare(ctx, priv); + if (unlikely(ret)) + return ret;
if (out == NULL) return uadk_e_do_aes_gcm_first(priv, out, in, inlen);
From: Wenkai Lin linwenkai6@hisilicon.com
The return values of the aead functions are unified.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_aead.c | 88 ++++++++++++++++++++++++------------------------- 1 file changed, 44 insertions(+), 44 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 9ea491d..88fdb82 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -27,6 +27,8 @@ #include "uadk_async.h" #include "uadk_utils.h"
+#define UADK_E_SUCCESS 1 +#define UADK_E_FAIL 0 #define RET_FAIL (-1) #define STATE_FAIL 0xFFFF #define CTX_SYNC_ENC 0 @@ -116,7 +118,7 @@ static int uadk_e_aead_poll(void *ctx) else if (ret == -EAGAIN) rx_cnt++; else - return RET_FAIL; + return ret; } while (rx_cnt < ENGINE_RECV_MAX_CNT);
fprintf(stderr, "failed to recv msg: timeout!\n"); @@ -243,14 +245,14 @@ static int uadk_e_init_aead_cipher(void) pthread_spin_lock(&g_aead_engine.lock); if (g_aead_engine.pid == getpid()) { pthread_spin_unlock(&g_aead_engine.lock); - return 1; + return UADK_E_SUCCESS; }
dev = wd_get_accel_dev("aead"); if (!dev) { pthread_spin_unlock(&g_aead_engine.lock); fprintf(stderr, "failed to get device for aead.\n"); - return 0; + return UADK_E_FAIL; }
ret = uadk_e_wd_aead_cipher_init(dev); @@ -258,7 +260,7 @@ static int uadk_e_init_aead_cipher(void) pthread_spin_unlock(&g_aead_engine.lock); fprintf(stderr, "failed to initiate aead cipher.\n"); free(dev); - return 0; + return UADK_E_FAIL; }
g_aead_engine.pid = getpid(); @@ -266,7 +268,7 @@ static int uadk_e_init_aead_cipher(void) free(dev); }
- return 1; + return UADK_E_SUCCESS; }
static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey, @@ -276,13 +278,13 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey
ret = uadk_e_init_aead_cipher(); if (!ret) - return 0; + return UADK_E_FAIL;
if (!priv->sess) { priv->sess = wd_aead_alloc_sess(setup); if (!priv->sess) { fprintf(stderr, "uadk engine failed to alloc aead session!\n"); - return 0; + return UADK_E_FAIL; } ret = wd_aead_set_authsize(priv->sess, AES_GCM_TAG_LEN); if (ret < 0) { @@ -304,11 +306,11 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey } }
- return 1; + return UADK_E_SUCCESS; out: wd_aead_free_sess(priv->sess); priv->sess = 0; - return 0; + return UADK_E_FAIL; }
static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, @@ -322,11 +324,11 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (!priv) { fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); - return 0; + return UADK_E_FAIL; }
if (unlikely(!ckey)) - return 1; + return UADK_E_SUCCESS;
if (iv) memcpy(priv->iv, iv, AES_GCM_IV_LEN); @@ -364,11 +366,8 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, setup.sched_param = ¶ms;
ckey_len = EVP_CIPHER_CTX_key_length(ctx); - ret = uadk_e_ctx_init(priv, ckey, ckey_len, &setup); - if (!ret) - return 0;
- return 1; + ret = uadk_e_ctx_init(priv, ckey, ckey_len, &setup); }
static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx) @@ -378,7 +377,7 @@ static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx) priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (!priv) { fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); - return 0; + return UADK_E_FAIL; }
if (priv->sess) { @@ -391,7 +390,7 @@ static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx) priv->data = NULL; }
- return 1; + return UADK_E_SUCCESS; }
static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) @@ -403,54 +402,56 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (!priv) { fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); - return 0; + return UADK_E_FAIL; }
switch (type) { case EVP_CTRL_INIT: priv->req.iv_bytes = 0; - return 1; + return UADK_E_SUCCESS; # if (OPENSSL_VERSION_NUMBER >= 0x1010106fL) case EVP_CTRL_GET_IVLEN: + if (!ptr) + return UADK_E_FAIL; *(int *)ptr = priv->req.iv_bytes; - return 1; + return UADK_E_SUCCESS; #endif case EVP_CTRL_GCM_SET_IVLEN: if (arg != AES_GCM_IV_LEN) { fprintf(stderr, "invalid: aead gcm iv length only support 12B.\n"); - return 0; + return UADK_E_FAIL; } - return 1; + return UADK_E_SUCCESS; case EVP_CTRL_GCM_GET_TAG: if (arg <= 0 || arg > AES_GCM_TAG_LEN || !enc) { fprintf(stderr, "cannot get tag when decrypt or arg is invalid.\n"); - return 0; + return UADK_E_FAIL; }
- if (ctx_buf == NULL || ptr == NULL) { + if (!ctx_buf || !ptr) { fprintf(stderr, "failed to get tag, ctx memory pointer is invalid.\n"); - return 0; + return UADK_E_FAIL; }
memcpy(ptr, ctx_buf, arg); - return 1; + return UADK_E_SUCCESS; case EVP_CTRL_GCM_SET_TAG: if (arg <= 0 || arg > AES_GCM_TAG_LEN || enc) { fprintf(stderr, "cannot set tag when encrypt or arg is invalid.\n"); - return 0; + return UADK_E_FAIL; }
- if (ctx_buf == NULL || ptr == NULL) { + if (!ctx_buf || !ptr) { fprintf(stderr, "failed to set tag, ctx memory pointer is invalid.\n"); - return 0; + return UADK_E_FAIL; }
memcpy(ctx_buf, ptr, arg); priv->taglen = arg; - return 1; + return UADK_E_SUCCESS; default: fprintf(stderr, "unsupported ctrl type: %d\n", type); - return 0; + return UADK_E_FAIL; } }
@@ -484,21 +485,20 @@ static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *ou /* Asynchronous jobs use the block mode. */ if (priv->mode == ASYNC_STREAM) { memcpy(priv->data, in, inlen); - return 1; + return UADK_E_SUCCESS; }
ret = do_aead_sync_inner(priv, out, in, inlen, AEAD_MSG_FIRST); if (unlikely(ret < 0)) return RET_FAIL;
- return 1; + return UADK_E_SUCCESS; }
static int do_aead_sync(struct aead_priv_ctx *priv, unsigned char *out, const unsigned char *in, size_t inlen) { - size_t nblocks, nbytes; - __u8 tail; + size_t nblocks, nbytes, tail; int ret;
/* Due to a hardware limitation, zero-length aad using block mode. */ @@ -575,7 +575,7 @@ static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op,
if (unlikely(priv->req.assoc_bytes + inlen > AEAD_BLOCK_SIZE)) { fprintf(stderr, "aead input data length is too long!\n"); - return 0; + return UADK_E_FAIL; }
do_aead_async_prepare(priv, out, in, inlen); @@ -583,7 +583,7 @@ static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op, cb_param = malloc(sizeof(struct uadk_e_cb_info)); if (unlikely(!cb_param)) { fprintf(stderr, "failed to alloc cb_param.\n"); - return 0; + return UADK_E_FAIL; }
cb_param->op = op; @@ -608,7 +608,7 @@ static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op, continue;
async_free_poll_task(op->idx, 0); - ret = 0; + ret = UADK_E_FAIL; goto free_cb_param; } } while (ret == -EBUSY); @@ -617,7 +617,7 @@ static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op, if (unlikely(!ret || priv->req.state)) { fprintf(stderr, "do aead async job failed, ret: %d, state: %u!\n", ret, priv->req.state); - ret = 0; + ret = UADK_E_FAIL; goto free_cb_param; }
@@ -713,7 +713,7 @@ static int do_aes_gcm_prepare(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv) } }
- return 0; + return UADK_E_SUCCESS; }
static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, @@ -725,15 +725,15 @@ static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); if (unlikely(!priv)) { fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); - return 0; + return RET_FAIL; }
if (in) { ret = do_aes_gcm_prepare(ctx, priv); - if (unlikely(ret)) - return ret; + if (unlikely(ret < 0)) + return RET_FAIL;
- if (out == NULL) + if (!out) return uadk_e_do_aes_gcm_first(priv, out, in, inlen);
return uadk_e_do_aes_gcm_update(priv, out, in, inlen);
From: Wenkai Lin linwenkai6@hisilicon.com
If the task type is uninitialized, it will be zero, which is mistaken for a cipher task and causes a recv timeout problem, so it is better to use 0x1 as the start value of the task type.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_async.c | 2 +- src/uadk_async.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/uadk_async.c b/src/uadk_async.c index aa1407b..3879687 100644 --- a/src/uadk_async.c +++ b/src/uadk_async.c @@ -310,7 +310,7 @@ int async_wake_job(ASYNC_JOB *job)
void async_register_poll_fn(int type, async_recv_t func) { - if (type < 0 || type >= ASYNC_TASK_MAX) { + if (type < ASYNC_TASK_CIPHER || type >= ASYNC_TASK_MAX) { fprintf(stderr, "alg type is error, type= %d.\n", type); return; } diff --git a/src/uadk_async.h b/src/uadk_async.h index 5d73b60..5afc3c6 100644 --- a/src/uadk_async.h +++ b/src/uadk_async.h @@ -42,7 +42,7 @@ struct uadk_e_cb_info { typedef int (*async_recv_t)(void *ctx);
enum task_type { - ASYNC_TASK_CIPHER, + ASYNC_TASK_CIPHER = 0x1, ASYNC_TASK_DIGEST, ASYNC_TASK_AEAD, ASYNC_TASK_RSA,
When doing an sm2 async task with the digest method, there is a probability of a segmentation fault occurring: | SM2_sign_loop | EVP_DigestSign | [...] | sm2_sign | sm2_sign_init_iot | wd_sm2_new_sign_in | [...] | uadk_ecc_get_rand | [...] | RAND_bytes | [...] | rand_bytes | EVP_DigestFinal_ex | uadk_e_digest_cleanup | wd_digest_free_sess | wd_memset_zero The wd_memset_zero() will release the sess, and the address of the async job may get changed, which will cause a segmentation fault.
The solution is to make the async callback param use memory on the heap rather than the stack; otherwise other cleanup-related functions may release the memory on the stack and modify the address of the cb param in unknown scenarios.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_digest.c | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index 1cb78fa..0dc68b1 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -767,25 +767,33 @@ static int do_digest_sync(struct digest_priv_ctx *priv)
static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) { - struct uadk_e_cb_info cb_param; - int idx, ret, cnt; + struct uadk_e_cb_info *cb_param; + int ret = 0; + int cnt = 0; + int idx;
if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { fprintf(stderr, "async cipher init failed.\n"); - return 0; + return ret; }
- cb_param.op = op; - cb_param.priv = priv; + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = priv; priv->req.cb = uadk_e_digest_cb; - priv->req.cb_param = &cb_param; + priv->req.cb_param = cb_param;
ret = async_get_free_task(&idx); if (!ret) - return 0; + goto free_cb_param;
op->idx = idx; - cnt = 0; + do { ret = wd_do_digest_async(priv->sess, &priv->req); if (unlikely(ret < 0)) { @@ -797,14 +805,16 @@ static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) continue;
async_free_poll_task(op->idx, 0); - return 0; + ret = 0; + goto free_cb_param; } } while (ret == -EBUSY);
ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST); - if (!ret) - return 0; - return 1; + +free_cb_param: + free(cb_param); + return ret; }
static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) @@ -871,7 +881,7 @@ sync_err: fprintf(stderr, "do sec digest stream mode failed.\n"); } clear: - async_clear_async_event_notification(); + (void)async_clear_async_event_notification(); free(op); return ret; }
Make async callback param use the memory on heap rather than stack, or other async related functions may modify the address of cb param and cause unknown error.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_cipher.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index adcde01..dce6d1e 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -651,24 +651,31 @@ static int do_cipher_sync(struct cipher_priv_ctx *priv)
static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) { - struct uadk_e_cb_info cb_param; - int idx, ret, cnt; + struct uadk_e_cb_info *cb_param; + int ret = 0; + int cnt = 0; + int idx;
if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { fprintf(stderr, "switch to soft cipher.\n"); - return 0; + return ret; + } + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; }
- cb_param.op = op; - cb_param.priv = priv; + cb_param->op = op; + cb_param->priv = priv; priv->req.cb = uadk_e_cipher_cb; - priv->req.cb_param = &cb_param; + priv->req.cb_param = cb_param;
ret = async_get_free_task(&idx); if (!ret) - return 0; + goto free_cb_param;
- cnt = 0; op->idx = idx; do { ret = wd_do_cipher_async(priv->sess, &priv->req); @@ -681,14 +688,16 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) continue;
async_free_poll_task(op->idx, 0); - return 0; + ret = 0; + goto free_cb_param; } } while (ret == -EBUSY);
ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER); - if (!ret) - return 0; - return 1; + +free_cb_param: + free(cb_param); + return ret; }
static int uadk_e_cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int numa_node, void *ptr) @@ -793,13 +802,14 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, priv->req.out_buf_bytes = inlen;
uadk_e_ctx_init(ctx, priv); + ret = async_setup_async_event_notification(&op); if (!ret) { fprintf(stderr, "failed to setup async event notification.\n"); return 0; }
- if (op.job == NULL) { + if (!op.job) { /* Synchronous, only the synchronous mode supports soft computing */ ret = do_cipher_sync(priv); if (!ret) @@ -820,12 +830,13 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, uadk_cipher_update_priv_ctx(priv);
return 1; + sync_err: ret = uadk_e_cipher_soft_work(ctx, out, in, inlen); if (ret != 1) fprintf(stderr, "do soft ciphers failed.\n"); out_notify: - async_clear_async_event_notification(); + (void)async_clear_async_event_notification(); return ret; }
Make the async callback param use heap memory rather than stack memory; otherwise, other async-related functions may modify the address of the cb param and cause unknown errors.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_dh.c | 121 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 47 deletions(-)
diff --git a/src/uadk_dh.c b/src/uadk_dh.c index 28aa452..582cc95 100644 --- a/src/uadk_dh.c +++ b/src/uadk_dh.c @@ -712,66 +712,93 @@ free_ag: return UADK_E_FAIL; }
-static int dh_do_crypto(struct uadk_dh_sess *dh_sess) +static int dh_do_sync(struct uadk_dh_sess *dh_sess) { - struct uadk_e_cb_info cb_param; - struct async_op op; - int idx, ret, cnt; + int ret;
- ret = async_setup_async_event_notification(&op); - if (!ret) { - printf("failed to setup async event notification.\n"); + ret = wd_do_dh_sync(dh_sess->sess, &dh_sess->req); + if (ret) { + if (ret == -WD_HW_EACCESS) + uadk_e_dh_set_status(); return UADK_E_FAIL; }
- if (!op.job) { - ret = wd_do_dh_sync(dh_sess->sess, &dh_sess->req); - if (ret) { - if (ret == -WD_HW_EACCESS) + return UADK_E_SUCCESS; +} + +static int dh_do_async(struct uadk_dh_sess *dh_sess, struct async_op *op) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int cnt = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = &dh_sess->req; + dh_sess->req.cb = uadk_e_dh_cb; + dh_sess->req.cb_param = cb_param; + dh_sess->req.status = -1; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param; + + op->idx = idx; + do { + ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); + if (unlikely(ret < 0)) { + if (unlikely(ret == -WD_HW_EACCESS)) uadk_e_dh_set_status(); - return UADK_E_FAIL; + else if (unlikely(cnt++ > ENGINE_SEND_MAX_CNT)) + fprintf(stderr, "do dh async operation timeout.\n"); + else + continue; + + async_free_poll_task(op->idx, 0); + ret = UADK_E_FAIL; + goto free_cb_param; } - } else { - cb_param.op = &op; - cb_param.priv = &dh_sess->req; - dh_sess->req.cb = uadk_e_dh_cb; - dh_sess->req.cb_param = &cb_param; - dh_sess->req.status = -1; - ret = async_get_free_task(&idx); - if (!ret) - goto err; + } while (ret == -EBUSY);
- op.idx = idx; - cnt = 0; - do { - ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); - if (unlikely(ret < 0)) { - if (unlikely(ret == -WD_HW_EACCESS)) - uadk_e_dh_set_status(); - else if (unlikely(cnt++ > ENGINE_SEND_MAX_CNT)) - fprintf(stderr, "do dh async operation timeout.\n"); - else - continue; - - async_free_poll_task(op.idx, 0); - goto err; - } - } while (ret == -EBUSY); - - ret = async_pause_job(dh_sess, &op, ASYNC_TASK_DH); - if (!ret) - goto err; + ret = async_pause_job(dh_sess, op, ASYNC_TASK_DH); + if (!ret) + goto free_cb_param;
- ret = dh_sess->req.status; - if (ret) - goto err; + if (dh_sess->req.status) { + ret = UADK_E_FAIL; + goto free_cb_param; }
- return UADK_E_SUCCESS; +free_cb_param: + free(cb_param); + return ret; +}
-err: +static int dh_do_crypto(struct uadk_dh_sess *dh_sess) +{ + struct async_op op; + int ret = 0; + + ret = async_setup_async_event_notification(&op); + if (!ret) { + printf("failed to setup async event notification.\n"); + return ret; + } + + if (!op.job) { + ret = dh_do_sync(dh_sess); + return ret; + } + + ret = dh_do_async(dh_sess, &op); (void)async_clear_async_event_notification(); - return UADK_E_FAIL; + + return ret; }
static int dh_soft_set_pkey(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key)
Make the async callback param use heap memory rather than stack memory; otherwise, other async-related functions may modify the address of the cb param and cause unknown errors.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_pkey.c | 120 ++++++++++++++++++++++++++++++------------------ 1 file changed, 75 insertions(+), 45 deletions(-)
diff --git a/src/uadk_pkey.c b/src/uadk_pkey.c index bb87f1a..8250b91 100644 --- a/src/uadk_pkey.c +++ b/src/uadk_pkey.c @@ -298,62 +298,92 @@ clear_status: ecc_res.status = UADK_UNINIT; }
+static int uadk_ecc_do_sync(handle_t sess, struct wd_ecc_req *req) +{ + int ret; + + ret = wd_do_ecc_sync(sess, req); + if (ret < 0) { + if (ret == -WD_HW_EACCESS) + uadk_e_ecc_set_status(); + return UADK_E_FAIL; + } + + return 1; +} + +static int uadk_ecc_do_async(handle_t sess, struct wd_ecc_req *req, + struct async_op *op, void *usr) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int cnt = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = req; + req->cb_param = cb_param; + req->cb = uadk_e_ecc_cb; + req->status = -1; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param; + + op->idx = idx; + do { + ret = wd_do_ecc_async(sess, req); + if (unlikely(ret < 0)) { + if (unlikely(ret == -WD_HW_EACCESS)) + uadk_e_ecc_set_status(); + else if (unlikely(cnt++ > ENGINE_SEND_MAX_CNT)) + fprintf(stderr, "do ecc async operation timeout.\n"); + else + continue; + + async_free_poll_task(op->idx, 0); + ret = 0; + goto free_cb_param; + } + } while (ret == -EBUSY); + + ret = async_pause_job((void *)usr, op, ASYNC_TASK_ECC); + if (!ret) + goto free_cb_param; + + if (req->status) { + ret = 0; + goto free_cb_param; + } + +free_cb_param: + free(cb_param); + return ret; +} + int uadk_ecc_crypto(handle_t sess, struct wd_ecc_req *req, void *usr) { - struct uadk_e_cb_info cb_param; struct async_op op; - int idx, ret, cnt; + int ret;
ret = async_setup_async_event_notification(&op); if (!ret) { fprintf(stderr, "failed to setup async event notification.\n"); - return 0; + return ret; }
- if (op.job != NULL) { - cb_param.op = &op; - cb_param.priv = req; - req->cb_param = &cb_param; - req->cb = uadk_e_ecc_cb; - req->status = -1; - ret = async_get_free_task(&idx); - if (!ret) - goto err; + if (!op.job) + return uadk_ecc_do_sync(sess, req);
- op.idx = idx; - cnt = 0; - do { - ret = wd_do_ecc_async(sess, req); - if (unlikely(ret < 0)) { - if (unlikely(ret == -WD_HW_EACCESS)) - uadk_e_ecc_set_status(); - else if (unlikely(cnt++ > ENGINE_SEND_MAX_CNT)) - fprintf(stderr, "do ecc async operation timeout.\n"); - else - continue; - - async_free_poll_task(op.idx, 0); - goto err; - } - } while (ret == -EBUSY); - - ret = async_pause_job((void *)usr, &op, ASYNC_TASK_ECC); - if (!ret) - goto err; - if (req->status) - return 0; - } else { - ret = wd_do_ecc_sync(sess, req); - if (ret < 0) { - if (ret == -WD_HW_EACCESS) - uadk_e_ecc_set_status(); - return 0; - } - } - return 1; -err: + ret = uadk_ecc_do_async(sess, req, &op, usr); (void)async_clear_async_event_notification(); - return 0; + + return ret; }
bool uadk_is_all_zero(const unsigned char *data, size_t dlen)
Make the async callback param use heap memory rather than stack memory; otherwise, other async-related functions may modify the address of the cb param and cause unknown errors.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_rsa.c | 88 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 32 deletions(-)
diff --git a/src/uadk_rsa.c b/src/uadk_rsa.c index 475c2d0..29d4c21 100644 --- a/src/uadk_rsa.c +++ b/src/uadk_rsa.c @@ -1097,42 +1097,45 @@ static void uadk_e_rsa_cb(void *req_t) } }
-static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) +static int rsa_do_sync(struct uadk_rsa_sess *rsa_sess) { - struct uadk_e_cb_info cb_param; - struct async_op op; - int idx, ret, cnt; + int ret;
- ret = async_setup_async_event_notification(&op); - if (!ret) { - fprintf(stderr, "failed to setup async event notification.\n"); + ret = wd_do_rsa_sync(rsa_sess->sess, &rsa_sess->req); + if (ret) { + if (ret == -WD_HW_EACCESS) + uadk_e_rsa_set_status(); return UADK_E_FAIL; }
- if (!op.job) { - ret = wd_do_rsa_sync(rsa_sess->sess, &(rsa_sess->req)); - if (ret) { - if (ret == -WD_HW_EACCESS) - uadk_e_rsa_set_status(); - goto err; - } else { - return UADK_E_SUCCESS; - } + return UADK_E_SUCCESS; +} + +static int rsa_do_async(struct uadk_rsa_sess *rsa_sess, struct async_op *op) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int cnt = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; } - cb_param.op = &op; - cb_param.priv = &(rsa_sess->req); + + cb_param->op = op; + cb_param->priv = &rsa_sess->req; rsa_sess->req.cb = uadk_e_rsa_cb; - rsa_sess->req.cb_param = &cb_param; + rsa_sess->req.cb_param = cb_param; rsa_sess->req.status = -1; - ret = async_get_free_task(&idx); - if (ret == 0) - goto err; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param;
- op.idx = idx; - cnt = 0; + op->idx = idx; do { - ret = wd_do_rsa_async(rsa_sess->sess, &(rsa_sess->req)); + ret = wd_do_rsa_async(rsa_sess->sess, &rsa_sess->req); if (unlikely(ret < 0)) { if (unlikely(ret == -WD_HW_EACCESS)) uadk_e_rsa_set_status(); @@ -1141,23 +1144,44 @@ static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) else continue;
- async_free_poll_task(op.idx, 0); - goto err; + async_free_poll_task(op->idx, 0); + ret = UADK_E_FAIL; + goto free_cb_param; } } while (ret == -EBUSY);
- ret = async_pause_job(rsa_sess, &op, ASYNC_TASK_RSA); + ret = async_pause_job(rsa_sess, op, ASYNC_TASK_RSA); if (!ret) - goto err; + goto free_cb_param;
if (rsa_sess->req.status) + ret = UADK_E_FAIL; + +free_cb_param: + free(cb_param); + return ret; +} + +static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) +{ + struct async_op op; + int ret; + + ret = async_setup_async_event_notification(&op); + if (!ret) { + fprintf(stderr, "failed to setup async event notification.\n"); return UADK_E_FAIL; + }
- return UADK_E_SUCCESS; + if (!op.job) { + ret = rsa_do_sync(rsa_sess); + return ret; + }
-err: + ret = rsa_do_async(rsa_sess, &op); (void)async_clear_async_event_notification(); - return UADK_E_FAIL; + + return ret; }
static int uadk_e_soft_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb)
From: Wenkai Lin linwenkai6@hisilicon.com
When the packet length is small, software computing is used, so there is no need to init the ctx; optimize this path. before: len: 16 64 256 speed: 32213.72k 131927.84k 629974.20k after: len: 16 64 256 speed: 1771017.00k 5464413.82k 629608.35k
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_cipher.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index dce6d1e..0ae945f 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -636,13 +636,6 @@ static int do_cipher_sync(struct cipher_priv_ctx *priv) return 0; }
- /* - * If the length of the input data does not reach to hardware computing threshold, - * directly switch to soft cipher. - */ - if (priv->req.in_bytes <= priv->switch_threshold) - return 0; - ret = wd_do_cipher_sync(priv->sess, &priv->req); if (ret) return 0; @@ -801,28 +794,29 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, priv->req.dst = out; priv->req.out_buf_bytes = inlen;
- uadk_e_ctx_init(ctx, priv); - ret = async_setup_async_event_notification(&op); if (!ret) { fprintf(stderr, "failed to setup async event notification.\n"); return 0; }
+ /* + * If the length of the input data does not reach to hardware computing threshold, + * directly switch to soft cipher. + */ + if (priv->req.in_bytes <= priv->switch_threshold) { + ret = 0; + goto sync_err; + } + + uadk_e_ctx_init(ctx, priv); + if (!op.job) { /* Synchronous, only the synchronous mode supports soft computing */ ret = do_cipher_sync(priv); if (!ret) goto sync_err; } else { - /* - * If the length of the input data - * does not reach to hardware computing threshold, - * directly switch to soft cipher. - */ - if (priv->req.in_bytes <= priv->switch_threshold) - goto sync_err; - ret = do_cipher_async(priv, &op); if (!ret) goto out_notify;
From: Wenkai Lin linwenkai6@hisilicon.com
Switch to software computing for both async mode and sync mode.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_cipher.c | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 0ae945f..0d7386f 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -631,11 +631,6 @@ static int do_cipher_sync(struct cipher_priv_ctx *priv) { int ret;
- if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { - fprintf(stderr, "switch to soft cipher.\n"); - return 0; - } - ret = wd_do_cipher_sync(priv->sess, &priv->req); if (ret) return 0; @@ -649,11 +644,6 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) int cnt = 0; int idx;
- if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { - fprintf(stderr, "switch to soft cipher.\n"); - return ret; - } - cb_param = malloc(sizeof(struct uadk_e_cb_info)); if (!cb_param) { fprintf(stderr, "failed to alloc cb_param.\n"); @@ -794,28 +784,29 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, priv->req.dst = out; priv->req.out_buf_bytes = inlen;
- ret = async_setup_async_event_notification(&op); - if (!ret) { - fprintf(stderr, "failed to setup async event notification.\n"); - return 0; - } - /* * If the length of the input data does not reach to hardware computing threshold, * directly switch to soft cipher. */ - if (priv->req.in_bytes <= priv->switch_threshold) { - ret = 0; - goto sync_err; - } + if (priv->req.in_bytes <= priv->switch_threshold) + goto out_soft;
uadk_e_ctx_init(ctx, priv); + if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { + fprintf(stderr, "switch to soft cipher.\n"); + goto out_soft; + } + + ret = async_setup_async_event_notification(&op); + if (!ret) { + fprintf(stderr, "failed to setup async event notification.\n"); + goto out_soft; + }
if (!op.job) { - /* Synchronous, only the synchronous mode supports soft computing */ ret = do_cipher_sync(priv); if (!ret) - goto sync_err; + goto out_notify; } else { ret = do_cipher_async(priv, &op); if (!ret) @@ -825,12 +816,12 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
return 1;
-sync_err: +out_notify: + (void)async_clear_async_event_notification(); +out_soft: ret = uadk_e_cipher_soft_work(ctx, out, in, inlen); if (ret != 1) fprintf(stderr, "do soft ciphers failed.\n"); -out_notify: - (void)async_clear_async_event_notification(); return ret; }
From: Wenkai Lin linwenkai6@hisilicon.com
Fix the "function discards 'const' qualifier" compiler warning.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com Signed-off-by: JiangShui Yang yangjiangshui@h-partners.com --- src/uadk_aead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 88fdb82..d23b9ca 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -456,7 +456,7 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void }
static int do_aead_sync_inner(struct aead_priv_ctx *priv, unsigned char *out, - unsigned char *in, size_t inlen, enum wd_aead_msg_state state) + const unsigned char *in, size_t inlen, enum wd_aead_msg_state state) { int ret;