Fixup async mode bug and remove redundant code.
Zhiqi Song (10): uadk_engine: cleanup code style of async functions digest: fix the definition of async cb param cipher: fix the definition of async cb param dh: fix the definition of async cb param ecc: fix the definition of async cb param rsa: fix the definition of async cb param uadk_engine: remove redundant param of async ecc: fixup switching soft sm2 decrypt problem ecc: optimize sm2 sign check function digest: fix the address of async op
src/uadk_async.c | 140 ++++++++++++++++++++++++---------------------- src/uadk_async.h | 5 +- src/uadk_cipher.c | 39 ++++++++----- src/uadk_dh.c | 100 +++++++++++++++++++++------------ src/uadk_digest.c | 50 +++++++++++------ src/uadk_pkey.c | 99 ++++++++++++++++++++------------ src/uadk_rsa.c | 82 +++++++++++++++++---------- src/uadk_sm2.c | 42 +++++++++++--- 8 files changed, 349 insertions(+), 208 deletions(-)
Cleanup the return value and judgment code style of async mode functions.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_async.c | 129 ++++++++++++++++++++++++----------------------- src/uadk_async.h | 3 ++ 2 files changed, 69 insertions(+), 63 deletions(-)
diff --git a/src/uadk_async.c b/src/uadk_async.c index c46976c..ae53361 100644 --- a/src/uadk_async.c +++ b/src/uadk_async.c @@ -49,67 +49,66 @@ static void async_fd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key, int async_setup_async_event_notification(struct async_op *op) { ASYNC_WAIT_CTX *waitctx; + void *custom = NULL; OSSL_ASYNC_FD efd; - void *custom;
memset(op, 0, sizeof(struct async_op)); op->job = ASYNC_get_current_job(); - if (op->job == NULL) - return 1; + if (!op->job) + return DO_SYNC;
waitctx = ASYNC_get_wait_ctx(op->job); - if (waitctx == NULL) - return 0; + if (!waitctx) + return UADK_E_FAIL;
- if (ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, - &efd, &custom) == 0) { + if (!ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom)) { efd = eventfd(0, EFD_NONBLOCK); if (efd == -1) - return 0; + return UADK_E_FAIL;
- if (ASYNC_WAIT_CTX_set_wait_fd(waitctx, engine_uadk_id, efd, - custom, async_fd_cleanup) == 0) { + if (!ASYNC_WAIT_CTX_set_wait_fd(waitctx, engine_uadk_id, efd, + custom, async_fd_cleanup)) { async_fd_cleanup(waitctx, engine_uadk_id, efd, NULL); - return 0; + return UADK_E_FAIL; } }
- return 1; + return UADK_E_SUCCESS; }
int async_clear_async_event_notification(void) { - ASYNC_JOB *job; ASYNC_WAIT_CTX *waitctx; + void *custom = NULL; OSSL_ASYNC_FD efd; size_t num_add_fds; size_t num_del_fds; - void *custom = NULL; + ASYNC_JOB *job;
job = ASYNC_get_current_job(); - if (job == NULL) - return 0; + if (!job) + return UADK_E_FAIL;
waitctx = ASYNC_get_wait_ctx(job); - if (waitctx == NULL) - return 0; + if (!waitctx) + return UADK_E_FAIL;
- if (ASYNC_WAIT_CTX_get_changed_fds(waitctx, NULL, &num_add_fds, - NULL, &num_del_fds) == 0) - return 0; + if (!ASYNC_WAIT_CTX_get_changed_fds(waitctx, NULL, &num_add_fds, + NULL, &num_del_fds)) + return UADK_E_FAIL;
if (num_add_fds > 0) { - if (ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, - &efd, &custom) == 0) - return 0; + if (!ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, + &efd, &custom)) + return UADK_E_FAIL;
async_fd_cleanup(waitctx, engine_uadk_id, efd, NULL);
- if (ASYNC_WAIT_CTX_clear_fd(waitctx, engine_uadk_id) == 0) - return 0; + if (!ASYNC_WAIT_CTX_clear_fd(waitctx, engine_uadk_id)) + return UADK_E_FAIL; }
- return 1; + return UADK_E_SUCCESS; }
void async_poll_task_free(void) @@ -121,11 +120,11 @@ void async_poll_task_free(void) uadk_e_set_async_poll_state(DISABLE_ASYNC_POLLING);
error = pthread_mutex_lock(&poll_queue.async_task_mutex); - if (error != 0) + if (error) return;
task = poll_queue.head; - if (task != NULL) + if (task) OPENSSL_free(task);
poll_queue.head = NULL; @@ -144,13 +143,13 @@ static int async_get_poll_task(int *id) while (!poll_queue.status[idx]) { idx = (idx + 1) % ASYNC_QUEUE_TASK_NUM; if (cnt++ == ASYNC_QUEUE_TASK_NUM) - return 0; + return UADK_E_FAIL; }
*id = idx; poll_queue.rid = (idx + 1) % ASYNC_QUEUE_TASK_NUM;
- return 1; + return UADK_E_SUCCESS; }
static struct async_poll_task *async_get_queue_task(void) @@ -159,11 +158,11 @@ static struct async_poll_task *async_get_queue_task(void) struct async_poll_task *task_queue; int idx, ret;
- if (pthread_mutex_lock(&poll_queue.async_task_mutex) != 0) + if (pthread_mutex_lock(&poll_queue.async_task_mutex)) return NULL;
ret = async_get_poll_task(&idx); - if (!ret) + if (ret == UADK_E_FAIL) goto err;
task_queue = poll_queue.head; @@ -171,10 +170,10 @@ static struct async_poll_task *async_get_queue_task(void) poll_queue.is_recv = 0;
err: - if (pthread_mutex_unlock(&poll_queue.async_task_mutex) != 0) + if (pthread_mutex_unlock(&poll_queue.async_task_mutex)) return NULL;
- if (cur_task && cur_task->op == NULL) + if (cur_task && !cur_task->op) return NULL;
return cur_task; @@ -182,7 +181,7 @@ err:
void async_free_poll_task(int id, bool is_cb) { - if (pthread_mutex_lock(&poll_queue.async_task_mutex) != 0) + if (pthread_mutex_lock(&poll_queue.async_task_mutex)) return;
poll_queue.status[id] = 0; @@ -190,7 +189,7 @@ void async_free_poll_task(int id, bool is_cb) if (is_cb) poll_queue.is_recv = 1;
- if (pthread_mutex_unlock(&poll_queue.async_task_mutex) != 0) + if (pthread_mutex_unlock(&poll_queue.async_task_mutex)) return;
(void)sem_post(&poll_queue.empty_sem); @@ -203,17 +202,17 @@ int async_get_free_task(int *id) int idx, ret; int cnt = 0;
- if (sem_wait(&poll_queue.empty_sem) != 0) - return 0; + if (sem_wait(&poll_queue.empty_sem)) + return UADK_E_FAIL;
- if (pthread_mutex_lock(&poll_queue.async_task_mutex) != 0) - return 0; + if (pthread_mutex_lock(&poll_queue.async_task_mutex)) + return UADK_E_FAIL;
idx = poll_queue.sid; while (poll_queue.status[idx]) { idx = (idx + 1) % ASYNC_QUEUE_TASK_NUM; if (cnt++ == ASYNC_QUEUE_TASK_NUM) { - ret = 0; + ret = UADK_E_FAIL; goto out; } } @@ -223,12 +222,16 @@ int async_get_free_task(int *id) poll_queue.status[idx] = 1; task_queue = poll_queue.head; task = &task_queue[idx]; + if (!task) { + ret = UADK_E_FAIL; + goto out; + } task->op = NULL; - ret = 1; + ret = UADK_E_SUCCESS;
out: - if (pthread_mutex_unlock(&poll_queue.async_task_mutex) != 0) - return 0; + if (pthread_mutex_unlock(&poll_queue.async_task_mutex)) + return UADK_E_FAIL;
return ret; } @@ -247,9 +250,9 @@ static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type ty
ret = sem_post(&poll_queue.full_sem); if (ret) - return 0; + return UADK_E_FAIL;
- return 1; + return UADK_E_SUCCESS; }
int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) @@ -261,19 +264,19 @@ int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) int ret;
ret = async_add_poll_task(ctx, op, type, id); - if (ret == 0) + if (ret == UADK_E_FAIL) return ret;
waitctx = ASYNC_get_wait_ctx((ASYNC_JOB *)op->job); - if (waitctx == NULL) - return 0; + if (!waitctx) + return UADK_E_FAIL;
do { - if (ASYNC_pause_job() == 0) - return 0; + if (!ASYNC_pause_job()) + return UADK_E_FAIL;
ret = ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom); - if (ret <= 0) + if (ret == UADK_E_FAIL) continue;
if (read(efd, &buf, sizeof(uint64_t)) == -1) { @@ -291,13 +294,13 @@ int async_wake_job(ASYNC_JOB *job) { ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; - void *custom; uint64_t buf = 1; + void *custom; int ret;
waitctx = ASYNC_get_wait_ctx(job); - if (waitctx == NULL) - return 0; + if (!waitctx) + return UADK_E_FAIL;
ret = ASYNC_WAIT_CTX_get_fd(waitctx, engine_uadk_id, &efd, &custom); if (ret > 0) { @@ -325,7 +328,7 @@ static void *async_poll_process_func(void *args) int ret, idx;
while (uadk_e_get_async_poll_state()) { - if (sem_wait(&poll_queue.full_sem) != 0) { + if (sem_wait(&poll_queue.full_sem)) { if (errno == EINTR) { /* sem_wait is interrupted by interrupt, continue */ continue; @@ -333,7 +336,7 @@ static void *async_poll_process_func(void *args) }
task = async_get_queue_task(); - if (task == NULL) { + if (!task) { (void)sem_post(&poll_queue.full_sem); usleep(1); continue; @@ -361,11 +364,11 @@ int async_module_init(void) memset(&poll_queue, 0, sizeof(struct async_poll_queue));
if (pthread_mutex_init(&(poll_queue.async_task_mutex), NULL) < 0) - return 0; + return UADK_E_FAIL;
poll_queue.head = calloc(ASYNC_QUEUE_TASK_NUM, sizeof(struct async_poll_task)); - if (poll_queue.head == NULL) - return 0; + if (!poll_queue.head) + return UADK_E_FAIL;
if (sem_init(&poll_queue.empty_sem, 0, ASYNC_QUEUE_TASK_NUM) != 0) goto err; @@ -381,9 +384,9 @@ int async_module_init(void) goto err;
poll_queue.thread_id = thread_id; - return 1; + return UADK_E_SUCCESS;
err: async_poll_task_free(); - return 0; + return UADK_E_FAIL; } diff --git a/src/uadk_async.h b/src/uadk_async.h index 9160c98..adb065f 100644 --- a/src/uadk_async.h +++ b/src/uadk_async.h @@ -23,6 +23,9 @@ #include <openssl/async.h>
#define ASYNC_QUEUE_TASK_NUM 1024 +#define UADK_E_SUCCESS 1 +#define UADK_E_FAIL 0 +#define DO_SYNC 1
struct async_op { ASYNC_JOB *job;
When doing an SM2 async task with the digest method, there is a probability of a segmentation fault occurring: | SM2_sign_loop | EVP_DigestSign | [...] | sm2_sign | sm2_sign_init_iot | wd_sm2_new_sign_in | [...] | uadk_ecc_get_rand | [...] | RAND_bytes | [...] | rand_bytes | EVP_DigestFinal_ex | uadk_e_digest_cleanup | wd_digest_free_sess | wd_memset_zero The wd_memset_zero() will release the sess, and the address of the async job may get changed, which will cause a segmentation fault.
The solution is to make the async callback param use memory on the heap rather than on the stack; otherwise, other cleanup-related functions may release the memory on the stack and modify the address of the cb param in unknown scenarios.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_digest.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-)
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index b75c408..d9b36e1 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -715,22 +715,29 @@ static int do_digest_sync(struct digest_priv_ctx *priv)
static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) { - struct uadk_e_cb_info cb_param; - int idx, ret; + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx;
if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { fprintf(stderr, "async cipher init failed.\n"); - return 0; + return ret; + } + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; }
- cb_param.op = op; - cb_param.priv = priv; + cb_param->op = op; + cb_param->priv = priv; priv->req.cb = uadk_e_digest_cb; - priv->req.cb_param = &cb_param; + priv->req.cb_param = cb_param;
ret = async_get_free_task(&idx); if (!ret) - return 0; + goto free_cb_param;
op->idx = idx;
@@ -739,14 +746,16 @@ static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) if (ret < 0 && ret != -EBUSY) { fprintf(stderr, "do sec digest async failed.\n"); async_free_poll_task(op->idx, 0); - return 0; + ret = 0; + goto free_cb_param; } } while (ret == -EBUSY);
ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST, idx); - if (!ret) - return 0; - return 1; + +free_cb_param: + free(cb_param); + return ret; }
static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) @@ -773,7 +782,7 @@ static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) return 0; }
- if (op.job == NULL) { + if (!op.job) { /* Synchronous, only the synchronous mode supports soft computing */ if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { ret = digest_soft_final(priv, digest); @@ -801,7 +810,7 @@ sync_err: fprintf(stderr, "do sec digest stream mode failed.\n"); } clear: - async_clear_async_event_notification(); + (void)async_clear_async_event_notification(); return ret; }
Make async callback param use the memory on heap rather than stack, or other async related functions may modify the address of cb param and cause unknown error.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_cipher.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 901c29e..26a345b 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -814,22 +814,29 @@ static int do_cipher_sync(struct cipher_priv_ctx *priv)
static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) { - struct uadk_e_cb_info cb_param; - int idx, ret; + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx;
if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { fprintf(stderr, "switch to soft cipher.\n"); - return 0; + return ret; + } + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; }
- cb_param.op = op; - cb_param.priv = priv; + cb_param->op = op; + cb_param->priv = priv; priv->req.cb = uadk_e_cipher_cb; - priv->req.cb_param = &cb_param; + priv->req.cb_param = cb_param;
ret = async_get_free_task(&idx); if (!ret) - return 0; + goto free_cb_param;
op->idx = idx; do { @@ -837,14 +844,16 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) if (ret < 0 && ret != -EBUSY) { fprintf(stderr, "do sec cipher failed, switch to soft cipher.\n"); async_free_poll_task(op->idx, 0); - return 0; + ret = 0; + goto free_cb_param; } } while (ret == -EBUSY);
ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER, idx); - if (!ret) - return 0; - return 1; + +free_cb_param: + free(cb_param); + return ret; }
static void uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct cipher_priv_ctx *priv) @@ -926,13 +935,14 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, priv->req.out_buf_bytes = inlen;
uadk_e_ctx_init(ctx, priv); + ret = async_setup_async_event_notification(&op); if (!ret) { fprintf(stderr, "failed to setup async event notification.\n"); return 0; }
- if (op.job == NULL) { + if (!op.job) { /* Synchronous, only the synchronous mode supports soft computing */ ret = do_cipher_sync(priv); if (!ret) @@ -953,12 +963,13 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, uadk_cipher_update_priv_ctx(priv);
return 1; + sync_err: ret = uadk_e_cipher_soft_work(ctx, out, in, inlen); if (ret != 1) fprintf(stderr, "do soft ciphers failed.\n"); out_notify: - async_clear_async_event_notification(); + (void)async_clear_async_event_notification(); return ret; }
Make async callback param use the memory on heap rather than stack, or other async related functions may modify the address of cb param and cause unknown error.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_dh.c | 100 +++++++++++++++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 37 deletions(-)
diff --git a/src/uadk_dh.c b/src/uadk_dh.c index acb5b8a..c0b58ef 100644 --- a/src/uadk_dh.c +++ b/src/uadk_dh.c @@ -692,56 +692,82 @@ free_ag: return UADK_E_FAIL; }
+static int dh_do_sync(struct uadk_dh_sess *dh_sess) +{ + int ret; + + ret = wd_do_dh_sync(dh_sess->sess, &dh_sess->req); + if (ret) + return UADK_E_FAIL; + + return UADK_E_SUCCESS; +} + +static int dh_do_async(struct uadk_dh_sess *dh_sess, struct async_op *op) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = &dh_sess->req; + dh_sess->req.cb = uadk_e_dh_cb; + dh_sess->req.cb_param = cb_param; + dh_sess->req.status = -1; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param; + + op->idx = idx; + do { + ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); + if (ret < 0 && ret != -EBUSY) { + async_free_poll_task(idx, 0); + ret = UADK_E_FAIL; + goto free_cb_param; + } + } while (ret == -EBUSY); + + ret = async_pause_job(dh_sess, op, ASYNC_TASK_DH, idx); + if (!ret) + goto free_cb_param; + + if (dh_sess->req.status) { + ret = UADK_E_FAIL; + goto free_cb_param; + } + +free_cb_param: + free(cb_param); + return ret; +} + static int dh_do_crypto(struct uadk_dh_sess *dh_sess) { - struct uadk_e_cb_info cb_param; struct async_op op; - int idx, ret; + int ret = 0;
ret = async_setup_async_event_notification(&op); if (!ret) { printf("failed to setup async event notification.\n"); - return UADK_E_FAIL; + return ret; }
if (!op.job) { - ret = wd_do_dh_sync(dh_sess->sess, &dh_sess->req); - if (ret) - return UADK_E_FAIL; - } else { - cb_param.op = &op; - cb_param.priv = &dh_sess->req; - dh_sess->req.cb = uadk_e_dh_cb; - dh_sess->req.cb_param = &cb_param; - dh_sess->req.status = -1; - ret = async_get_free_task(&idx); - if (!ret) - goto err; - - op.idx = idx; - - do { - ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); - if (ret < 0 && ret != -EBUSY) { - async_free_poll_task(idx, 0); - goto err; - } - } while (ret == -EBUSY); - - ret = async_pause_job(dh_sess, &op, ASYNC_TASK_DH, idx); - if (!ret) - goto err; - - ret = dh_sess->req.status; - if (ret) - goto err; + ret = dh_do_sync(dh_sess); + return ret; }
- return UADK_E_SUCCESS; - -err: + ret = dh_do_async(dh_sess, &op); (void)async_clear_async_event_notification(); - return UADK_E_FAIL; + + return ret; }
static int dh_soft_set_pkey(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key)
Make async callback param use the memory on heap rather than stack, or other async related functions may modify the address of cb param and cause unknown error.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_pkey.c | 99 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 64 insertions(+), 35 deletions(-)
diff --git a/src/uadk_pkey.c b/src/uadk_pkey.c index 60e3238..6319d61 100644 --- a/src/uadk_pkey.c +++ b/src/uadk_pkey.c @@ -280,52 +280,81 @@ static void uadk_wd_ecc_uninit(void) ecc_res.numa_id = 0; }
+static int uadk_ecc_do_sync(handle_t sess, struct wd_ecc_req *req) +{ + int ret; + + ret = wd_do_ecc_sync(sess, req); + if (ret < 0) + return 0; + + return 1; +} + +static int uadk_ecc_do_async(handle_t sess, struct wd_ecc_req *req, + struct async_op *op, void *usr) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = req; + req->cb_param = cb_param; + req->cb = uadk_e_ecc_cb; + req->status = -1; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param; + + op->idx = idx; + do { + ret = wd_do_ecc_async(sess, req); + if (ret < 0 && ret != -EBUSY) { + async_free_poll_task(op->idx, 0); + ret = 0; + goto free_cb_param; + } + } while (ret == -EBUSY); + + ret = async_pause_job((void *)usr, op, ASYNC_TASK_ECC, idx); + if (!ret) + goto free_cb_param; + + if (req->status) { + ret = 0; + goto free_cb_param; + } + +free_cb_param: + free(cb_param); + return ret; +} + int uadk_ecc_crypto(handle_t sess, struct wd_ecc_req *req, void *usr) { - struct uadk_e_cb_info cb_param; struct async_op op; - int idx, ret; + int ret;
ret = async_setup_async_event_notification(&op); if (!ret) { fprintf(stderr, "failed to setup async event notification.\n"); - return 0; + return ret; }
- if (op.job != NULL) { - cb_param.op = &op; - cb_param.priv = req; - req->cb_param = &cb_param; - req->cb = uadk_e_ecc_cb; - req->status = -1; - ret = async_get_free_task(&idx); - if (!ret) - goto err; - - op.idx = idx; - - do { - ret = wd_do_ecc_async(sess, req); - if (ret < 0 && ret != -EBUSY) { - async_free_poll_task(op.idx, 0); - goto err; - } - } while (ret == -EBUSY); + if (!op.job) + return uadk_ecc_do_sync(sess, req);
- ret = async_pause_job((void *)usr, &op, ASYNC_TASK_ECC, idx); - if (!ret) - goto err; - if (req->status) - return 0; - } else { - ret = wd_do_ecc_sync(sess, req); - if (ret < 0) - return 0; - } - return 1; -err: + ret = uadk_ecc_do_async(sess, req, &op, usr); (void)async_clear_async_event_notification(); - return 0; + + return ret; }
bool uadk_is_all_zero(const unsigned char *data, size_t dlen)
Make async callback param use the memory on heap rather than stack, or other async related functions may modify the address of cb param and cause unknown error.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_rsa.c | 82 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 29 deletions(-)
diff --git a/src/uadk_rsa.c b/src/uadk_rsa.c index d0780a7..6424d9e 100644 --- a/src/uadk_rsa.c +++ b/src/uadk_rsa.c @@ -1080,56 +1080,80 @@ static void uadk_e_rsa_cb(void *req_t) } }
-static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) +static int rsa_do_sync(struct uadk_rsa_sess *rsa_sess) { - struct uadk_e_cb_info cb_param; - struct async_op op; - int idx, ret; + int ret;
- ret = async_setup_async_event_notification(&op); - if (!ret) { - fprintf(stderr, "failed to setup async event notification.\n"); + ret = wd_do_rsa_sync(rsa_sess->sess, &rsa_sess->req); + if (ret) return UADK_E_FAIL; - }
- if (!op.job) { - ret = wd_do_rsa_sync(rsa_sess->sess, &(rsa_sess->req)); - if (!ret) - return UADK_E_SUCCESS; - else - goto err; + return UADK_E_SUCCESS; +} + +static int rsa_do_async(struct uadk_rsa_sess *rsa_sess, struct async_op *op) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; } - cb_param.op = &op; - cb_param.priv = &(rsa_sess->req); + + cb_param->op = op; + cb_param->priv = &rsa_sess->req; rsa_sess->req.cb = uadk_e_rsa_cb; - rsa_sess->req.cb_param = &cb_param; + rsa_sess->req.cb_param = cb_param; rsa_sess->req.status = -1; - ret = async_get_free_task(&idx); - if (ret == 0) - goto err; + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param;
- op.idx = idx; + op->idx = idx; do { - ret = wd_do_rsa_async(rsa_sess->sess, &(rsa_sess->req)); + ret = wd_do_rsa_async(rsa_sess->sess, &rsa_sess->req); if (ret < 0 && ret != -EBUSY) { - async_free_poll_task(op.idx, 0); - goto err; + async_free_poll_task(op->idx, 0); + ret = UADK_E_FAIL; + goto free_cb_param; } } while (ret == -EBUSY);
- ret = async_pause_job(rsa_sess, &op, ASYNC_TASK_RSA, idx); + ret = async_pause_job(rsa_sess, op, ASYNC_TASK_RSA, idx); if (!ret) - goto err; + goto free_cb_param;
if (rsa_sess->req.status) + ret = UADK_E_FAIL; + +free_cb_param: + free(cb_param); + return ret; +} + +static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) +{ + struct async_op op; + int ret; + + ret = async_setup_async_event_notification(&op); + if (!ret) { + fprintf(stderr, "failed to setup async event notification.\n"); return UADK_E_FAIL; + }
- return UADK_E_SUCCESS; + if (!op.job) { + ret = rsa_do_sync(rsa_sess); + return ret; + }
-err: + ret = rsa_do_async(rsa_sess, &op); (void)async_clear_async_event_notification(); - return UADK_E_FAIL; + + return ret; }
static int uadk_e_soft_rsa_keygen(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb)
Remove redundant index parameter of async_pause_job(), as the value of the index has been saved in async_op type variable.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_async.c | 11 +++++++---- src/uadk_async.h | 2 +- src/uadk_cipher.c | 2 +- src/uadk_dh.c | 4 ++-- src/uadk_digest.c | 2 +- src/uadk_pkey.c | 2 +- src/uadk_rsa.c | 2 +- 7 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/src/uadk_async.c b/src/uadk_async.c index ae53361..be5b314 100644 --- a/src/uadk_async.c +++ b/src/uadk_async.c @@ -236,14 +236,17 @@ out: return ret; }
-static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type type, int id) +static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type type) { struct async_poll_task *task_queue; struct async_poll_task *task; int ret;
task_queue = poll_queue.head; - task = &task_queue[id]; + task = &task_queue[op->idx]; + if (!task) + return UADK_E_FAIL; + task->ctx = ctx; task->type = type; task->op = op; @@ -255,7 +258,7 @@ static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type ty return UADK_E_SUCCESS; }
-int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) +int async_pause_job(void *ctx, struct async_op *op, enum task_type type) { ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; @@ -263,7 +266,7 @@ int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) uint64_t buf; int ret;
- ret = async_add_poll_task(ctx, op, type, id); + ret = async_add_poll_task(ctx, op, type); if (ret == UADK_E_FAIL) return ret;
diff --git a/src/uadk_async.h b/src/uadk_async.h index adb065f..ec41966 100644 --- a/src/uadk_async.h +++ b/src/uadk_async.h @@ -75,7 +75,7 @@ struct async_poll_queue {
int async_setup_async_event_notification(struct async_op *op); int async_clear_async_event_notification(void); -int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id); +int async_pause_job(void *ctx, struct async_op *op, enum task_type type); void async_register_poll_fn(int type, async_recv_t func); int async_module_init(void); int async_wake_job(ASYNC_JOB *job); diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 26a345b..4febe9a 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -849,7 +849,7 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) } } while (ret == -EBUSY);
- ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER, idx); + ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER);
free_cb_param: free(cb_param); diff --git a/src/uadk_dh.c b/src/uadk_dh.c index c0b58ef..e80ee58 100644 --- a/src/uadk_dh.c +++ b/src/uadk_dh.c @@ -728,13 +728,13 @@ static int dh_do_async(struct uadk_dh_sess *dh_sess, struct async_op *op) do { ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); if (ret < 0 && ret != -EBUSY) { - async_free_poll_task(idx, 0); + async_free_poll_task(op->idx, 0); ret = UADK_E_FAIL; goto free_cb_param; } } while (ret == -EBUSY);
- ret = async_pause_job(dh_sess, op, ASYNC_TASK_DH, idx); + ret = async_pause_job(dh_sess, op, ASYNC_TASK_DH); if (!ret) goto free_cb_param;
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index d9b36e1..93da3de 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -751,7 +751,7 @@ static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) } } while (ret == -EBUSY);
- ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST, idx); + ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST);
free_cb_param: free(cb_param); diff --git a/src/uadk_pkey.c b/src/uadk_pkey.c index 6319d61..dfa81d5 100644 --- a/src/uadk_pkey.c +++ b/src/uadk_pkey.c @@ -323,7 +323,7 @@ static int uadk_ecc_do_async(handle_t sess, struct wd_ecc_req *req, } } while (ret == -EBUSY);
- ret = async_pause_job((void *)usr, op, ASYNC_TASK_ECC, idx); + ret = async_pause_job((void *)usr, op, ASYNC_TASK_ECC); if (!ret) goto free_cb_param;
diff --git a/src/uadk_rsa.c b/src/uadk_rsa.c index 6424d9e..e05a82e 100644 --- a/src/uadk_rsa.c +++ b/src/uadk_rsa.c @@ -1122,7 +1122,7 @@ static int rsa_do_async(struct uadk_rsa_sess *rsa_sess, struct async_op *op) } } while (ret == -EBUSY);
- ret = async_pause_job(rsa_sess, op, ASYNC_TASK_RSA, idx); + ret = async_pause_job(rsa_sess, op, ASYNC_TASK_RSA); if (!ret) goto free_cb_param;
The openssl API d2i_SM2_Ciphertext() will clean the input param. When switching to soft computing, input data errors may occur. So pre-store the input data and free it after the hardware or software computing finished.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_sm2.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/src/uadk_sm2.c b/src/uadk_sm2.c index f393641..aa56b5b 100644 --- a/src/uadk_sm2.c +++ b/src/uadk_sm2.c @@ -1026,7 +1026,7 @@ static int sm2_decrypt_check(EVP_PKEY_CTX *ctx, hash_size = EVP_MD_size(md); if (hash_size <= 0) { fprintf(stderr, "hash size = %d error\n", hash_size); - return 0; + return UADK_DO_SOFT; }
if (!out) { @@ -1102,6 +1102,7 @@ static int sm2_decrypt(EVP_PKEY_CTX *ctx, struct sm2_ciphertext *ctext_struct; struct wd_ecc_req req = {0}; struct wd_ecc_point c1; + unsigned char *in_soft; struct wd_dtb c2, c3; const EVP_MD *md; int ret; @@ -1112,10 +1113,18 @@ static int sm2_decrypt(EVP_PKEY_CTX *ctx,
md = (smctx->ctx.md == NULL) ? EVP_sm3() : smctx->ctx.md;
+ /* d2i_SM2_Ciphertext() will clean the in data, pre-save here */ + if (!inlen) { + ret = UADK_DO_SOFT; + goto do_soft; + } + in_soft = malloc(inlen); + if (!in_soft) { + ret = UADK_DO_SOFT; + goto do_soft; + } + memcpy(in_soft, in, inlen); + ctext_struct = d2i_SM2_Ciphertext(NULL, &in, inlen); if (!ctext_struct) { ret = UADK_DO_SOFT; - goto do_soft; + goto free_in_soft; }
ret = cipher_ber_to_bin(md, ctext_struct, &c1, &c2, &c3); @@ -1153,6 +1162,16 @@ free_c1: free(c1.x.data); free_ctext: SM2_Ciphertext_free(ctext_struct); +free_in_soft: + if (ret != UADK_DO_SOFT) { + free(in_soft); + return ret; + } + + fprintf(stderr, "switch to execute openssl software calculation.\n"); + ret = openssl_decrypt(ctx, out, outlen, in_soft, inlen); + free(in_soft); + return ret; do_soft: if (ret != UADK_DO_SOFT) return ret;
Enable users to pass NULL sign parameter to obtain the length of the signature result. If users want to do actual signature task, they need to call the signature function a second time.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_sm2.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/src/uadk_sm2.c b/src/uadk_sm2.c index aa56b5b..b03f7bc 100644 --- a/src/uadk_sm2.c +++ b/src/uadk_sm2.c @@ -26,6 +26,8 @@ #include "uadk.h" #include "uadk_pkey.h"
+#define GET_SIGNLEN 1 + enum { CTX_INIT_FAIL = -1, CTX_UNINIT, @@ -673,6 +675,17 @@ static int sm2_sign_check(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, EC_KEY *ec = EVP_PKEY_get0(p_key); const int sig_sz = ECDSA_size(ec);
+ /* + * If 'sig' is NULL, users can use the sm2_sign API to obtain the valid 'siglen' first, + * then users use the value of 'siglen' to alloc the memory of 'sig' and call the + * sm2_sign API a second time to do the signing task. + */ + if (!sig) { + fprintf(stderr, "sig is NULL, get valid siglen\n"); + *siglen = (size_t)sig_sz; + return GET_SIGNLEN; + } + if (!smctx || !smctx->sess) { fprintf(stderr, "smctx or sess NULL\n"); return UADK_DO_SOFT; @@ -693,12 +706,6 @@ static int sm2_sign_check(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, return -EINVAL; }
- if (!sig) { - fprintf(stderr, "invalid: sig is NULL\n"); - *siglen = (size_t)sig_sz; - return -EINVAL; - } - if (tbslen > SM2_KEY_BYTES) return UADK_DO_SOFT;
The address of op should be on the heap, or it will be released by the digest cleanup process and affect the following async task.
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_digest.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index 93da3de..e14ec74 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -763,7 +763,7 @@ static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) struct digest_priv_ctx *priv = (struct digest_priv_ctx *)EVP_MD_CTX_md_data(ctx); int ret = 1; - struct async_op op; + struct async_op *op; priv->req.has_next = DIGEST_END; priv->req.in = priv->data; priv->req.out = priv->out; @@ -776,13 +776,18 @@ static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) if (priv->e_nid == NID_sha384) priv->req.out_bytes = WD_DIGEST_SHA384_LEN;
- ret = async_setup_async_event_notification(&op); + op = malloc(sizeof(struct async_op)); + if (!op) + return 0; + + ret = async_setup_async_event_notification(op); if (unlikely(!ret)) { fprintf(stderr, "failed to setup async event notification.\n"); + free(op); return 0; }
- if (!op.job) { + if (!op->job) { /* Synchronous, only the synchronous mode supports soft computing */ if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { ret = digest_soft_final(priv, digest); @@ -794,12 +799,13 @@ static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest) if (!ret) goto sync_err; } else { - ret = do_digest_async(priv, &op); + ret = do_digest_async(priv, op); if (!ret) goto clear; } memcpy(digest, priv->req.out, priv->req.out_bytes);
+ free(op); return 1;
sync_err: @@ -811,6 +817,7 @@ sync_err: } clear: (void)async_clear_async_event_notification(); + free(op); return ret; }