From: JiangShui Yang yangjiangshui@h-partners.com
Wenkai Lin (7): uadk_engine: support aead aes-gcm algorithm cipher: fix uadk_e_destroy_cipher memory leak aead: fix for engine lock is not initialized uadk: fix cipher switchover to software calculation fails aead: fix tag length check aead: fix for aes gcm update process cipher: add sm4 ecb mode
Zhiqi Song (1): uadk_engine: remove redundant param of async
src/Makefile.am | 3 +- src/uadk.h | 3 +- src/uadk_aead.c | 778 ++++++++++++++++++++++++++++++++++++++ src/uadk_async.c | 8 +- src/uadk_async.h | 3 +- src/uadk_cipher.c | 536 +++++++++----------------- src/uadk_cipher_adapter.c | 209 ++++++++++ src/uadk_cipher_adapter.h | 36 ++ src/uadk_dh.c | 4 +- src/uadk_digest.c | 2 +- src/uadk_engine_init.c | 28 +- src/uadk_pkey.c | 2 +- src/uadk_rsa.c | 2 +- 13 files changed, 1246 insertions(+), 368 deletions(-) create mode 100644 src/uadk_aead.c create mode 100644 src/uadk_cipher_adapter.c create mode 100644 src/uadk_cipher_adapter.h
From: Zhiqi Song songzhiqi1@huawei.com
Remove the redundant index parameter of async_pause_job(), as the value of the index is already saved in the async_op structure variable (op->idx).
Signed-off-by: Zhiqi Song songzhiqi1@huawei.com --- src/uadk_async.c | 8 ++++---- src/uadk_async.h | 2 +- src/uadk_cipher.c | 2 +- src/uadk_dh.c | 4 ++-- src/uadk_digest.c | 2 +- src/uadk_pkey.c | 2 +- src/uadk_rsa.c | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/uadk_async.c b/src/uadk_async.c index 45f3918..870065d 100644 --- a/src/uadk_async.c +++ b/src/uadk_async.c @@ -234,14 +234,14 @@ out: return ret; }
-static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type type, int id) +static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type type) { struct async_poll_task *task_queue; struct async_poll_task *task; int ret;
task_queue = poll_queue.head; - task = &task_queue[id]; + task = &task_queue[op->idx]; task->ctx = ctx; task->type = type; task->op = op; @@ -253,7 +253,7 @@ static int async_add_poll_task(void *ctx, struct async_op *op, enum task_type ty return 1; }
-int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) +int async_pause_job(void *ctx, struct async_op *op, enum task_type type) { ASYNC_WAIT_CTX *waitctx; OSSL_ASYNC_FD efd; @@ -261,7 +261,7 @@ int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id) uint64_t buf; int ret;
- ret = async_add_poll_task(ctx, op, type, id); + ret = async_add_poll_task(ctx, op, type); if (ret == 0) return ret;
diff --git a/src/uadk_async.h b/src/uadk_async.h index 9160c98..1208c30 100644 --- a/src/uadk_async.h +++ b/src/uadk_async.h @@ -72,7 +72,7 @@ struct async_poll_queue {
int async_setup_async_event_notification(struct async_op *op); int async_clear_async_event_notification(void); -int async_pause_job(void *ctx, struct async_op *op, enum task_type type, int id); +int async_pause_job(void *ctx, struct async_op *op, enum task_type type); void async_register_poll_fn(int type, async_recv_t func); int async_module_init(void); int async_wake_job(ASYNC_JOB *job); diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 901c29e..ed25787 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -841,7 +841,7 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) } } while (ret == -EBUSY);
- ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER, idx); + ret = async_pause_job(priv, op, ASYNC_TASK_CIPHER); if (!ret) return 0; return 1; diff --git a/src/uadk_dh.c b/src/uadk_dh.c index acb5b8a..418747e 100644 --- a/src/uadk_dh.c +++ b/src/uadk_dh.c @@ -723,12 +723,12 @@ static int dh_do_crypto(struct uadk_dh_sess *dh_sess) do { ret = wd_do_dh_async(dh_sess->sess, &dh_sess->req); if (ret < 0 && ret != -EBUSY) { - async_free_poll_task(idx, 0); + async_free_poll_task(op.idx, 0); goto err; } } while (ret == -EBUSY);
- ret = async_pause_job(dh_sess, &op, ASYNC_TASK_DH, idx); + ret = async_pause_job(dh_sess, &op, ASYNC_TASK_DH); if (!ret) goto err;
diff --git a/src/uadk_digest.c b/src/uadk_digest.c index beb9f51..fa96e57 100644 --- a/src/uadk_digest.c +++ b/src/uadk_digest.c @@ -757,7 +757,7 @@ static int do_digest_async(struct digest_priv_ctx *priv, struct async_op *op) } } while (ret == -EBUSY);
- ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST, idx); + ret = async_pause_job(priv, op, ASYNC_TASK_DIGEST); if (!ret) return 0; return 1; diff --git a/src/uadk_pkey.c b/src/uadk_pkey.c index 60e3238..b071d8b 100644 --- a/src/uadk_pkey.c +++ b/src/uadk_pkey.c @@ -312,7 +312,7 @@ int uadk_ecc_crypto(handle_t sess, struct wd_ecc_req *req, void *usr) } } while (ret == -EBUSY);
- ret = async_pause_job((void *)usr, &op, ASYNC_TASK_ECC, idx); + ret = async_pause_job((void *)usr, &op, ASYNC_TASK_ECC); if (!ret) goto err; if (req->status) diff --git a/src/uadk_rsa.c b/src/uadk_rsa.c index d0780a7..ca05ef7 100644 --- a/src/uadk_rsa.c +++ b/src/uadk_rsa.c @@ -1118,7 +1118,7 @@ static int rsa_do_crypto(struct uadk_rsa_sess *rsa_sess) } } while (ret == -EBUSY);
- ret = async_pause_job(rsa_sess, &op, ASYNC_TASK_RSA, idx); + ret = async_pause_job(rsa_sess, &op, ASYNC_TASK_RSA); if (!ret) goto err;
The OpenSSL EVP interface supports AEAD modes, in which authentication and encryption/decryption are performed at the same time; offload this software computation to hardware through the UADK engine. Currently, aes-128-gcm, aes-192-gcm, and aes-256-gcm are supported.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/Makefile.am | 3 +- src/uadk.h | 2 - src/uadk_aead.c | 732 ++++++++++++++++++++++++++++++++++++++ src/uadk_async.h | 1 + src/uadk_cipher.c | 545 ++++++++++------------------ src/uadk_cipher_adapter.c | 204 +++++++++++ src/uadk_cipher_adapter.h | 36 ++ src/uadk_engine_init.c | 24 +- 8 files changed, 1173 insertions(+), 374 deletions(-) create mode 100644 src/uadk_aead.c create mode 100644 src/uadk_cipher_adapter.c create mode 100644 src/uadk_cipher_adapter.h
diff --git a/src/Makefile.am b/src/Makefile.am index 668ba13..3806de3 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -11,7 +11,8 @@ endif #HAVE_CRYPTO3
uadk_engine_la_SOURCES=uadk_utils.c uadk_engine_init.c uadk_cipher.c \ uadk_digest.c uadk_async.c uadk_rsa.c uadk_sm2.c \ - uadk_pkey.c uadk_dh.c uadk_ec.c uadk_ecx.c + uadk_pkey.c uadk_dh.c uadk_ec.c uadk_ecx.c \ + uadk_aead.c uadk_cipher_adapter.c
uadk_engine_la_LIBADD=-ldl $(WD_LIBS) -lpthread uadk_engine_la_LDFLAGS=-module -version-number $(VERSION) diff --git a/src/uadk.h b/src/uadk.h index 5a98feb..4cf2c13 100644 --- a/src/uadk.h +++ b/src/uadk.h @@ -28,8 +28,6 @@ enum { HW_V3, };
-int uadk_e_bind_cipher(ENGINE *e); -void uadk_e_destroy_cipher(void); int uadk_e_bind_digest(ENGINE *e); void uadk_e_destroy_digest(void); int uadk_e_bind_rsa(ENGINE *e); diff --git a/src/uadk_aead.c b/src/uadk_aead.c new file mode 100644 index 0000000..64a5f5c --- /dev/null +++ b/src/uadk_aead.c @@ -0,0 +1,732 @@ +/* + * Copyright 2023 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include <stdio.h> +#include <stdbool.h> +#include <string.h> +#include <dlfcn.h> +#include <openssl/aes.h> +#include <openssl/engine.h> +#include <uadk/wd_aead.h> +#include <uadk/wd_sched.h> +#include "uadk_cipher_adapter.h" +#include "uadk.h" +#include "uadk_async.h" +#include "uadk_utils.h" + +#define RET_FAIL -1 +#define CTX_SYNC_ENC 0 +#define CTX_SYNC_DEC 1 +#define CTX_ASYNC_ENC 2 +#define CTX_ASYNC_DEC 3 +#define CTX_NUM 4 +#define AES_GCM_CTR_LEN 4 +#define AES_GCM_BLOCK_SIZE 16 +#define AES_GCM_IV_LEN 12 +#define AES_GCM_TAG_LEN 16 +#define GCM_FLAG (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_GCM_MODE \ + | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_AEAD_CIPHER \ + | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT) +/* The max data length is 16M-512B */ +#define AEAD_BLOCK_SIZE 0xFFFE00 + +struct aead_priv_ctx { + handle_t sess; + struct wd_aead_sess_setup setup; + struct wd_aead_req req; + unsigned char *data; + unsigned char iv[AES_GCM_BLOCK_SIZE]; + unsigned char mac[AES_GCM_TAG_LEN]; + size_t 
last_update_bufflen; +}; + +struct aead_engine { + struct wd_ctx_config ctx_cfg; + struct wd_sched sched; + int numa_id; + int pid; + pthread_spinlock_t lock; +}; + +static struct aead_engine engine; + +static EVP_CIPHER *uadk_aes_128_gcm; +static EVP_CIPHER *uadk_aes_192_gcm; +static EVP_CIPHER *uadk_aes_256_gcm; + +static int uadk_e_aead_env_poll(void *ctx) +{ + __u64 rx_cnt = 0; + __u32 recv = 0; + /* Poll one packet currently */ + int expt = 1; + int ret; + + do { + ret = wd_aead_poll(expt, &recv); + if (ret < 0 || recv == expt) + return ret; + rx_cnt++; + } while (rx_cnt < ENGINE_RECV_MAX_CNT); + + fprintf(stderr, "failed to poll msg: timeout!\n"); + + return -ETIMEDOUT; +} + +static int uadk_e_aead_poll(void *ctx) +{ + struct aead_priv_ctx *priv = (struct aead_priv_ctx *) ctx; + __u64 rx_cnt = 0; + __u32 recv = 0; + /* Poll one packet currently */ + int expt = 1; + int ret, idx; + + if (priv->req.op_type == WD_CIPHER_ENCRYPTION_DIGEST) + idx = CTX_ASYNC_ENC; + else + idx = CTX_ASYNC_DEC; + + do { + ret = wd_aead_poll_ctx(idx, expt, &recv); + if (!ret && recv == expt) + return 0; + else if (ret == -EAGAIN) + rx_cnt++; + else + return RET_FAIL; + } while (rx_cnt < ENGINE_RECV_MAX_CNT); + + fprintf(stderr, "failed to recv msg: timeout!\n"); + + return -ETIMEDOUT; +} + +static handle_t sched_single_aead_init(handle_t h_sched_ctx, void *sched_param) +{ + struct sched_params *param = (struct sched_params *)sched_param; + struct sched_params *skey; + + skey = malloc(sizeof(struct sched_params)); + if (!skey) { + fprintf(stderr, "fail to alloc aead sched key!\n"); + return (handle_t)0; + } + + skey->numa_id = param->numa_id; + skey->type = param->type; + + return (handle_t)skey; +} + +static __u32 sched_single_pick_next_ctx(handle_t sched_ctx, void *sched_key, const int sched_mode) +{ + struct sched_params *key = (struct sched_params *)sched_key; + + if (sched_mode) { + if (key->type == WD_CIPHER_ENCRYPTION_DIGEST) + return CTX_ASYNC_ENC; + else + return 
CTX_ASYNC_DEC; + } else { + if (key->type == WD_CIPHER_ENCRYPTION_DIGEST) + return CTX_SYNC_ENC; + else + return CTX_SYNC_DEC; + } +} + +static int sched_single_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count) +{ + return 0; +} + +static int uadk_e_wd_aead_cipher_env_init(struct uacce_dev *dev) +{ + int ret; + + ret = uadk_e_set_env("WD_AEAD_CTX_NUM", dev->numa_id); + if (ret) + return ret; + + ret = wd_aead_env_init(NULL); + + async_register_poll_fn(ASYNC_TASK_AEAD, uadk_e_aead_env_poll); + + return ret; +} + +static int uadk_e_wd_aead_cipher_init(struct uacce_dev *dev) +{ + __u32 i, j; + int ret; + + engine.numa_id = dev->numa_id; + + ret = uadk_e_is_env_enabled("aead"); + if (ret) + return uadk_e_wd_aead_cipher_env_init(dev); + + memset(&engine.ctx_cfg, 0, sizeof(struct wd_ctx_config)); + engine.ctx_cfg.ctx_num = CTX_NUM; + engine.ctx_cfg.ctxs = calloc(CTX_NUM, sizeof(struct wd_ctx)); + if (!engine.ctx_cfg.ctxs) + return -ENOMEM; + + for (i = 0; i < CTX_NUM; i++) { + engine.ctx_cfg.ctxs[i].ctx = wd_request_ctx(dev); + if (!engine.ctx_cfg.ctxs[i].ctx) { + ret = -ENOMEM; + goto err_freectx; + } + } + + engine.ctx_cfg.ctxs[CTX_SYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + engine.ctx_cfg.ctxs[CTX_SYNC_DEC].op_type = CTX_TYPE_DECRYPT; + engine.ctx_cfg.ctxs[CTX_SYNC_ENC].ctx_mode = CTX_MODE_SYNC; + engine.ctx_cfg.ctxs[CTX_SYNC_DEC].ctx_mode = CTX_MODE_SYNC; + + engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].op_type = CTX_TYPE_DECRYPT; + engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].ctx_mode = CTX_MODE_ASYNC; + engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].ctx_mode = CTX_MODE_ASYNC; + + engine.sched.name = "sched_single"; + engine.sched.pick_next_ctx = sched_single_pick_next_ctx; + engine.sched.poll_policy = sched_single_poll_policy; + engine.sched.sched_init = sched_single_aead_init; + + ret = wd_aead_init(&engine.ctx_cfg, &engine.sched); + if (ret) + goto err_freectx; + + async_register_poll_fn(ASYNC_TASK_AEAD, uadk_e_aead_poll); 
+ return ret; + +err_freectx: + for (j = 0; j < i; j++) + wd_release_ctx(engine.ctx_cfg.ctxs[j].ctx); + + free(engine.ctx_cfg.ctxs); + + return ret; +} + +static int uadk_e_init_aead_cipher(void) +{ + struct uacce_dev *dev; + int ret; + + if (engine.pid != getpid()) { + pthread_spin_lock(&engine.lock); + if (engine.pid == getpid()) { + pthread_spin_unlock(&engine.lock); + return 1; + } + + dev = wd_get_accel_dev("aead"); + if (!dev) { + pthread_spin_unlock(&engine.lock); + fprintf(stderr, "failed to get device for aead.\n"); + return 0; + } + + ret = uadk_e_wd_aead_cipher_init(dev); + if (ret < 0) { + pthread_spin_unlock(&engine.lock); + fprintf(stderr, "failed to initiate aead cipher.\n"); + free(dev); + return 0; + } + + engine.pid = getpid(); + pthread_spin_unlock(&engine.lock); + free(dev); + } + + return 1; +} + +static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey, int ckey_len) +{ + struct sched_params params = {0}; + int ret; + + ret = uadk_e_init_aead_cipher(); + if (unlikely(!ret)) { + fprintf(stderr, "uadk failed to init aead HW!\n"); + return 0; + } + + params.type = priv->req.op_type; + ret = uadk_e_is_env_enabled("aead"); + if (ret) + params.type = 0; + + params.numa_id = engine.numa_id; + priv->setup.sched_param = ¶ms; + if (!priv->sess) { + priv->sess = wd_aead_alloc_sess(&priv->setup); + if (!priv->sess) { + fprintf(stderr, "uadk engine failed to alloc aead session!\n"); + return 0; + } + ret = wd_aead_set_authsize(priv->sess, AES_GCM_TAG_LEN); + if (ret < 0) { + fprintf(stderr, "uadk engine failed to set authsize!\n"); + goto out; + } + + ret = wd_aead_set_ckey(priv->sess, ckey, ckey_len); + if (ret) { + fprintf(stderr, "uadk engine failed to set ckey!\n"); + goto out; + } + priv->data = malloc(AEAD_BLOCK_SIZE << 1); + if (unlikely(!priv->data)) { + fprintf(stderr, "uadk engine failed to alloc data!\n"); + goto out; + } + } + + return 1; +out: + wd_aead_free_sess(priv->sess); + priv->sess = 0; + return 0; +} + +static 
int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, + const unsigned char *iv, int enc) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret, ckey_len; + + if (unlikely(!ckey)) + return 1; + + if (iv) + memcpy(priv->iv, iv, AES_GCM_IV_LEN); + + priv->setup.calg = WD_CIPHER_AES; + priv->setup.cmode = WD_CIPHER_GCM; + priv->setup.dalg = 0; + priv->setup.dmode = 0; + + priv->last_update_bufflen = 0; + priv->req.assoc_bytes = 0; + priv->req.out_bytes = 0; + priv->req.data_fmt = WD_FLAT_BUF; + + priv->req.iv = priv->iv; + priv->req.iv_bytes = AES_GCM_IV_LEN; + memset(priv->iv + AES_GCM_IV_LEN, 0, AES_GCM_CTR_LEN); + + priv->req.mac = priv->mac; + priv->req.mac_bytes = AES_GCM_TAG_LEN; + + if (enc) + priv->req.op_type = WD_CIPHER_ENCRYPTION_DIGEST; + else + priv->req.op_type = WD_CIPHER_DECRYPTION_DIGEST; + + ckey_len = EVP_CIPHER_CTX_key_length(ctx); + ret = uadk_e_ctx_init(priv, ckey, ckey_len); + if (!ret) + return 0; + + return 1; +} + +static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + + if (priv->sess) { + wd_aead_free_sess(priv->sess); + priv->sess = 0; + } + + if (priv->data) { + free(priv->data); + priv->data = NULL; + } + + return 1; +} + +static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int enc = EVP_CIPHER_CTX_encrypting(ctx); + + switch (type) { + case EVP_CTRL_INIT: + priv->req.iv_bytes = 0; + return 1; + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = priv->req.iv_bytes; + return 1; + case EVP_CTRL_GCM_SET_IVLEN: + if (arg != AES_GCM_IV_LEN) { + fprintf(stderr, "gcm only support 12 bytes.\n"); + return 0; + } + return 1; + case EVP_CTRL_GCM_GET_TAG: + if (arg <= 0 || arg > AES_GCM_TAG_LEN) { + fprintf(stderr, "TAG length invalid.\n"); + return 0; + } + + 
if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { + fprintf(stderr, "ctx memory pointer is invalid.\n"); + return 0; + } + + memcpy(ptr, EVP_CIPHER_CTX_buf_noconst(ctx), arg); + return 1; + case EVP_CTRL_GCM_SET_TAG: + if (arg != AES_GCM_TAG_LEN || enc) { + fprintf(stderr, "cannot set tag when encrypt or arg is invalid.\n"); + return 0; + } + + if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { + fprintf(stderr, "ctx memory pointer is invalid.\n"); + return 0; + } + + memcpy(EVP_CIPHER_CTX_buf_noconst(ctx), ptr, AES_GCM_TAG_LEN); + return 1; + default: + fprintf(stderr, "unsupported ctrl type: %d\n", type); + return 0; + } +} + +static int uadk_e_do_aes_gcm_first(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret; + + priv->req.assoc_bytes = inlen; + + if (ASYNC_get_current_job()) { + memcpy(priv->data + priv->last_update_bufflen, in, inlen); + priv->last_update_bufflen += inlen; + return 1; + } + + priv->req.src = (unsigned char *)in; + priv->req.msg_state = AEAD_MSG_FIRST; + + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead first operation failed, ret:%d!\n", ret); + return RET_FAIL; + } + + return 1; +} + +static int uadk_e_hw_update(struct aead_priv_ctx *priv, unsigned char *out, + unsigned char *in, size_t inlen) +{ + int ret; + + priv->req.src = in; + priv->req.dst = out; + priv->req.in_bytes = inlen; + priv->req.msg_state = AEAD_MSG_MIDDLE; + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead update operation failed, ret:%d!\n", ret); + return RET_FAIL; + } + + return 0; +} + +static int uadk_e_cache_data(struct aead_priv_ctx *priv, const unsigned char *in, size_t inlen) +{ + if (ASYNC_get_current_job() || !priv->req.assoc_bytes) { + if (priv->last_update_bufflen + inlen > AEAD_BLOCK_SIZE) { + fprintf(stderr, 
"aead input data length is too long!\n"); + return RET_FAIL; + } + memcpy(priv->data + priv->last_update_bufflen, in, inlen); + priv->last_update_bufflen += inlen; + return 0; + } + + return 1; +} + +static int uadk_e_do_aes_gcm_update(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret; + + ret = uadk_e_cache_data(priv, in, inlen); + if (ret <= 0) + return ret; + + ret = uadk_e_hw_update(priv, out, in, inlen); + if (ret < 0) + return RET_FAIL; + + return inlen; +} + +static void *uadk_e_aead_cb(struct wd_aead_req *req, void *data) +{ + struct uadk_e_cb_info *cb_param; + struct async_op *op; + + if (!req) + return NULL; + + cb_param = req->cb_param; + if (!cb_param) + return NULL; + + op = cb_param->op; + if (op && op->job && !op->done) { + op->done = 1; + async_free_poll_task(op->idx, 1); + async_wake_job(op->job); + } + + return NULL; +} + +static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op) +{ + struct uadk_e_cb_info *cb_param; + int ret = 0; + int idx; + + priv->req.in_bytes = priv->last_update_bufflen - priv->req.assoc_bytes; + priv->req.dst = priv->data + AEAD_BLOCK_SIZE; + + cb_param = malloc(sizeof(struct uadk_e_cb_info)); + if (!cb_param) { + fprintf(stderr, "failed to alloc cb_param.\n"); + return ret; + } + + cb_param->op = op; + cb_param->priv = priv; + priv->req.cb = uadk_e_aead_cb; + priv->req.cb_param = cb_param; + + ret = async_get_free_task(&idx); + if (!ret) + goto free_cb_param; + + op->idx = idx; + do { + ret = wd_do_aead_async(priv->sess, &priv->req); + if (ret < 0 && ret != -EBUSY) { + fprintf(stderr, "do sec aead async failed.\n"); + async_free_poll_task(op->idx, 0); + ret = 0; + goto free_cb_param; + } + } while (ret == -EBUSY); + + ret = async_pause_job(priv, op, ASYNC_TASK_AEAD); + +free_cb_param: + free(cb_param); + return ret; +} + +static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX 
*ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + struct async_op *op; + int ret, enc; + + op = malloc(sizeof(struct async_op)); + if (!op) + return RET_FAIL; + + ret = async_setup_async_event_notification(op); + if (unlikely(!ret)) { + fprintf(stderr, "failed to setup async event notification.\n"); + free(op); + return RET_FAIL; + } + + if (priv->req.assoc_bytes && !op->job) + priv->req.msg_state = AEAD_MSG_END; + else + priv->req.msg_state = AEAD_MSG_BLOCK; + + enc = EVP_CIPHER_CTX_encrypting(ctx); + if (!enc) + memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + + priv->req.src = priv->data; + if (!op->job) { + priv->req.in_bytes = priv->last_update_bufflen; + priv->req.dst = out; + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead final operation failed, ret: %d!\n", ret); + goto out; + } + } else { + ret = do_aead_async(priv, op); + if (!ret) + goto out; + + memcpy(out, priv->req.dst + priv->req.assoc_bytes, priv->req.in_bytes); + } + + if (enc) + memcpy(ctx_buf, priv->req.mac, AES_GCM_TAG_LEN); + + priv->last_update_bufflen = 0; + + free(op); + return priv->req.in_bytes; + +out: + (void)async_clear_async_event_notification(); + free(op); + return RET_FAIL; +} + +static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + int ret; + + if (in) { + if (out == NULL) + return uadk_e_do_aes_gcm_first(ctx, out, in, inlen); + + return uadk_e_do_aes_gcm_update(ctx, out, in, inlen); + } + + return uadk_e_do_aes_gcm_final(ctx, out, NULL, 0); +} + +#define UADK_AEAD_DESCR(name, block_size, key_size, iv_len, flags, ctx_size, \ + init, cipher, cleanup, set_params, get_params, ctrl) \ +do {\ + uadk_##name = EVP_CIPHER_meth_new(NID_##name, block_size, key_size); \ + if (uadk_##name == 0 || \ + 
!EVP_CIPHER_meth_set_iv_length(uadk_##name, iv_len) || \ + !EVP_CIPHER_meth_set_flags(uadk_##name, flags) || \ + !EVP_CIPHER_meth_set_impl_ctx_size(uadk_##name, ctx_size) || \ + !EVP_CIPHER_meth_set_init(uadk_##name, init) || \ + !EVP_CIPHER_meth_set_do_cipher(uadk_##name, cipher) || \ + !EVP_CIPHER_meth_set_cleanup(uadk_##name, cleanup) || \ + !EVP_CIPHER_meth_set_set_asn1_params(uadk_##name, set_params) || \ + !EVP_CIPHER_meth_set_get_asn1_params(uadk_##name, get_params) || \ + !EVP_CIPHER_meth_set_ctrl(uadk_##name, ctrl)) \ + return 0;\ +} while (0) + +EVP_CIPHER *uadk_create_gcm_cipher_meth(int nid) +{ + EVP_CIPHER *aead = NULL; + + switch (nid) { + case NID_aes_128_gcm: + UADK_AEAD_DESCR(aes_128_gcm, AES_GCM_BLOCK_SIZE, 16, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aes_gcm_init, uadk_e_do_aes_gcm, uadk_e_aes_gcm_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aes_gcm_set_ctrl); + aead = uadk_aes_128_gcm; + break; + case NID_aes_192_gcm: + UADK_AEAD_DESCR(aes_192_gcm, AES_GCM_BLOCK_SIZE, 24, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aes_gcm_init, uadk_e_do_aes_gcm, uadk_e_aes_gcm_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aes_gcm_set_ctrl); + aead = uadk_aes_192_gcm; + break; + case NID_aes_256_gcm: + UADK_AEAD_DESCR(aes_256_gcm, AES_GCM_BLOCK_SIZE, 32, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aes_gcm_init, uadk_e_do_aes_gcm, uadk_e_aes_gcm_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? 
NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aes_gcm_set_ctrl); + aead = uadk_aes_256_gcm; + break; + default: + aead = NULL; + break; + } + + return aead; +} + +static void destroy_aead(struct engine_cipher_info *info, int num) +{ + __u32 i; + + for (i = 0; i < num; i++) { + if (info[i].cipher != NULL) { + EVP_CIPHER_meth_free(info[i].cipher); + info[i].cipher = NULL; + } + } +} + +void uadk_e_destroy_aead(struct engine_cipher_info *info, int num) +{ + __u32 i; + int ret; + + if (engine.pid == getpid()) { + ret = uadk_e_is_env_enabled("aead"); + if (ret) { + wd_aead_env_uninit(); + } else { + wd_aead_uninit(); + for (i = 0; i < engine.ctx_cfg.ctx_num; i++) + wd_release_ctx(engine.ctx_cfg.ctxs[i].ctx); + + free(engine.ctx_cfg.ctxs); + } + engine.pid = 0; + } + + pthread_spin_destroy(&engine.lock); + destroy_aead(info, num); +} diff --git a/src/uadk_async.h b/src/uadk_async.h index 1208c30..678e392 100644 --- a/src/uadk_async.h +++ b/src/uadk_async.h @@ -41,6 +41,7 @@ typedef int (*async_recv_t)(void *ctx); enum task_type { ASYNC_TASK_CIPHER, ASYNC_TASK_DIGEST, + ASYNC_TASK_AEAD, ASYNC_TASK_RSA, ASYNC_TASK_DH, ASYNC_TASK_ECC, diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index ed25787..73be09d 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -25,6 +25,7 @@ #include <uadk/wd_sched.h> #include "uadk.h" #include "uadk_async.h" +#include "uadk_cipher_adapter.h"
#define UADK_DO_SOFT (-0xE0) #define CTX_SYNC_ENC 0 @@ -74,54 +75,8 @@ struct cipher_info { __u32 out_bytes; };
-static int platform; - #define SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT 192
-static int cipher_hw_v2_nids[] = { - NID_aes_128_cbc, - NID_aes_192_cbc, - NID_aes_256_cbc, - NID_aes_128_ecb, - NID_aes_192_ecb, - NID_aes_256_ecb, - NID_aes_128_xts, - NID_aes_256_xts, - NID_sm4_cbc, - NID_des_ede3_cbc, - NID_des_ede3_ecb, - NID_sm4_ecb, - 0, -}; - -static int cipher_hw_v3_nids[] = { - NID_aes_128_cbc, - NID_aes_192_cbc, - NID_aes_256_cbc, - NID_aes_128_ctr, - NID_aes_192_ctr, - NID_aes_256_ctr, - NID_aes_128_ecb, - NID_aes_192_ecb, - NID_aes_256_ecb, - NID_aes_128_xts, - NID_aes_256_xts, - NID_sm4_cbc, - NID_sm4_ecb, - NID_des_ede3_cbc, - NID_des_ede3_ecb, - NID_aes_128_cfb128, - NID_aes_192_cfb128, - NID_aes_256_cfb128, - NID_aes_128_ofb128, - NID_aes_192_ofb128, - NID_aes_256_ofb128, - NID_sm4_cfb128, - NID_sm4_ofb128, - NID_sm4_ctr, - 0, -}; - static EVP_CIPHER *uadk_aes_128_cbc; static EVP_CIPHER *uadk_aes_192_cbc; static EVP_CIPHER *uadk_aes_256_cbc; @@ -328,130 +283,6 @@ static void uadk_e_cipher_sw_cleanup(EVP_CIPHER_CTX *ctx) } }
-static int uadk_get_accel_platform(char *alg_name) -{ - struct uacce_dev *dev; - - dev = wd_get_accel_dev(alg_name); - if (dev == NULL) - return 0; - - if (!strcmp(dev->api, "hisi_qm_v2")) - platform = HW_V2; - else - platform = HW_V3; - free(dev); - - return 1; -} - -static int uadk_e_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, - const int **nids, int nid) -{ - int *cipher_nids; - __u32 size, i; - int ret = 1; - - if (platform == HW_V2) { - size = (sizeof(cipher_hw_v2_nids) - 1) / sizeof(int); - cipher_nids = cipher_hw_v2_nids; - } else { - size = (sizeof(cipher_hw_v3_nids) - 1) / sizeof(int); - cipher_nids = cipher_hw_v3_nids; - } - - if (!cipher) { - *nids = cipher_nids; - return size; - } - - for (i = 0; i < size; i++) { - if (nid == cipher_nids[i]) - break; - } - - switch (nid) { - case NID_aes_128_cbc: - *cipher = uadk_aes_128_cbc; - break; - case NID_aes_192_cbc: - *cipher = uadk_aes_192_cbc; - break; - case NID_aes_256_cbc: - *cipher = uadk_aes_256_cbc; - break; - case NID_aes_128_ctr: - *cipher = uadk_aes_128_ctr; - break; - case NID_aes_192_ctr: - *cipher = uadk_aes_192_ctr; - break; - case NID_aes_256_ctr: - *cipher = uadk_aes_256_ctr; - break; - case NID_aes_128_ecb: - *cipher = uadk_aes_128_ecb; - break; - case NID_aes_192_ecb: - *cipher = uadk_aes_192_ecb; - break; - case NID_aes_256_ecb: - *cipher = uadk_aes_256_ecb; - break; - case NID_aes_128_xts: - *cipher = uadk_aes_128_xts; - break; - case NID_aes_256_xts: - *cipher = uadk_aes_256_xts; - break; - case NID_sm4_cbc: - *cipher = uadk_sm4_cbc; - break; - case NID_sm4_ecb: - *cipher = uadk_sm4_ecb; - break; - case NID_des_ede3_cbc: - *cipher = uadk_des_ede3_cbc; - break; - case NID_des_ede3_ecb: - *cipher = uadk_des_ede3_ecb; - break; - case NID_aes_128_ofb128: - *cipher = uadk_aes_128_ofb128; - break; - case NID_aes_192_ofb128: - *cipher = uadk_aes_192_ofb128; - break; - case NID_aes_256_ofb128: - *cipher = uadk_aes_256_ofb128; - break; - case NID_aes_128_cfb128: - *cipher = 
uadk_aes_128_cfb128; - break; - case NID_aes_192_cfb128: - *cipher = uadk_aes_192_cfb128; - break; - case NID_aes_256_cfb128: - *cipher = uadk_aes_256_cfb128; - break; - case NID_sm4_ofb128: - *cipher = uadk_sm4_ofb128; - break; - case NID_sm4_cfb128: - *cipher = uadk_sm4_cfb128; - break; - case NID_sm4_ctr: - *cipher = uadk_sm4_ctr; - break; - default: - ret = 0; - *cipher = NULL; - break; - } - - return ret; -} - static handle_t sched_single_init(handle_t h_sched_ctx, void *sched_param) { struct sched_params *param = (struct sched_params *)sched_param; @@ -560,7 +391,8 @@ static int uadk_e_wd_cipher_env_init(struct uacce_dev *dev)
static int uadk_e_wd_cipher_init(struct uacce_dev *dev) { - int ret, i, j; + __u32 i, j; + int ret;
g_cipher_engine.numa_id = dev->numa_id;
@@ -978,210 +810,193 @@ do { \ return 0; \ } while (0)
-static int bind_v2_cipher(void) -{ - UADK_CIPHER_DESCR(aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - 
UADK_CIPHER_DESCR(des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(des_ede3_ecb, 8, 24, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - - return 0; -} - -static int bind_v3_cipher(void) -{ - UADK_CIPHER_DESCR(aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ofb128, 1, 24, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ofb128, 1, 32, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_cfb128, 1, 16, 
16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_cfb128, 1, 24, 16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_cfb128, 1, 32, 16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_cfb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - - return 0; -} - -int uadk_e_bind_cipher(ENGINE *e) +EVP_CIPHER *uadk_create_cipher_meth(int nid) { - int ret; + EVP_CIPHER *cipher;
- ret = uadk_get_accel_platform("cipher"); - if (!ret) { - fprintf(stderr, "failed to get accel hardware version.\n"); - return 0; + switch (nid) { + case NID_aes_128_cbc: + UADK_CIPHER_DESCR(aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_cbc; + break; + case NID_aes_192_cbc: + UADK_CIPHER_DESCR(aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_cbc; + break; + case NID_aes_256_cbc: + UADK_CIPHER_DESCR(aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_cbc; + break; + case NID_aes_128_ecb: + UADK_CIPHER_DESCR(aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ecb; + break; + case NID_aes_192_ecb: + UADK_CIPHER_DESCR(aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ecb; + break; + case NID_aes_256_ecb: + UADK_CIPHER_DESCR(aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_ecb; + break; + case NID_aes_128_xts: + UADK_CIPHER_DESCR(aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + 
EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_xts; + break; + case NID_aes_256_xts: + UADK_CIPHER_DESCR(aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_xts; + break; + case NID_sm4_cbc: + UADK_CIPHER_DESCR(sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_cbc; + break; + case NID_des_ede3_cbc: + UADK_CIPHER_DESCR(des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_des_ede3_cbc; + break; + case NID_des_ede3_ecb: + UADK_CIPHER_DESCR(des_ede3_ecb, 8, 24, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_des_ede3_ecb; + break; + case NID_aes_128_ctr: + UADK_CIPHER_DESCR(aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ctr; + break; + case NID_aes_192_ctr: + UADK_CIPHER_DESCR(aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ctr; + break; + case NID_aes_256_ctr: + UADK_CIPHER_DESCR(aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = 
uadk_aes_256_ctr; + break; + case NID_aes_128_ofb128: + UADK_CIPHER_DESCR(aes_128_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ofb128; + break; + case NID_aes_192_ofb128: + UADK_CIPHER_DESCR(aes_192_ofb128, 1, 24, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ofb128; + break; + case NID_aes_256_ofb128: + UADK_CIPHER_DESCR(aes_256_ofb128, 1, 32, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_ofb128; + break; + case NID_aes_128_cfb128: + UADK_CIPHER_DESCR(aes_128_cfb128, 1, 16, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_cfb128; + break; + case NID_aes_192_cfb128: + UADK_CIPHER_DESCR(aes_192_cfb128, 1, 24, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_cfb128; + break; + case NID_aes_256_cfb128: + UADK_CIPHER_DESCR(aes_256_cfb128, 1, 32, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_cfb128; + break; + case NID_sm4_ofb128: + UADK_CIPHER_DESCR(sm4_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_ofb128; + break; + case 
NID_sm4_cfb128: + UADK_CIPHER_DESCR(sm4_cfb128, 1, 16, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_cfb128; + break; + case NID_sm4_ctr: + UADK_CIPHER_DESCR(sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_ctr; + break; + default: + cipher = NULL; + break; }
- bind_v2_cipher(); - if (platform > HW_V2) - bind_v3_cipher(); - - return ENGINE_set_ciphers(e, uadk_e_engine_ciphers); -} - -static void destroy_v2_cipher(void) -{ - EVP_CIPHER_meth_free(uadk_aes_128_cbc); - uadk_aes_128_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_192_cbc); - uadk_aes_192_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_256_cbc); - uadk_aes_256_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_128_ecb); - uadk_aes_128_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ecb); - uadk_aes_192_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ecb); - uadk_aes_256_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_128_xts); - uadk_aes_128_xts = 0; - EVP_CIPHER_meth_free(uadk_aes_256_xts); - uadk_aes_256_xts = 0; - EVP_CIPHER_meth_free(uadk_sm4_cbc); - uadk_sm4_cbc = 0; - EVP_CIPHER_meth_free(uadk_des_ede3_cbc); - uadk_des_ede3_cbc = 0; - EVP_CIPHER_meth_free(uadk_des_ede3_ecb); - uadk_des_ede3_ecb = 0; - EVP_CIPHER_meth_free(uadk_sm4_ecb); - uadk_sm4_ecb = 0; + return cipher; }
-static void destroy_v3_cipher(void) +static void destroy_cipher(struct engine_cipher_info *info, int num) { - EVP_CIPHER_meth_free(uadk_aes_128_ctr); - uadk_aes_128_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ctr); - uadk_aes_192_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ctr); - uadk_aes_256_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_128_ofb128); - uadk_aes_128_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ofb128); - uadk_aes_192_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ofb128); - uadk_aes_256_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_128_cfb128); - uadk_aes_128_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_192_cfb128); - uadk_aes_192_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_256_cfb128); - uadk_aes_256_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_cfb128); - uadk_sm4_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_ofb128); - uadk_sm4_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_ctr); - uadk_sm4_ctr = 0; -} - -void uadk_e_destroy_cipher(void) -{ - __u32 i; - int ret; - - if (g_cipher_engine.pid == getpid()) { - ret = uadk_e_is_env_enabled("cipher"); - if (ret == ENV_ENABLED) { - wd_cipher_env_uninit(); - } else { - wd_cipher_uninit(); - for (i = 0; i < g_cipher_engine.ctx_cfg.ctx_num; i++) - wd_release_ctx(g_cipher_engine.ctx_cfg.ctxs[i].ctx); - free(g_cipher_engine.ctx_cfg.ctxs); + for (int i = 0; i != num; ++i) { + if (info[i].cipher != NULL) { + EVP_CIPHER_meth_free(info[i].cipher); + info[i].cipher = NULL; } - g_cipher_engine.pid = 0; } +}
- pthread_spin_destroy(&g_cipher_engine.lock); - - destroy_v2_cipher(); - if (platform > HW_V2) - destroy_v3_cipher(); +void uadk_e_destroy_cipher(struct engine_cipher_info *info, int num) +{ + destroy_cipher(info, num); }
void uadk_e_cipher_lock_init(void) diff --git a/src/uadk_cipher_adapter.c b/src/uadk_cipher_adapter.c new file mode 100644 index 0000000..c915df8 --- /dev/null +++ b/src/uadk_cipher_adapter.c @@ -0,0 +1,204 @@ +/* + * Copyright 2023 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include "uadk_cipher_adapter.h" + +#define HW_SEC_V2 0 +#define HW_SEC_V3 1 +#define OTHERS_HW 2 + +static int cipher_hw_v2_nids[] = { + NID_aes_128_cbc, + NID_aes_192_cbc, + NID_aes_256_cbc, + NID_aes_128_ecb, + NID_aes_192_ecb, + NID_aes_256_ecb, + NID_aes_128_xts, + NID_aes_256_xts, + NID_sm4_cbc, + NID_des_ede3_cbc, + NID_des_ede3_ecb, + NID_aes_128_gcm, + NID_aes_192_gcm, + NID_aes_256_gcm +}; + +static int cipher_hw_v3_nids[] = { + NID_aes_128_cbc, + NID_aes_192_cbc, + NID_aes_256_cbc, + NID_aes_128_ctr, + NID_aes_192_ctr, + NID_aes_256_ctr, + NID_aes_128_ecb, + NID_aes_192_ecb, + NID_aes_256_ecb, + NID_aes_128_xts, + NID_aes_256_xts, + NID_sm4_cbc, + NID_sm4_ecb, + NID_des_ede3_cbc, + NID_des_ede3_ecb, + NID_aes_128_cfb128, + NID_aes_192_cfb128, + NID_aes_256_cfb128, + NID_aes_128_ofb128, + NID_aes_192_ofb128, + NID_aes_256_ofb128, + NID_sm4_cfb128, + NID_sm4_ofb128, + NID_sm4_ctr, + NID_aes_128_gcm, + NID_aes_192_gcm, + NID_aes_256_gcm +}; + +static struct engine_cipher_info c_info[] = { + {NID_aes_128_cbc, NULL}, + {NID_aes_192_cbc, NULL}, + {NID_aes_256_cbc, NULL}, + {NID_aes_128_ctr, NULL}, + {NID_aes_192_ctr, 
NULL}, + {NID_aes_256_ctr, NULL}, + {NID_aes_128_ecb, NULL}, + {NID_aes_192_ecb, NULL}, + {NID_aes_256_ecb, NULL}, + {NID_aes_128_xts, NULL}, + {NID_aes_256_xts, NULL}, + {NID_sm4_cbc, NULL}, + {NID_sm4_ecb, NULL}, + {NID_des_ede3_cbc, NULL}, + {NID_des_ede3_ecb, NULL}, + {NID_aes_128_cfb128, NULL}, + {NID_aes_192_cfb128, NULL}, + {NID_aes_256_cfb128, NULL}, + {NID_aes_128_ofb128, NULL}, + {NID_aes_192_ofb128, NULL}, + {NID_aes_256_ofb128, NULL}, + {NID_sm4_cfb128, NULL}, + {NID_sm4_ofb128, NULL}, + {NID_sm4_ctr, NULL}, + {NID_aes_128_gcm, NULL}, + {NID_aes_192_gcm, NULL}, + {NID_aes_256_gcm, NULL} +}; + +static const unsigned int num_cc = ARRAY_SIZE(c_info); + +static void uadk_e_create_ciphers(int index) +{ + switch (c_info[index].nid) { + case NID_aes_128_gcm: + case NID_aes_192_gcm: + case NID_aes_256_gcm: + c_info[index].cipher = uadk_create_gcm_cipher_meth(c_info[index].nid); + break; + case NID_aes_128_cbc: + case NID_aes_192_cbc: + case NID_aes_256_cbc: + case NID_aes_128_ctr: + case NID_aes_192_ctr: + case NID_aes_256_ctr: + case NID_aes_128_ecb: + case NID_aes_192_ecb: + case NID_aes_256_ecb: + case NID_aes_128_xts: + case NID_aes_256_xts: + case NID_sm4_cbc: + case NID_sm4_ecb: + case NID_des_ede3_cbc: + case NID_des_ede3_ecb: + case NID_aes_128_cfb128: + case NID_aes_192_cfb128: + case NID_aes_256_cfb128: + case NID_aes_128_ofb128: + case NID_aes_192_ofb128: + case NID_aes_256_ofb128: + case NID_sm4_cfb128: + case NID_sm4_ofb128: + case NID_sm4_ctr: + c_info[index].cipher = uadk_create_cipher_meth(c_info[index].nid); + break; + default: + break; + } +} + +int uadk_e_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) +{ + int platform = OTHERS_HW; + struct uacce_dev *dev; + __u32 i; + + if (!e) + return 0; + + if ((nids == NULL) && ((cipher == NULL) || (nid < 0))) { + if (cipher != NULL) + *cipher = NULL; + return 0; + } + + dev = wd_get_accel_dev("cipher"); + if (!dev) { + fprintf(stderr, "no device available, switch to 
software!\n"); + return 0; + } + + if (!strcmp(dev->api, "hisi_qm_v2")) + platform = HW_SEC_V2; + else if (!strcmp(dev->api, "hisi_qm_v3")) + platform = HW_SEC_V3; + + free(dev); + + if (cipher == NULL) { + if (platform == HW_SEC_V2) { + *nids = cipher_hw_v2_nids; + return ARRAY_SIZE(cipher_hw_v2_nids); + } else if (platform == HW_SEC_V3) { + *nids = cipher_hw_v3_nids; + return ARRAY_SIZE(cipher_hw_v3_nids); + } + + return 0; + } + + for (i = 0; i < num_cc; i++) { + if (nid == c_info[i].nid) { + if (c_info[i].cipher == NULL) + uadk_e_create_ciphers(i); + + *cipher = c_info[i].cipher; + return 1; + } + } + + *cipher = NULL; + return 0; +} + +int uadk_e_bind_ciphers(ENGINE *e) +{ + return ENGINE_set_ciphers(e, uadk_e_ciphers); +} + +void uadk_e_destroy_ciphers(void) +{ + uadk_e_destroy_cipher(c_info, num_cc); + uadk_e_destroy_aead(c_info, num_cc); +} diff --git a/src/uadk_cipher_adapter.h b/src/uadk_cipher_adapter.h new file mode 100644 index 0000000..f7b6fb4 --- /dev/null +++ b/src/uadk_cipher_adapter.h @@ -0,0 +1,36 @@ +/* + * Copyright 2023 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +#ifndef UADK_ADAPT_H +#define UADK_ADAPT_H +#include <openssl/engine.h> +#include <uadk/wd.h> + +struct engine_cipher_info { + int nid; + EVP_CIPHER *cipher; +}; + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +EVP_CIPHER *uadk_create_gcm_cipher_meth(int nid); +EVP_CIPHER *uadk_create_cipher_meth(int nid); +void uadk_e_destroy_aead(struct engine_cipher_info *info, int num); +void uadk_e_destroy_cipher(struct engine_cipher_info *info, int num); + +int uadk_e_bind_ciphers(ENGINE *e); +void uadk_e_destroy_ciphers(void); +#endif diff --git a/src/uadk_engine_init.c b/src/uadk_engine_init.c index cf54360..33707bf 100644 --- a/src/uadk_engine_init.c +++ b/src/uadk_engine_init.c @@ -24,15 +24,17 @@ #include <uadk/wd.h> #include "uadk.h" #include "uadk_async.h" +#include "uadk_cipher_adapter.h" #ifdef KAE #include "v1/uadk_v1.h" #endif
#define UADK_CMD_ENABLE_CIPHER_ENV ENGINE_CMD_BASE -#define UADK_CMD_ENABLE_DIGEST_ENV (ENGINE_CMD_BASE + 1) -#define UADK_CMD_ENABLE_RSA_ENV (ENGINE_CMD_BASE + 2) -#define UADK_CMD_ENABLE_DH_ENV (ENGINE_CMD_BASE + 3) -#define UADK_CMD_ENABLE_ECC_ENV (ENGINE_CMD_BASE + 4) +#define UADK_CMD_ENABLE_AEAD_ENV (ENGINE_CMD_BASE + 1) +#define UADK_CMD_ENABLE_DIGEST_ENV (ENGINE_CMD_BASE + 2) +#define UADK_CMD_ENABLE_RSA_ENV (ENGINE_CMD_BASE + 3) +#define UADK_CMD_ENABLE_DH_ENV (ENGINE_CMD_BASE + 4) +#define UADK_CMD_ENABLE_ECC_ENV (ENGINE_CMD_BASE + 5)
/* Constants used when creating the ENGINE */ static const char *engine_uadk_id = "uadk_engine"; @@ -60,6 +62,12 @@ static const ENGINE_CMD_DEFN g_uadk_cmd_defns[] = { "Enable or Disable cipher engine environment variable.", ENGINE_CMD_FLAG_NUMERIC }, + { + UADK_CMD_ENABLE_AEAD_ENV, + "UADK_CMD_ENABLE_AEAD_ENV", + "Enable or Disable aead engine environment variable.", + ENGINE_CMD_FLAG_NUMERIC + }, { UADK_CMD_ENABLE_DIGEST_ENV, "UADK_CMD_ENABLE_DIGEST_ENV", @@ -104,6 +112,7 @@ struct uadk_alg_env_enabled {
static struct uadk_alg_env_enabled uadk_env_enabled[] = { { "cipher", 0 }, + { "aead", 0 }, { "digest", 0 }, { "rsa", 0 }, { "dh", 0 }, @@ -176,6 +185,9 @@ static int uadk_engine_ctrl(ENGINE *e, int cmd, long i, case UADK_CMD_ENABLE_CIPHER_ENV: uadk_e_set_env_enabled("cipher", i); break; + case UADK_CMD_ENABLE_AEAD_ENV: + uadk_e_set_env_enabled("aead", i); + break; case UADK_CMD_ENABLE_DIGEST_ENV: uadk_e_set_env_enabled("digest", i); break; @@ -210,7 +222,7 @@ static int uadk_destroy(ENGINE *e) #endif
if (uadk_cipher) - uadk_e_destroy_cipher(); + uadk_e_destroy_ciphers(); if (uadk_digest) uadk_e_destroy_digest(); if (uadk_rsa) @@ -328,7 +340,7 @@ static void bind_fn_uadk_alg(ENGINE *e)
dev = wd_get_accel_dev("cipher"); if (dev) { - if (!uadk_e_bind_cipher(e)) + if (!uadk_e_bind_ciphers(e)) fprintf(stderr, "uadk bind cipher failed\n"); else uadk_cipher = 1;
Fix an issue where hardware resources are not released and locks are not destroyed.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk_cipher.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 73be09d..63cc738 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -996,6 +996,24 @@ static void destroy_cipher(struct engine_cipher_info *info, int num)
void uadk_e_destroy_cipher(struct engine_cipher_info *info, int num) { + __u32 i; + int ret; + + if (g_cipher_engine.pid == getpid()) { + ret = uadk_e_is_env_enabled("cipher"); + if (ret == ENV_ENABLED) { + wd_cipher_env_uninit(); + } else { + wd_cipher_uninit(); + for (i = 0; i < g_cipher_engine.ctx_cfg.ctx_num; i++) + wd_release_ctx(g_cipher_engine.ctx_cfg.ctxs[i].ctx); + free(g_cipher_engine.ctx_cfg.ctxs); + } + g_cipher_engine.pid = 0; + } + + pthread_spin_destroy(&g_cipher_engine.lock); + destroy_cipher(info, num); }
Fix an issue where the AEAD global lock is not initialized.
In the multi-thread scenario, if the hardware is faulty and the get_dev call returns NULL, the uadk engine should continue to complete its registration instead of returning an error.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk.h | 1 + src/uadk_aead.c | 81 ++++++++++++++++++++++-------------------- src/uadk_engine_init.c | 4 ++- 3 files changed, 47 insertions(+), 39 deletions(-)
diff --git a/src/uadk.h b/src/uadk.h index 4cf2c13..3dbaba1 100644 --- a/src/uadk.h +++ b/src/uadk.h @@ -42,5 +42,6 @@ void uadk_e_ecc_lock_init(void); void uadk_e_rsa_lock_init(void); void uadk_e_dh_lock_init(void); void uadk_e_cipher_lock_init(void); +void uadk_e_aead_lock_init(void); void uadk_e_digest_lock_init(void); #endif diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 64a5f5c..360f3f8 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -61,7 +61,7 @@ struct aead_engine { pthread_spinlock_t lock; };
-static struct aead_engine engine; +static struct aead_engine g_aead_engine;
static EVP_CIPHER *uadk_aes_128_gcm; static EVP_CIPHER *uadk_aes_192_gcm; @@ -175,42 +175,42 @@ static int uadk_e_wd_aead_cipher_init(struct uacce_dev *dev) __u32 i, j; int ret;
- engine.numa_id = dev->numa_id; + g_aead_engine.numa_id = dev->numa_id;
ret = uadk_e_is_env_enabled("aead"); if (ret) return uadk_e_wd_aead_cipher_env_init(dev);
- memset(&engine.ctx_cfg, 0, sizeof(struct wd_ctx_config)); - engine.ctx_cfg.ctx_num = CTX_NUM; - engine.ctx_cfg.ctxs = calloc(CTX_NUM, sizeof(struct wd_ctx)); - if (!engine.ctx_cfg.ctxs) + memset(&g_aead_engine.ctx_cfg, 0, sizeof(struct wd_ctx_config)); + g_aead_engine.ctx_cfg.ctx_num = CTX_NUM; + g_aead_engine.ctx_cfg.ctxs = calloc(CTX_NUM, sizeof(struct wd_ctx)); + if (!g_aead_engine.ctx_cfg.ctxs) return -ENOMEM;
for (i = 0; i < CTX_NUM; i++) { - engine.ctx_cfg.ctxs[i].ctx = wd_request_ctx(dev); - if (!engine.ctx_cfg.ctxs[i].ctx) { + g_aead_engine.ctx_cfg.ctxs[i].ctx = wd_request_ctx(dev); + if (!g_aead_engine.ctx_cfg.ctxs[i].ctx) { ret = -ENOMEM; goto err_freectx; } }
- engine.ctx_cfg.ctxs[CTX_SYNC_ENC].op_type = CTX_TYPE_ENCRYPT; - engine.ctx_cfg.ctxs[CTX_SYNC_DEC].op_type = CTX_TYPE_DECRYPT; - engine.ctx_cfg.ctxs[CTX_SYNC_ENC].ctx_mode = CTX_MODE_SYNC; - engine.ctx_cfg.ctxs[CTX_SYNC_DEC].ctx_mode = CTX_MODE_SYNC; + g_aead_engine.ctx_cfg.ctxs[CTX_SYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + g_aead_engine.ctx_cfg.ctxs[CTX_SYNC_DEC].op_type = CTX_TYPE_DECRYPT; + g_aead_engine.ctx_cfg.ctxs[CTX_SYNC_ENC].ctx_mode = CTX_MODE_SYNC; + g_aead_engine.ctx_cfg.ctxs[CTX_SYNC_DEC].ctx_mode = CTX_MODE_SYNC;
- engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].op_type = CTX_TYPE_ENCRYPT; - engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].op_type = CTX_TYPE_DECRYPT; - engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].ctx_mode = CTX_MODE_ASYNC; - engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].ctx_mode = CTX_MODE_ASYNC; + g_aead_engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + g_aead_engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].op_type = CTX_TYPE_DECRYPT; + g_aead_engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].ctx_mode = CTX_MODE_ASYNC; + g_aead_engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].ctx_mode = CTX_MODE_ASYNC;
- engine.sched.name = "sched_single"; - engine.sched.pick_next_ctx = sched_single_pick_next_ctx; - engine.sched.poll_policy = sched_single_poll_policy; - engine.sched.sched_init = sched_single_aead_init; + g_aead_engine.sched.name = "sched_single"; + g_aead_engine.sched.pick_next_ctx = sched_single_pick_next_ctx; + g_aead_engine.sched.poll_policy = sched_single_poll_policy; + g_aead_engine.sched.sched_init = sched_single_aead_init;
- ret = wd_aead_init(&engine.ctx_cfg, &engine.sched); + ret = wd_aead_init(&g_aead_engine.ctx_cfg, &g_aead_engine.sched); if (ret) goto err_freectx;
@@ -219,9 +219,9 @@ static int uadk_e_wd_aead_cipher_init(struct uacce_dev *dev)
err_freectx: for (j = 0; j < i; j++) - wd_release_ctx(engine.ctx_cfg.ctxs[j].ctx); + wd_release_ctx(g_aead_engine.ctx_cfg.ctxs[j].ctx);
- free(engine.ctx_cfg.ctxs); + free(g_aead_engine.ctx_cfg.ctxs);
return ret; } @@ -231,30 +231,30 @@ static int uadk_e_init_aead_cipher(void) struct uacce_dev *dev; int ret;
- if (engine.pid != getpid()) { - pthread_spin_lock(&engine.lock); - if (engine.pid == getpid()) { - pthread_spin_unlock(&engine.lock); + if (g_aead_engine.pid != getpid()) { + pthread_spin_lock(&g_aead_engine.lock); + if (g_aead_engine.pid == getpid()) { + pthread_spin_unlock(&g_aead_engine.lock); return 1; }
dev = wd_get_accel_dev("aead"); if (!dev) { - pthread_spin_unlock(&engine.lock); + pthread_spin_unlock(&g_aead_engine.lock); fprintf(stderr, "failed to get device for aead.\n"); return 0; }
ret = uadk_e_wd_aead_cipher_init(dev); if (ret < 0) { - pthread_spin_unlock(&engine.lock); + pthread_spin_unlock(&g_aead_engine.lock); fprintf(stderr, "failed to initiate aead cipher.\n"); free(dev); return 0; }
- engine.pid = getpid(); - pthread_spin_unlock(&engine.lock); + g_aead_engine.pid = getpid(); + pthread_spin_unlock(&g_aead_engine.lock); free(dev); }
@@ -277,7 +277,7 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey if (ret) params.type = 0;
- params.numa_id = engine.numa_id; + params.numa_id = g_aead_engine.numa_id; priv->setup.sched_param = ¶ms; if (!priv->sess) { priv->sess = wd_aead_alloc_sess(&priv->setup); @@ -713,20 +713,25 @@ void uadk_e_destroy_aead(struct engine_cipher_info *info, int num) __u32 i; int ret;
- if (engine.pid == getpid()) { + if (g_aead_engine.pid == getpid()) { ret = uadk_e_is_env_enabled("aead"); if (ret) { wd_aead_env_uninit(); } else { wd_aead_uninit(); - for (i = 0; i < engine.ctx_cfg.ctx_num; i++) - wd_release_ctx(engine.ctx_cfg.ctxs[i].ctx); + for (i = 0; i < g_aead_engine.ctx_cfg.ctx_num; i++) + wd_release_ctx(g_aead_engine.ctx_cfg.ctxs[i].ctx);
- free(engine.ctx_cfg.ctxs); + free(g_aead_engine.ctx_cfg.ctxs); } - engine.pid = 0; + g_aead_engine.pid = 0; }
- pthread_spin_destroy(&engine.lock); + pthread_spin_destroy(&g_aead_engine.lock); destroy_aead(info, num); } + +void uadk_e_aead_lock_init(void) +{ + pthread_spin_init(&g_aead_engine.lock, PTHREAD_PROCESS_PRIVATE); +} diff --git a/src/uadk_engine_init.c b/src/uadk_engine_init.c index 33707bf..c9cdd10 100644 --- a/src/uadk_engine_init.c +++ b/src/uadk_engine_init.c @@ -262,8 +262,10 @@ static int uadk_init(ENGINE *e)
if (uadk_digest) uadk_e_digest_lock_init(); - if (uadk_cipher) + if (uadk_cipher) { uadk_e_cipher_lock_init(); + uadk_e_aead_lock_init(); + } if (uadk_rsa) uadk_e_rsa_lock_init(); if (uadk_dh)
In the multi-thread scenario, if the hardware is faulty and the get_dev call returns NULL, the uadk engine should continue to complete its registration instead of returning an error.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk_cipher_adapter.c | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/src/uadk_cipher_adapter.c b/src/uadk_cipher_adapter.c index c915df8..2c4ed15 100644 --- a/src/uadk_cipher_adapter.c +++ b/src/uadk_cipher_adapter.c @@ -16,9 +16,11 @@ */ #include "uadk_cipher_adapter.h"
-#define HW_SEC_V2 0 -#define HW_SEC_V3 1 -#define OTHERS_HW 2 +#define HW_UNINIT -1 +#define HW_SEC_V2 0 +#define HW_SEC_V3 1 + +static int g_platform = HW_UNINIT;
static int cipher_hw_v2_nids[] = { NID_aes_128_cbc, @@ -140,7 +142,6 @@ static void uadk_e_create_ciphers(int index)
int uadk_e_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) { - int platform = OTHERS_HW; struct uacce_dev *dev; __u32 i;
@@ -153,24 +154,26 @@ int uadk_e_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int n return 0; }
- dev = wd_get_accel_dev("cipher"); - if (!dev) { - fprintf(stderr, "no device available, switch to software!\n"); - return 0; - } + if (g_platform == HW_UNINIT) { + dev = wd_get_accel_dev("cipher"); + if (!dev) { + fprintf(stderr, "no device available, switch to software!\n"); + return 0; + }
- if (!strcmp(dev->api, "hisi_qm_v2")) - platform = HW_SEC_V2; - else if (!strcmp(dev->api, "hisi_qm_v3")) - platform = HW_SEC_V3; + if (!strcmp(dev->api, "hisi_qm_v2")) + g_platform = HW_SEC_V2; + else + g_platform = HW_SEC_V3;
- free(dev); + free(dev); + }
if (cipher == NULL) { - if (platform == HW_SEC_V2) { + if (g_platform == HW_SEC_V2) { *nids = cipher_hw_v2_nids; return ARRAY_SIZE(cipher_hw_v2_nids); - } else if (platform == HW_SEC_V3) { + } else if (g_platform == HW_SEC_V3) { *nids = cipher_hw_v3_nids; return ARRAY_SIZE(cipher_hw_v3_nids); }
Distinguish between encryption and decryption when checking the tag length.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk_aead.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 360f3f8..00ba4d2 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -375,6 +375,7 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void { struct aead_priv_ctx *priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + void *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); int enc = EVP_CIPHER_CTX_encrypting(ctx);
switch (type) { @@ -391,30 +392,30 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void } return 1; case EVP_CTRL_GCM_GET_TAG: - if (arg <= 0 || arg > AES_GCM_TAG_LEN) { - fprintf(stderr, "TAG length invalid.\n"); + if (arg <= 0 || arg > AES_GCM_TAG_LEN || !enc) { + fprintf(stderr, "cannot get tag when decrypt or arg is invalid.\n"); return 0; }
- if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { - fprintf(stderr, "ctx memory pointer is invalid.\n"); + if (ctx_buf == NULL || ptr == NULL) { + fprintf(stderr, "failed to get tag, ctx memory pointer is invalid.\n"); return 0; }
- memcpy(ptr, EVP_CIPHER_CTX_buf_noconst(ctx), arg); + memcpy(ptr, ctx_buf, arg); return 1; case EVP_CTRL_GCM_SET_TAG: - if (arg != AES_GCM_TAG_LEN || enc) { + if (arg <= 0 || arg > AES_GCM_TAG_LEN || enc) { fprintf(stderr, "cannot set tag when encrypt or arg is invalid.\n"); return 0; }
- if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { - fprintf(stderr, "ctx memory pointer is invalid.\n"); + if (ctx_buf == NULL || ptr == NULL) { + fprintf(stderr, "failed to set tag, ctx memory pointer is invalid.\n"); return 0; }
- memcpy(EVP_CIPHER_CTX_buf_noconst(ctx), ptr, AES_GCM_TAG_LEN); + memcpy(ctx_buf, ptr, arg); return 1; default: fprintf(stderr, "unsupported ctrl type: %d\n", type);
According to the OpenSSL usage model, in the cipher update stage the input data length should be the same as the output length. However, cached data is currently used for block mode; this patch is intended to fix that problem.
In the AEAD stream mode, the length of the final packet is 0 and the result can be calculated in the final stage. In the block mode (such as the asynchronous and zero-length AAD scenarios), the calculation result must be returned to the user in the update stage.
However, this adds an additional restriction: users need to set the MAC before the update stage.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk_aead.c | 278 +++++++++++++++++++++++++++--------------------- 1 file changed, 159 insertions(+), 119 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c index 00ba4d2..e27aba5 100644 --- a/src/uadk_aead.c +++ b/src/uadk_aead.c @@ -28,6 +28,7 @@ #include "uadk_utils.h"
#define RET_FAIL -1 +#define STATE_FAIL 0xFFFF #define CTX_SYNC_ENC 0 #define CTX_SYNC_DEC 1 #define CTX_ASYNC_ENC 2 @@ -50,7 +51,7 @@ struct aead_priv_ctx { unsigned char *data; unsigned char iv[AES_GCM_BLOCK_SIZE]; unsigned char mac[AES_GCM_TAG_LEN]; - size_t last_update_bufflen; + int taglen; };
struct aead_engine { @@ -267,10 +268,8 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey int ret;
ret = uadk_e_init_aead_cipher(); - if (unlikely(!ret)) { - fprintf(stderr, "uadk failed to init aead HW!\n"); + if (!ret) return 0; - }
params.type = priv->req.op_type; ret = uadk_e_is_env_enabled("aead"); @@ -296,10 +295,14 @@ static int uadk_e_ctx_init(struct aead_priv_ctx *priv, const unsigned char *ckey fprintf(stderr, "uadk engine failed to set ckey!\n"); goto out; } - priv->data = malloc(AEAD_BLOCK_SIZE << 1); - if (unlikely(!priv->data)) { - fprintf(stderr, "uadk engine failed to alloc data!\n"); - goto out; + + if (ASYNC_get_current_job()) { + /* Memory needs to be reserved for both input and output. */ + priv->data = malloc(AEAD_BLOCK_SIZE << 1); + if (unlikely(!priv->data)) { + fprintf(stderr, "uadk engine failed to alloc data!\n"); + goto out; + } } }
@@ -313,10 +316,15 @@ out: static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, const unsigned char *iv, int enc) { - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + struct aead_priv_ctx *priv; int ret, ckey_len;
+ priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (!priv) { + fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); + return 0; + } + if (unlikely(!ckey)) return 1;
@@ -328,7 +336,6 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, priv->setup.dalg = 0; priv->setup.dmode = 0;
- priv->last_update_bufflen = 0; priv->req.assoc_bytes = 0; priv->req.out_bytes = 0; priv->req.data_fmt = WD_FLAT_BUF; @@ -339,6 +346,8 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey,
priv->req.mac = priv->mac; priv->req.mac_bytes = AES_GCM_TAG_LEN; + priv->taglen = 0; + priv->data = NULL;
if (enc) priv->req.op_type = WD_CIPHER_ENCRYPTION_DIGEST; @@ -355,8 +364,13 @@ static int uadk_e_aes_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey,
static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx) { - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + struct aead_priv_ctx *priv; + + priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (!priv) { + fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); + return 0; + }
if (priv->sess) { wd_aead_free_sess(priv->sess); @@ -373,10 +387,15 @@ static int uadk_e_aes_gcm_cleanup(EVP_CIPHER_CTX *ctx)
static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) { - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); void *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); int enc = EVP_CIPHER_CTX_encrypting(ctx); + struct aead_priv_ctx *priv; + + priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (!priv) { + fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); + return 0; + }
switch (type) { case EVP_CTRL_INIT: @@ -387,7 +406,7 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void return 1; case EVP_CTRL_GCM_SET_IVLEN: if (arg != AES_GCM_IV_LEN) { - fprintf(stderr, "gcm only support 12 bytes.\n"); + fprintf(stderr, "invalid: aead gcm iv length only support 12B.\n"); return 0; } return 1; @@ -416,6 +435,7 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void }
memcpy(ctx_buf, ptr, arg); + priv->taglen = arg; return 1; default: fprintf(stderr, "unsupported ctrl type: %d\n", type); @@ -423,18 +443,16 @@ static int uadk_e_aes_gcm_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void } }
-static int uadk_e_do_aes_gcm_first(EVP_CIPHER_CTX *ctx, unsigned char *out, +static int uadk_e_do_aes_gcm_first(struct aead_priv_ctx *priv, unsigned char *out, const unsigned char *in, size_t inlen) { - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); int ret;
priv->req.assoc_bytes = inlen;
+ /* Asynchronous jobs use the block mode. */ if (ASYNC_get_current_job()) { - memcpy(priv->data + priv->last_update_bufflen, in, inlen); - priv->last_update_bufflen += inlen; + memcpy(priv->data, in, inlen); return 1; }
@@ -442,68 +460,43 @@ static int uadk_e_do_aes_gcm_first(EVP_CIPHER_CTX *ctx, unsigned char *out, priv->req.msg_state = AEAD_MSG_FIRST;
ret = wd_do_aead_sync(priv->sess, &priv->req); - if (ret < 0) { - fprintf(stderr, "do sec aead first operation failed, ret:%d!\n", ret); + if (unlikely(ret < 0)) { + fprintf(stderr, "do aead first operation failed, ret: %d!\n", ret); return RET_FAIL; }
return 1; }
-static int uadk_e_hw_update(struct aead_priv_ctx *priv, unsigned char *out, - unsigned char *in, size_t inlen) +static int do_aead_sync(struct aead_priv_ctx *priv, unsigned char *out, + const unsigned char *in, size_t inlen) { int ret;
- priv->req.src = in; + /* Due to a hardware limitation, zero-length aad using block mode. */ + if (priv->req.assoc_bytes) + priv->req.msg_state = AEAD_MSG_MIDDLE; + else + priv->req.msg_state = AEAD_MSG_BLOCK; + + priv->req.src = (unsigned char *)in; priv->req.dst = out; priv->req.in_bytes = inlen; - priv->req.msg_state = AEAD_MSG_MIDDLE; + priv->req.state = 0; ret = wd_do_aead_sync(priv->sess, &priv->req); - if (ret < 0) { - fprintf(stderr, "do sec aead update operation failed, ret:%d!\n", ret); + if (ret < 0 || priv->req.state) { + fprintf(stderr, "do aead update operation failed, ret: %d, state: %u!\n", + ret, priv->req.state); return RET_FAIL; }
- return 0; -} - -static int uadk_e_cache_data(struct aead_priv_ctx *priv, const unsigned char *in, size_t inlen) -{ - if (ASYNC_get_current_job() || !priv->req.assoc_bytes) { - if (priv->last_update_bufflen + inlen > AEAD_BLOCK_SIZE) { - fprintf(stderr, "aead input data length is too long!\n"); - return RET_FAIL; - } - memcpy(priv->data + priv->last_update_bufflen, in, inlen); - priv->last_update_bufflen += inlen; - return 0; - } - - return 1; -} - -static int uadk_e_do_aes_gcm_update(EVP_CIPHER_CTX *ctx, unsigned char *out, - const unsigned char *in, size_t inlen) -{ - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); - int ret; - - ret = uadk_e_cache_data(priv, in, inlen); - if (ret <= 0) - return ret; - - ret = uadk_e_hw_update(priv, out, in, inlen); - if (ret < 0) - return RET_FAIL; - return inlen; }
static void *uadk_e_aead_cb(struct wd_aead_req *req, void *data) { struct uadk_e_cb_info *cb_param; + struct wd_aead_req *req_origin; struct async_op *op;
if (!req) @@ -513,6 +506,9 @@ static void *uadk_e_aead_cb(struct wd_aead_req *req, void *data) if (!cb_param) return NULL;
+ req_origin = cb_param->priv; + req_origin->state = req->state; + op = cb_param->op; if (op && op->job && !op->done) { op->done = 1; @@ -523,35 +519,49 @@ static void *uadk_e_aead_cb(struct wd_aead_req *req, void *data) return NULL; }
-static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op) +static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op, + unsigned char *out, const unsigned char *in, size_t inlen) { struct uadk_e_cb_info *cb_param; - int ret = 0; - int idx; + int ret;
- priv->req.in_bytes = priv->last_update_bufflen - priv->req.assoc_bytes; - priv->req.dst = priv->data + AEAD_BLOCK_SIZE; + if (unlikely(priv->req.assoc_bytes + inlen > AEAD_BLOCK_SIZE)) { + fprintf(stderr, "aead input data length is too long!\n"); + return 0; + } + + priv->req.in_bytes = inlen; + /* AAD data is input or output together with plaintext or ciphertext. */ + if (priv->req.assoc_bytes) { + memcpy(priv->data + priv->req.assoc_bytes, in, inlen); + priv->req.src = priv->data; + priv->req.dst = priv->data + AEAD_BLOCK_SIZE; + } else { + priv->req.src = (unsigned char *)in; + priv->req.dst = out; + }
cb_param = malloc(sizeof(struct uadk_e_cb_info)); - if (!cb_param) { + if (unlikely(!cb_param)) { fprintf(stderr, "failed to alloc cb_param.\n"); - return ret; + return 0; }
cb_param->op = op; - cb_param->priv = priv; + cb_param->priv = &priv->req; priv->req.cb = uadk_e_aead_cb; priv->req.cb_param = cb_param; + priv->req.msg_state = AEAD_MSG_BLOCK; + priv->req.state = STATE_FAIL;
- ret = async_get_free_task(&idx); - if (!ret) + ret = async_get_free_task(&op->idx); + if (unlikely(!ret)) goto free_cb_param;
- op->idx = idx; do { ret = wd_do_aead_async(priv->sess, &priv->req); - if (ret < 0 && ret != -EBUSY) { - fprintf(stderr, "do sec aead async failed.\n"); + if (unlikely(ret < 0 && ret != -EBUSY)) { + fprintf(stderr, "do aead async operation failed.\n"); async_free_poll_task(op->idx, 0); ret = 0; goto free_cb_param; @@ -559,65 +569,59 @@ static int do_aead_async(struct aead_priv_ctx *priv, struct async_op *op) } while (ret == -EBUSY);
ret = async_pause_job(priv, op, ASYNC_TASK_AEAD); + if (unlikely(!ret || priv->req.state)) { + fprintf(stderr, "do aead async job failed, ret: %d, state: %u!\n", + ret, priv->req.state); + ret = 0; + goto free_cb_param; + } + + if (priv->req.assoc_bytes) + memcpy(out, priv->req.dst + priv->req.assoc_bytes, inlen);
free_cb_param: free(cb_param); return ret; }
-static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX *ctx, unsigned char *out, - const unsigned char *in, size_t inlen) +static int uadk_e_do_aes_gcm_update(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv, + unsigned char *out, const unsigned char *in, size_t inlen) { - struct aead_priv_ctx *priv = - (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); struct async_op *op; int ret, enc;
- op = malloc(sizeof(struct async_op)); - if (!op) - return RET_FAIL; - - ret = async_setup_async_event_notification(op); - if (unlikely(!ret)) { - fprintf(stderr, "failed to setup async event notification.\n"); - free(op); - return RET_FAIL; + enc = EVP_CIPHER_CTX_encrypting(ctx); + if (!enc) { + if (priv->taglen == AES_GCM_TAG_LEN) { + memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + } else { + fprintf(stderr, "invalid: aead gcm mac length only support 16B.\n"); + return RET_FAIL; + } }
- if (priv->req.assoc_bytes && !op->job) - priv->req.msg_state = AEAD_MSG_END; - else - priv->req.msg_state = AEAD_MSG_BLOCK; - - enc = EVP_CIPHER_CTX_encrypting(ctx); - if (!enc) - memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + if (ASYNC_get_current_job()) { + op = malloc(sizeof(struct async_op)); + if (unlikely(!op)) + return RET_FAIL;
- priv->req.src = priv->data; - if (!op->job) { - priv->req.in_bytes = priv->last_update_bufflen; - priv->req.dst = out; - ret = wd_do_aead_sync(priv->sess, &priv->req); - if (ret < 0) { - fprintf(stderr, "do sec aead final operation failed, ret: %d!\n", ret); - goto out; + ret = async_setup_async_event_notification(op); + if (unlikely(!ret)) { + fprintf(stderr, "failed to setup async event notification.\n"); + free(op); + return RET_FAIL; } - } else { - ret = do_aead_async(priv, op); - if (!ret) + + ret = do_aead_async(priv, op, out, in, inlen); + if (unlikely(!ret)) goto out;
- memcpy(out, priv->req.dst + priv->req.assoc_bytes, priv->req.in_bytes); + free(op); + return inlen; }
- if (enc) - memcpy(ctx_buf, priv->req.mac, AES_GCM_TAG_LEN); - - priv->last_update_bufflen = 0; - - free(op); - return priv->req.in_bytes; + return do_aead_sync(priv, out, in, inlen);
out: (void)async_clear_async_event_notification(); @@ -625,19 +629,55 @@ out: return RET_FAIL; }
+static int uadk_e_do_aes_gcm_final(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv, + unsigned char *out, const unsigned char *in, size_t inlen) +{ + unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + int ret, enc; + + enc = EVP_CIPHER_CTX_encrypting(ctx); + + if (ASYNC_get_current_job() || !priv->req.assoc_bytes) + goto out; + + priv->req.msg_state = AEAD_MSG_END; + priv->req.src = NULL; + priv->req.in_bytes = 0; + priv->req.dst = out; + priv->req.state = 0; + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0 || priv->req.state) { + fprintf(stderr, "do aead final operation failed, ret: %d, state: %u!\n", + ret, priv->req.state); + return RET_FAIL; + } + +out: + if (enc) + memcpy(ctx_buf, priv->req.mac, AES_GCM_TAG_LEN); + + return 0; +} + static int uadk_e_do_aes_gcm(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inlen) { - int ret; + struct aead_priv_ctx *priv; + + priv = (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + if (unlikely(!priv)) { + fprintf(stderr, "invalid: aead priv ctx is NULL.\n"); + return 0; + }
if (in) { if (out == NULL) - return uadk_e_do_aes_gcm_first(ctx, out, in, inlen); + return uadk_e_do_aes_gcm_first(priv, out, in, inlen);
- return uadk_e_do_aes_gcm_update(ctx, out, in, inlen); + return uadk_e_do_aes_gcm_update(ctx, priv, out, in, inlen); }
- return uadk_e_do_aes_gcm_final(ctx, out, NULL, 0); + return uadk_e_do_aes_gcm_final(ctx, priv, out, NULL, 0); }
#define UADK_AEAD_DESCR(name, block_size, key_size, iv_len, flags, ctx_size, \
SM4 ECB support was lost in a previous modification; this patch adds it back.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/uadk_cipher.c | 7 +++++++ src/uadk_cipher_adapter.c | 1 + 2 files changed, 8 insertions(+)
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index 63cc738..12830b7 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -878,6 +878,13 @@ EVP_CIPHER *uadk_create_cipher_meth(int nid) EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); cipher = uadk_sm4_cbc; break; + case NID_sm4_ecb: + UADK_CIPHER_DESCR(sm4_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_ecb; + break; case NID_des_ede3_cbc: UADK_CIPHER_DESCR(des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, diff --git a/src/uadk_cipher_adapter.c b/src/uadk_cipher_adapter.c index 2c4ed15..065575b 100644 --- a/src/uadk_cipher_adapter.c +++ b/src/uadk_cipher_adapter.c @@ -32,6 +32,7 @@ static int cipher_hw_v2_nids[] = { NID_aes_128_xts, NID_aes_256_xts, NID_sm4_cbc, + NID_sm4_ecb, NID_des_ede3_cbc, NID_des_ede3_ecb, NID_aes_128_gcm,