The uadk_engine supports the AES-GCM algorithm. The cipher and AEAD algorithms share the same set of EVP interfaces.
Signed-off-by: Wenkai Lin linwenkai6@hisilicon.com --- src/Makefile.am | 3 +- src/sec_cipher_adapter.c | 195 ++++++++++++ src/sec_cipher_adapter.h | 36 +++ src/uadk.h | 2 - src/uadk_aead.c | 592 ++++++++++++++++++++++++++++++++++++ src/uadk_cipher.c | 625 +++++++++++++-------------------------- src/uadk_engine_init.c | 26 +- 7 files changed, 1052 insertions(+), 427 deletions(-) create mode 100755 src/sec_cipher_adapter.c create mode 100755 src/sec_cipher_adapter.h create mode 100755 src/uadk_aead.c
diff --git a/src/Makefile.am b/src/Makefile.am index af16d85..b84d87f 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -11,7 +11,8 @@ endif #HAVE_CRYPTO3
uadk_engine_la_SOURCES=uadk_utils.c uadk_engine_init.c uadk_cipher.c \ uadk_digest.c uadk_async.c uadk_rsa.c uadk_sm2.c \ - uadk_pkey.c uadk_dh.c uadk_ec.c uadk_ecx.c + uadk_pkey.c uadk_dh.c uadk_ec.c uadk_ecx.c \ + uadk_aead.c sec_cipher_adapter.c
uadk_engine_la_LIBADD=-ldl $(WD_LIBS) -lpthread uadk_engine_la_LDFLAGS=-module -version-number $(VERSION) diff --git a/src/sec_cipher_adapter.c b/src/sec_cipher_adapter.c new file mode 100755 index 0000000..694fa55 --- /dev/null +++ b/src/sec_cipher_adapter.c @@ -0,0 +1,195 @@ +/* + * Copyright 2020-2021 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include "sec_cipher_adapter.h" + +#define HW_SEC_V2 0 +#define HW_SEC_V3 1 +#define OTHERS_HW 2 + +int cipher_hw_v2_nids[] = { + NID_aes_128_cbc, + NID_aes_192_cbc, + NID_aes_256_cbc, + NID_aes_128_ecb, + NID_aes_192_ecb, + NID_aes_256_ecb, + NID_aes_128_xts, + NID_aes_256_xts, + NID_sm4_cbc, + NID_des_ede3_cbc, + NID_des_ede3_ecb, + NID_aes_128_gcm, + NID_aes_192_gcm, + NID_aes_256_gcm +}; + +int cipher_hw_v3_nids[] = { + NID_aes_128_cbc, + NID_aes_192_cbc, + NID_aes_256_cbc, + NID_aes_128_ctr, + NID_aes_192_ctr, + NID_aes_256_ctr, + NID_aes_128_ecb, + NID_aes_192_ecb, + NID_aes_256_ecb, + NID_aes_128_xts, + NID_aes_256_xts, + NID_sm4_cbc, + NID_sm4_ecb, + NID_des_ede3_cbc, + NID_des_ede3_ecb, + NID_aes_128_cfb128, + NID_aes_192_cfb128, + NID_aes_256_cfb128, + NID_aes_128_ofb128, + NID_aes_192_ofb128, + NID_aes_256_ofb128, + NID_sm4_cfb128, + NID_sm4_ofb128, + NID_sm4_ctr, + NID_aes_128_gcm, + NID_aes_192_gcm, + NID_aes_256_gcm +}; + +struct engine_cipher_info c_info[] = { + {NID_aes_128_cbc, NULL}, + {NID_aes_192_cbc, NULL}, + {NID_aes_256_cbc, 
NULL}, + {NID_aes_128_ctr, NULL}, + {NID_aes_192_ctr, NULL}, + {NID_aes_256_ctr, NULL}, + {NID_aes_128_ecb, NULL}, + {NID_aes_192_ecb, NULL}, + {NID_aes_256_ecb, NULL}, + {NID_aes_128_xts, NULL}, + {NID_aes_256_xts, NULL}, + {NID_sm4_cbc, NULL}, + {NID_sm4_ecb, NULL}, + {NID_des_ede3_cbc, NULL}, + {NID_des_ede3_ecb, NULL}, + {NID_aes_128_cfb128, NULL}, + {NID_aes_192_cfb128, NULL}, + {NID_aes_256_cfb128, NULL}, + {NID_aes_128_ofb128, NULL}, + {NID_aes_192_ofb128, NULL}, + {NID_aes_256_ofb128, NULL}, + {NID_sm4_cfb128, NULL}, + {NID_sm4_ofb128, NULL}, + {NID_sm4_ctr, NULL}, + {NID_aes_128_gcm, NULL}, + {NID_aes_192_gcm, NULL}, + {NID_aes_256_gcm, NULL} +}; + +static const unsigned int num_cc = ARRAY_SIZE(c_info); + +static void uadk_e_create_ciphers(int index) +{ + switch (c_info[index].nid) { + case NID_aes_128_gcm: + case NID_aes_192_gcm: + case NID_aes_256_gcm: + c_info[index].cipher = uadk_create_gcm_cipher_meth(c_info[index].nid); + break; + case NID_aes_128_cbc: + case NID_aes_192_cbc: + case NID_aes_256_cbc: + case NID_aes_128_ctr: + case NID_aes_192_ctr: + case NID_aes_256_ctr: + case NID_aes_128_ecb: + case NID_aes_192_ecb: + case NID_aes_256_ecb: + case NID_aes_128_xts: + case NID_aes_256_xts: + case NID_sm4_cbc: + case NID_sm4_ecb: + case NID_des_ede3_cbc: + case NID_des_ede3_ecb: + case NID_aes_128_cfb128: + case NID_aes_192_cfb128: + case NID_aes_256_cfb128: + case NID_aes_128_ofb128: + case NID_aes_192_ofb128: + case NID_aes_256_ofb128: + case NID_sm4_cfb128: + case NID_sm4_ofb128: + case NID_sm4_ctr: + c_info[index].cipher = uadk_create_cipher_meth(c_info[index].nid); + break; + default: + break; + } +} + +int uadk_e_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) +{ + int platform = OTHERS_HW; + struct uacce_dev *dev; + + (void)e; + + if ((nids == NULL) && ((cipher == NULL) || (nid < 0))) { + if (cipher != NULL) + *cipher = NULL; + return 0; + } + + dev = wd_get_accel_dev("cipher"); + + if (!strcmp(dev->api, "hisi_qm_v2")) + 
platform = HW_SEC_V2; + else if (!strcmp(dev->api, "hisi_qm_v3")) + platform = HW_SEC_V3; + + if (cipher == NULL) { + if (platform == HW_SEC_V2) { + *nids = cipher_hw_v2_nids; + return ARRAY_SIZE(cipher_hw_v2_nids); + } else if (platform == HW_SEC_V3) { + *nids = cipher_hw_v3_nids; + return ARRAY_SIZE(cipher_hw_v3_nids); + } + } + + for (int i = 0; i < num_cc; i++) { + if (nid == c_info[i].nid) { + if (c_info[i].cipher == NULL) + uadk_e_create_ciphers(i); + + *cipher = c_info[i].cipher; + return 1; + } + } + + *cipher = NULL; + return 0; +} + +int uadk_e_bind_crypto(ENGINE *e) +{ + return ENGINE_set_ciphers(e, uadk_e_ciphers); +} + +void uadk_e_destroy_crypto(void) +{ + uadk_e_destroy_cipher(c_info, num_cc); + uadk_e_destroy_aead(c_info, num_cc); +} + diff --git a/src/sec_cipher_adapter.h b/src/sec_cipher_adapter.h new file mode 100755 index 0000000..3f010f2 --- /dev/null +++ b/src/sec_cipher_adapter.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020-2021 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +#ifndef UADK_ADAPT_H +#define UADK_ADAPT_H +#include <openssl/engine.h> +#include <uadk/wd.h> + +struct engine_cipher_info { + int nid; + EVP_CIPHER *cipher; +}; + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +EVP_CIPHER *uadk_create_gcm_cipher_meth(int nid); +EVP_CIPHER *uadk_create_cipher_meth(int nid); +void uadk_e_destroy_aead(struct engine_cipher_info *info, int num); +void uadk_e_destroy_cipher(struct engine_cipher_info *info, int num); + +int uadk_e_bind_crypto(ENGINE *e); +void uadk_e_destroy_crypto(void); +#endif diff --git a/src/uadk.h b/src/uadk.h index 30c099f..78c8682 100644 --- a/src/uadk.h +++ b/src/uadk.h @@ -29,8 +29,6 @@ enum { };
extern const char *engine_uadk_id; -int uadk_e_bind_cipher(ENGINE *e); -void uadk_e_destroy_cipher(void); int uadk_e_bind_digest(ENGINE *e); void uadk_e_destroy_digest(void); int uadk_e_bind_rsa(ENGINE *e); diff --git a/src/uadk_aead.c b/src/uadk_aead.c new file mode 100755 index 0000000..30e95e1 --- /dev/null +++ b/src/uadk_aead.c @@ -0,0 +1,592 @@ +/* + * Copyright 2020-2021 Huawei Technologies Co.,Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +#include <stdio.h> +#include <stdbool.h> +#include <string.h> +#include <dlfcn.h> +#include <openssl/aes.h> +#include <openssl/engine.h> +#include <uadk/wd_aead.h> +#include <uadk/wd_sched.h> +#include "sec_cipher_adapter.h" +#include "uadk.h" + +#define CTX_SYNC_ENC 0 +#define CTX_SYNC_DEC 1 +#define CTX_ASYNC_ENC 2 +#define CTX_ASYNC_DEC 3 +#define CTX_NUM 4 +#define AES_GCM_CTR_LEN 4 +#define AES_GCM_BLOCK_SIZE 16 +#define AES_GCM_IV_LEN 12 +#define AES_GCM_TAG_LEN 16 +#define GCM_FLAG (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_GCM_MODE \ + | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_AEAD_CIPHER \ + | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT) +#define KEY_BYTES_OFFSET 0x3 +#define AEAD_DOING 1 +#define AEAD_END 0 +#define AEAD_BLOCK_SIZE 0xFFFE00 + +struct aead_priv_ctx { + handle_t sess; + struct wd_aead_sess_setup setup; + struct wd_aead_req req; + unsigned char *data; + unsigned char iv[AES_GCM_BLOCK_SIZE]; + unsigned char mac[AES_GCM_TAG_LEN]; + const unsigned char 
*ckey; + unsigned int mac_len; +}; + +struct aead_engine { + struct wd_ctx_config ctx_cfg; + struct wd_sched sched; + int numa_id; + int pid; + pthread_spinlock_t lock; +}; + +static struct aead_engine engine; + +static EVP_CIPHER *uadk_aes_128_gcm; +static EVP_CIPHER *uadk_aes_192_gcm; +static EVP_CIPHER *uadk_aes_256_gcm; + +static handle_t sched_single_aead_init(handle_t h_sched_ctx, void *sched_param) +{ + struct sched_params *param = (struct sched_params *)sched_param; + struct sched_params *skey; + + skey = malloc(sizeof(struct sched_params)); + if (!skey) { + fprintf(stderr, "fail to alloc aead sched key!\n"); + return (handle_t)0; + } + + skey->numa_id = param->numa_id; + skey->type = param->type; + + return (handle_t)skey; +} + +static __u32 sched_single_pick_next_ctx(handle_t sched_ctx, void *sched_key, const int sched_mode) +{ + struct sched_params *key = (struct sched_params *)sched_key; + + if (sched_mode) { + if (key->type == WD_CIPHER_ENCRYPTION_DIGEST) + return CTX_ASYNC_ENC; + else + return CTX_ASYNC_DEC; + } else { + if (key->type == WD_CIPHER_ENCRYPTION_DIGEST) + return CTX_SYNC_ENC; + else + return CTX_SYNC_DEC; + } +} + +static int sched_single_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count) +{ + return 0; +} + +static int uadk_e_wd_aead_cipher_env_init(struct uacce_dev *dev) +{ + int ret; + + ret = uadk_e_set_env("WD_AEAD_CIPHER_CTX_NUM", dev->numa_id); + if (ret) + return ret; + + ret = wd_aead_env_init(NULL); + + return ret; +} + +static int uadk_e_wd_aead_cipher_init(struct uacce_dev *dev) +{ + int ret, i, j; + + engine.numa_id = dev->numa_id; + + ret = uadk_e_is_env_enabled("aead"); + if (ret) + return uadk_e_wd_aead_cipher_env_init(dev); + + memset(&engine.ctx_cfg, 0, sizeof(struct wd_ctx_config)); + engine.ctx_cfg.ctx_num = CTX_NUM; + engine.ctx_cfg.ctxs = calloc(CTX_NUM, sizeof(struct wd_ctx)); + if (!engine.ctx_cfg.ctxs) + return -ENOMEM; + + for (i = 0; i < CTX_NUM; i++) { + engine.ctx_cfg.ctxs[i].ctx = 
wd_request_ctx(dev); + if (!engine.ctx_cfg.ctxs[i].ctx) { + ret = -ENOMEM; + goto err_freectx; + } + } + + engine.ctx_cfg.ctxs[CTX_SYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + engine.ctx_cfg.ctxs[CTX_SYNC_DEC].op_type = CTX_TYPE_DECRYPT; + engine.ctx_cfg.ctxs[CTX_SYNC_ENC].ctx_mode = CTX_MODE_SYNC; + engine.ctx_cfg.ctxs[CTX_SYNC_DEC].ctx_mode = CTX_MODE_SYNC; + + engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].op_type = CTX_TYPE_ENCRYPT; + engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].op_type = CTX_TYPE_DECRYPT; + engine.ctx_cfg.ctxs[CTX_ASYNC_ENC].ctx_mode = CTX_MODE_ASYNC; + engine.ctx_cfg.ctxs[CTX_ASYNC_DEC].ctx_mode = CTX_MODE_ASYNC; + + engine.sched.name = "sched_single"; + engine.sched.pick_next_ctx = sched_single_pick_next_ctx; + engine.sched.poll_policy = sched_single_poll_policy; + engine.sched.sched_init = sched_single_aead_init; + + ret = wd_aead_init(&engine.ctx_cfg, &engine.sched); + if (ret) + goto err_freectx; + + return ret; + +err_freectx: + for (j = 0; j < i; j++) + wd_release_ctx(engine.ctx_cfg.ctxs[j].ctx); + + free(engine.ctx_cfg.ctxs); + + return ret; +} + +static int uadk_e_init_aead_cipher(void) +{ + struct uacce_dev *dev; + int ret; + + if (engine.pid != getpid()) { + pthread_spin_lock(&engine.lock); + if (engine.pid == getpid()) { + pthread_spin_unlock(&engine.lock); + return 1; + } + + dev = wd_get_accel_dev("aead"); + if (!dev) { + pthread_spin_unlock(&engine.lock); + fprintf(stderr, "failed to get device for aead.\n"); + return 0; + } + + ret = uadk_e_wd_aead_cipher_init(dev); + if (ret < 0) { + pthread_spin_unlock(&engine.lock); + fprintf(stderr, "failed to initiate aead cipher.\n"); + return 0; + } + + engine.pid = getpid(); + pthread_spin_unlock(&engine.lock); + free(dev); + } + + return 1; +} + +static int uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct aead_priv_ctx *priv) +{ + struct sched_params params = {0}; + int ret; + + ret = uadk_e_init_aead_cipher(); + if (unlikely(!ret)) { + fprintf(stderr, "uadk failed to init aead HW!\n"); + return 0; + } + + 
params.type = priv->req.op_type; + ret = uadk_e_is_env_enabled("aead"); + if (ret) + params.type = 0; + + params.numa_id = engine.numa_id; + priv->setup.sched_param = &params; + if (!priv->sess) { + priv->sess = wd_aead_alloc_sess(&priv->setup); + if (!priv->sess) { + fprintf(stderr, "uadk engine failed to alloc aead session!\n"); + return 0; + } + priv->data = malloc(AEAD_BLOCK_SIZE); + if (unlikely(!priv->data)) { + wd_aead_free_sess(priv->sess); + priv->sess = 0; + return 0; + } + } + + ret = wd_aead_set_ckey(priv->sess, priv->ckey, EVP_CIPHER_CTX_key_length(ctx)); + if (ret) { + wd_aead_free_sess(priv->sess); + free(priv->data); + priv->sess = 0; + priv->data = NULL; + fprintf(stderr, "uadk engine failed to set ckey!\n"); + return 0; + } + + return 1; +} + +static int uadk_e_aead_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *ckey, + const unsigned char *iv, int enc) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret; + + if (unlikely(!ckey)) + return 1; + + priv->req.op_type = enc ? 
WD_CIPHER_ENCRYPTION_DIGEST : WD_CIPHER_DECRYPTION_DIGEST; + + if (iv) + memcpy(priv->iv, iv, AES_GCM_IV_LEN); + + priv->setup.calg = WD_CIPHER_AES; + priv->setup.cmode = WD_CIPHER_GCM; + priv->setup.dalg = 0; + priv->setup.dmode = 0; + + priv->req.assoc_bytes = 0; + priv->req.data_fmt = WD_FLAT_BUF; + priv->req.iv_bytes = AES_GCM_IV_LEN; + memset(priv->iv + AES_GCM_IV_LEN, 0, AES_GCM_CTR_LEN); + priv->req.iv = priv->iv; + priv->req.mac_bytes = AES_GCM_TAG_LEN; + + if (enc) { + priv->req.mac = priv->mac; + } else { + priv->req.mac = calloc(AES_GCM_TAG_LEN, sizeof(char)); + } + + priv->ckey = ckey; + + ret = uadk_e_ctx_init(ctx, priv); + if (!ret) + return 0; + //return -1; + + return 1; +} + +static int uadk_e_aead_cipher_cleanup(EVP_CIPHER_CTX *ctx) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + + if (priv->req.op_type == WD_CIPHER_DECRYPTION_DIGEST) + free(priv->req.mac); + + if (priv->sess) { + wd_aead_free_sess(priv->sess); + priv->sess = 0; + } + + if (priv->data) { + free(priv->data); + priv->data = NULL; + } + + return 1; +} + +static int uadk_e_aead_cipher_set_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int enc = EVP_CIPHER_CTX_encrypting(ctx); + + switch (type) { + case EVP_CTRL_INIT: + priv->req.iv_bytes = 0; + return 1; + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = priv->req.iv_bytes; + return 1; + case EVP_CTRL_GCM_SET_IVLEN: + if (arg != AES_GCM_IV_LEN) { + fprintf(stderr, "gcm only support 12 bytes.\n"); + return 0; + } + return 1; + case EVP_CTRL_GCM_GET_TAG: + if (arg <= 0 || arg > AES_GCM_TAG_LEN) { + fprintf(stderr, "TAG length invalid.\n"); + return 0; + } + + priv->mac_len = arg; + + if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { + fprintf(stderr, "ctx memory pointer is invalid.\n"); + return 0; + } + + memcpy(ptr, EVP_CIPHER_CTX_buf_noconst(ctx), arg); + return 1; + case 
EVP_CTRL_GCM_SET_TAG: + if (arg != AES_GCM_TAG_LEN || enc) { + fprintf(stderr, "cannot set tag when encrypt or arg is invalid.\n"); + return 0; + } + + if (EVP_CIPHER_CTX_buf_noconst(ctx) == NULL || ptr == NULL) { + fprintf(stderr, "ctx memory pointer is invalid.\n"); + return 0; + } + + memcpy(EVP_CIPHER_CTX_buf_noconst(ctx), ptr, AES_GCM_TAG_LEN); + return 1; + default: + fprintf(stderr, "unsupported ctrl type: %d\n", type); + return 0; + } +} + +//static void openssl_aes_cb_t(unsigned char *key, unsigned int key_len, +// unsigned char *src, unsigned char *dst) +//{ +// unsigned int local_key_len = key_len << KEY_BYTES_OFFSET; +// AES_KEY local_key; +// +// AES_set_encrypt_key(key, local_key_len, &local_key); +// AES_ecb_encrypt(src, dst, &local_key, AES_ENCRYPT); +//} + +void hexdump(char *s, char *buf, int num) +{ + int i; + + printf("%s\n", s); + for (i = 0; i < num; i++) { + printf("%02X", buf[i]); + if ((i + 1) % 8 == 0) + printf("\n"); + } + printf("\n"); + + return; +} + +static int uadk_e_do_first(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret; + + ret = wd_aead_set_authsize(priv->sess, AES_GCM_TAG_LEN); + if (ret < 0) { + fprintf(stderr, "uadk engine failed to set authsize!\n"); + return -1; + } + + /* Only inputs aad data as do first */ + priv->req.src = (unsigned char *)in; + priv->req.in_bytes = inlen; + priv->req.assoc_bytes = inlen; + priv->req.dst = out; + priv->req.out_bytes = inlen; + priv->req.msg_state = AEAD_MSG_FIRST; + memcpy(priv->req.mac, ctx_buf, AES_GCM_TAG_LEN); + + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead first operation failed, ret:%d!\n", ret); + return -1; + } + + return 1; +} + +static int uadk_e_do_update(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t 
inlen) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + int ret; + + priv->req.src = (unsigned char *)in; + priv->req.dst = out; + + priv->req.in_bytes = inlen; + priv->req.out_bytes = inlen; + priv->req.msg_state = AEAD_MSG_MIDDLE; + + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead update operation failed, ret:%d!\n", ret); + return -1; + } + + return inlen; +} + +static int uadk_e_do_final(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + struct aead_priv_ctx *priv = + (struct aead_priv_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx); + unsigned char *ctx_buf = EVP_CIPHER_CTX_buf_noconst(ctx); + int ret, enc; + + enc = EVP_CIPHER_CTX_encrypting(ctx); + priv->req.in_bytes = 0; + priv->req.dst = out; + priv->req.msg_state = AEAD_MSG_END; + + if (!enc) + memcpy(priv->mac, ctx_buf, AES_GCM_TAG_LEN); + + ret = wd_do_aead_sync(priv->sess, &priv->req); + if (ret < 0) { + fprintf(stderr, "do sec aead final operation failed, ret:%d!\n", ret); + return -1; + } + + if (enc) { + memcpy(ctx_buf, priv->req.mac, AES_GCM_TAG_LEN); + } else { + if (priv->req.state != WD_SUCCESS) + return -1; + else + return 1; + } + + return inlen; +} + +static int uadk_e_do_aead_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t inlen) +{ + if (in) { + if (out == NULL) + return uadk_e_do_first(ctx, out, in, inlen); + else + return uadk_e_do_update(ctx, out, in, inlen); + } else { + /* first ->update ->final */ + return uadk_e_do_final(ctx, out, NULL, 0); + } +} + +#define UADK_AEAD_DESCR(name, block_size, key_size, iv_len, flags, ctx_size,\ + init, cipher, cleanup, set_params, get_params, ctrl)\ +do {\ + uadk_##name = EVP_CIPHER_meth_new(NID_##name, block_size, key_size);\ + if (uadk_##name == 0 ||\ + !EVP_CIPHER_meth_set_iv_length(uadk_##name, iv_len) ||\ + !EVP_CIPHER_meth_set_flags(uadk_##name, flags) ||\ + 
!EVP_CIPHER_meth_set_impl_ctx_size(uadk_##name, ctx_size) ||\ + !EVP_CIPHER_meth_set_init(uadk_##name, init) ||\ + !EVP_CIPHER_meth_set_do_cipher(uadk_##name, cipher) ||\ + !EVP_CIPHER_meth_set_cleanup(uadk_##name, cleanup) ||\ + !EVP_CIPHER_meth_set_set_asn1_params(uadk_##name, set_params) ||\ + !EVP_CIPHER_meth_set_get_asn1_params(uadk_##name, get_params) ||\ + !EVP_CIPHER_meth_set_ctrl(uadk_##name, ctrl))\ + return 0;\ +} while (0) + +EVP_CIPHER *uadk_create_gcm_cipher_meth(int nid) +{ + EVP_CIPHER *aead = NULL; + + switch (nid) { + case NID_aes_128_gcm: + UADK_AEAD_DESCR(aes_128_gcm, AES_GCM_BLOCK_SIZE, 16, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aead_cipher_init, uadk_e_do_aead_cipher, + uadk_e_aead_cipher_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aead_cipher_set_ctrl); + aead = uadk_aes_128_gcm; + break; + case NID_aes_192_gcm: + UADK_AEAD_DESCR(aes_192_gcm, AES_GCM_BLOCK_SIZE, 24, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aead_cipher_init, uadk_e_do_aead_cipher, + uadk_e_aead_cipher_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aead_cipher_set_ctrl); + aead = uadk_aes_192_gcm; + break; + case NID_aes_256_gcm: + UADK_AEAD_DESCR(aes_256_gcm, AES_GCM_BLOCK_SIZE, 32, AES_GCM_IV_LEN, + GCM_FLAG, sizeof(struct aead_priv_ctx), + uadk_e_aead_cipher_init, uadk_e_do_aead_cipher, + uadk_e_aead_cipher_cleanup, + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv), + (EVP_CIPH_FLAG_DEFAULT_ASN1 ? 
NULL : EVP_CIPHER_get_asn1_iv), + uadk_e_aead_cipher_set_ctrl); + aead = uadk_aes_256_gcm; + break; + default: + aead = NULL; + break; + } + + return aead; +} + +static void destroy_aead(struct engine_cipher_info *info, int num) +{ + int i; + + for (i = 0; i < num; i++) { + if (info[i].cipher != NULL) { + EVP_CIPHER_meth_free(info[i].cipher); + info[i].cipher = NULL; + } + } +} + +void uadk_e_destroy_aead(struct engine_cipher_info *info, int num) +{ + int i, ret; + + if (engine.pid == getpid()) { + ret = uadk_e_is_env_enabled("aead"); + if (ret) { + wd_aead_env_uninit(); + } else { + wd_aead_uninit(); + for (i = 0; i < engine.ctx_cfg.ctx_num; i++) + wd_release_ctx(engine.ctx_cfg.ctxs[i].ctx); + + free(engine.ctx_cfg.ctxs); + } + engine.pid = 0; + } + + pthread_spin_destroy(&engine.lock); + destroy_aead(info, num); +} diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c index c87c7ee..2782ad5 100644 --- a/src/uadk_cipher.c +++ b/src/uadk_cipher.c @@ -25,6 +25,7 @@ #include <uadk/wd_sched.h> #include "uadk.h" #include "uadk_async.h" +#include "sec_cipher_adapter.h"
#define UADK_DO_SOFT (-0xE0) #define CTX_SYNC_ENC 0 @@ -74,54 +75,8 @@ struct cipher_info { __u32 out_bytes; };
-static int platform; - #define SMALL_PACKET_OFFLOAD_THRESHOLD_DEFAULT 192
-static int cipher_hw_v2_nids[] = { - NID_aes_128_cbc, - NID_aes_192_cbc, - NID_aes_256_cbc, - NID_aes_128_ecb, - NID_aes_192_ecb, - NID_aes_256_ecb, - NID_aes_128_xts, - NID_aes_256_xts, - NID_sm4_cbc, - NID_des_ede3_cbc, - NID_des_ede3_ecb, - NID_sm4_ecb, - 0, -}; - -static int cipher_hw_v3_nids[] = { - NID_aes_128_cbc, - NID_aes_192_cbc, - NID_aes_256_cbc, - NID_aes_128_ctr, - NID_aes_192_ctr, - NID_aes_256_ctr, - NID_aes_128_ecb, - NID_aes_192_ecb, - NID_aes_256_ecb, - NID_aes_128_xts, - NID_aes_256_xts, - NID_sm4_cbc, - NID_sm4_ecb, - NID_des_ede3_cbc, - NID_des_ede3_ecb, - NID_aes_128_cfb128, - NID_aes_192_cfb128, - NID_aes_256_cfb128, - NID_aes_128_ofb128, - NID_aes_192_ofb128, - NID_aes_256_ofb128, - NID_sm4_cfb128, - NID_sm4_ofb128, - NID_sm4_ctr, - 0, -}; - static EVP_CIPHER *uadk_aes_128_cbc; static EVP_CIPHER *uadk_aes_192_cbc; static EVP_CIPHER *uadk_aes_256_cbc; @@ -134,7 +89,7 @@ static EVP_CIPHER *uadk_aes_256_ecb; static EVP_CIPHER *uadk_aes_128_xts; static EVP_CIPHER *uadk_aes_256_xts; static EVP_CIPHER *uadk_sm4_cbc; -static EVP_CIPHER *uadk_sm4_ecb; +// static EVP_CIPHER *uadk_sm4_ecb; static EVP_CIPHER *uadk_des_ede3_cbc; static EVP_CIPHER *uadk_des_ede3_ecb; static EVP_CIPHER *uadk_aes_128_cfb128; @@ -317,6 +272,12 @@ static int uadk_e_cipher_soft_work(EVP_CIPHER_CTX *ctx, unsigned char *out, return 1; }
+static int sec_ciphers_is_check_valid(struct cipher_priv_ctx *priv) +{ + return priv->req.in_bytes <= priv->switch_threshold ? + 0 : 1; +} + static void uadk_e_cipher_sw_cleanup(EVP_CIPHER_CTX *ctx) { struct cipher_priv_ctx *priv = @@ -328,130 +289,6 @@ static void uadk_e_cipher_sw_cleanup(EVP_CIPHER_CTX *ctx) } }
-static int uadk_get_accel_platform(char *alg_name) -{ - struct uacce_dev *dev; - - dev = wd_get_accel_dev(alg_name); - if (dev == NULL) - return 0; - - if (!strcmp(dev->api, "hisi_qm_v2")) - platform = HW_V2; - else - platform = HW_V3; - free(dev); - - return 1; -} - -static int uadk_e_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher, - const int **nids, int nid) -{ - int *cipher_nids; - __u32 size, i; - int ret = 1; - - if (platform == HW_V2) { - size = (sizeof(cipher_hw_v2_nids) - 1) / sizeof(int); - cipher_nids = cipher_hw_v2_nids; - } else { - size = (sizeof(cipher_hw_v3_nids) - 1) / sizeof(int); - cipher_nids = cipher_hw_v3_nids; - } - - if (!cipher) { - *nids = cipher_nids; - return size; - } - - for (i = 0; i < size; i++) { - if (nid == cipher_nids[i]) - break; - } - - switch (nid) { - case NID_aes_128_cbc: - *cipher = uadk_aes_128_cbc; - break; - case NID_aes_192_cbc: - *cipher = uadk_aes_192_cbc; - break; - case NID_aes_256_cbc: - *cipher = uadk_aes_256_cbc; - break; - case NID_aes_128_ctr: - *cipher = uadk_aes_128_ctr; - break; - case NID_aes_192_ctr: - *cipher = uadk_aes_192_ctr; - break; - case NID_aes_256_ctr: - *cipher = uadk_aes_256_ctr; - break; - case NID_aes_128_ecb: - *cipher = uadk_aes_128_ecb; - break; - case NID_aes_192_ecb: - *cipher = uadk_aes_192_ecb; - break; - case NID_aes_256_ecb: - *cipher = uadk_aes_256_ecb; - break; - case NID_aes_128_xts: - *cipher = uadk_aes_128_xts; - break; - case NID_aes_256_xts: - *cipher = uadk_aes_256_xts; - break; - case NID_sm4_cbc: - *cipher = uadk_sm4_cbc; - break; - case NID_sm4_ecb: - *cipher = uadk_sm4_ecb; - break; - case NID_des_ede3_cbc: - *cipher = uadk_des_ede3_cbc; - break; - case NID_des_ede3_ecb: - *cipher = uadk_des_ede3_ecb; - break; - case NID_aes_128_ofb128: - *cipher = uadk_aes_128_ofb128; - break; - case NID_aes_192_ofb128: - *cipher = uadk_aes_192_ofb128; - break; - case NID_aes_256_ofb128: - *cipher = uadk_aes_256_ofb128; - break; - case NID_aes_128_cfb128: - *cipher = 
uadk_aes_128_cfb128; - break; - case NID_aes_192_cfb128: - *cipher = uadk_aes_192_cfb128; - break; - case NID_aes_256_cfb128: - *cipher = uadk_aes_256_cfb128; - break; - case NID_sm4_ofb128: - *cipher = uadk_sm4_ofb128; - break; - case NID_sm4_cfb128: - *cipher = uadk_sm4_cfb128; - break; - case NID_sm4_ctr: - *cipher = uadk_sm4_ctr; - break; - default: - ret = 0; - *cipher = NULL; - break; - } - - return ret; -} - static handle_t sched_single_init(handle_t h_sched_ctx, void *sched_param) { struct sched_params *param = (struct sched_params *)sched_param; @@ -717,17 +554,17 @@ static int uadk_e_cipher_cleanup(EVP_CIPHER_CTX *ctx) return 1; }
-static void *uadk_e_cipher_cb(struct wd_cipher_req *req, void *data) +static void async_cb(struct wd_cipher_req *req, void *data) { struct uadk_e_cb_info *cb_param; struct async_op *op;
if (!req) - return NULL; + return;
cb_param = req->cb_param; if (!cb_param) - return NULL; + return;
op = cb_param->op; if (op && op->job && !op->done) { @@ -735,8 +572,6 @@ static void *uadk_e_cipher_cb(struct wd_cipher_req *req, void *data) async_free_poll_task(op->idx, 1); async_wake_job(op->job); } - - return NULL; }
/* Increment counter (128-bit int) by c */ @@ -764,15 +599,16 @@ static void uadk_cipher_update_priv_ctx(struct cipher_priv_ctx *priv) __u16 iv_bytes = priv->req.iv_bytes; int offset = priv->req.in_bytes - iv_bytes; unsigned char K[IV_LEN] = {0}; - __u32 i; + int i;
switch (priv->setup.mode) { - case WD_CIPHER_CFB: case WD_CIPHER_CBC: if (priv->req.op_type == WD_CIPHER_ENCRYPTION) - memcpy(priv->iv, priv->req.dst + offset, iv_bytes); + memcpy(priv->iv, priv->req.dst + priv->req.in_bytes - iv_bytes, + iv_bytes); else - memcpy(priv->iv, priv->req.src + offset, iv_bytes); + memcpy(priv->iv, priv->req.src + priv->req.in_bytes - iv_bytes, + iv_bytes);
break; case WD_CIPHER_OFB: @@ -781,6 +617,15 @@ static void uadk_cipher_update_priv_ctx(struct cipher_priv_ctx *priv) *((unsigned char *)priv->req.dst + offset + i); } memcpy(priv->iv, K, iv_bytes); + break; + case WD_CIPHER_CFB: + if (priv->req.op_type == WD_CIPHER_ENCRYPTION) + memcpy(priv->iv, priv->req.dst + priv->req.in_bytes - iv_bytes, + iv_bytes); + else + memcpy(priv->iv, priv->req.src + priv->req.in_bytes - iv_bytes, + iv_bytes); + break; case WD_CIPHER_CTR: ctr_iv_inc(priv->iv, priv->req.in_bytes >> CTR_MODE_LEN_SHIFT); @@ -794,16 +639,11 @@ static int do_cipher_sync(struct cipher_priv_ctx *priv) { int ret;
- if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { - fprintf(stderr, "switch to soft cipher.\n"); + if (unlikely(priv->switch_flag == UADK_DO_SOFT)) return 0; - }
- /* - * If the length of the input data does not reach to hardware computing threshold, - * directly switch to soft cipher. - */ - if (priv->req.in_bytes <= priv->switch_threshold) + ret = sec_ciphers_is_check_valid(priv); + if (!ret) return 0;
ret = wd_do_cipher_sync(priv->sess, &priv->req); @@ -818,15 +658,14 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op) int idx, ret;
if (unlikely(priv->switch_flag == UADK_DO_SOFT)) { - fprintf(stderr, "switch to soft cipher.\n"); + fprintf(stderr, "async cipher init failed.\n"); return 0; }
cb_param.op = op; cb_param.priv = priv; - priv->req.cb = uadk_e_cipher_cb; + priv->req.cb = (void *)async_cb; priv->req.cb_param = &cb_param; - ret = async_get_free_task(&idx); if (!ret) return 0; @@ -849,10 +688,8 @@ static int do_cipher_async(struct cipher_priv_ctx *priv, struct async_op *op)
static void uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct cipher_priv_ctx *priv) { - __u32 cipher_counts = ARRAY_SIZE(cipher_info_table); struct sched_params params = {0}; - int nid, ret; - __u32 i; + int ret;
priv->req.iv_bytes = EVP_CIPHER_CTX_iv_length(ctx); priv->req.iv = priv->iv; @@ -880,34 +717,15 @@ static void uadk_e_ctx_init(EVP_CIPHER_CTX *ctx, struct cipher_priv_ctx *priv) /* Use the default numa parameters */ params.numa_id = -1; priv->setup.sched_param = ¶ms; - if (!priv->sess) { - nid = EVP_CIPHER_CTX_nid(ctx); - - for (i = 0; i < cipher_counts; i++) { - if (nid == cipher_info_table[i].nid) { - cipher_priv_ctx_setup(priv, cipher_info_table[i].alg, - cipher_info_table[i].mode, cipher_info_table[i].out_bytes); - break; - } - } - - if (i == cipher_counts) { - fprintf(stderr, "failed to setup the private ctx.\n"); - return; - } - priv->sess = wd_cipher_alloc_sess(&priv->setup); - if (!priv->sess) { + if (!priv->sess) fprintf(stderr, "uadk failed to alloc session!\n"); - return; - } }
ret = wd_cipher_set_key(priv->sess, priv->key, EVP_CIPHER_CTX_key_length(ctx)); if (ret) { wd_cipher_free_sess(priv->sess); - priv->sess = 0; fprintf(stderr, "uadk failed to set key!\n"); } } @@ -938,14 +756,6 @@ static int uadk_e_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, if (!ret) goto sync_err; } else { - /* - * If the length of the input data - * does not reach to hardware computing threshold, - * directly switch to soft cipher. - */ - if (priv->req.in_bytes <= priv->switch_threshold) - goto sync_err; - ret = do_cipher_async(priv, &op); if (!ret) goto out_notify; @@ -978,210 +788,193 @@ do { \ return 0; \ } while (0)
-static int bind_v2_cipher(void) -{ - UADK_CIPHER_DESCR(aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - 
UADK_CIPHER_DESCR(des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(des_ede3_ecb, 8, 24, 0, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ecb, 16, 16, 16, EVP_CIPH_ECB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - - return 0; -} - -static int bind_v3_cipher(void) -{ - UADK_CIPHER_DESCR(aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_ofb128, 1, 24, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_ofb128, 1, 32, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_128_cfb128, 1, 
16, 16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_192_cfb128, 1, 24, 16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(aes_256_cfb128, 1, 32, 16, EVP_CIPH_CFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_cfb128, 1, 16, 16, EVP_CIPH_OFB_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - UADK_CIPHER_DESCR(sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, - sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, - uadk_e_do_cipher, uadk_e_cipher_cleanup, - EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); - - return 0; -} - -int uadk_e_bind_cipher(ENGINE *e) +EVP_CIPHER *uadk_create_cipher_meth(int nid) { - int ret; + EVP_CIPHER *cipher;
- ret = uadk_get_accel_platform("cipher"); - if (!ret) { - fprintf(stderr, "failed to get accel hardware version.\n"); - return 0; + switch (nid) { + case NID_aes_128_cbc: + UADK_CIPHER_DESCR(aes_128_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_cbc; + break; + case NID_aes_192_cbc: + UADK_CIPHER_DESCR(aes_192_cbc, 16, 24, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_cbc; + break; + case NID_aes_256_cbc: + UADK_CIPHER_DESCR(aes_256_cbc, 16, 32, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_cbc; + break; + case NID_aes_128_ecb: + UADK_CIPHER_DESCR(aes_128_ecb, 16, 16, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ecb; + break; + case NID_aes_192_ecb: + UADK_CIPHER_DESCR(aes_192_ecb, 16, 24, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ecb; + break; + case NID_aes_256_ecb: + UADK_CIPHER_DESCR(aes_256_ecb, 16, 32, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_ecb; + break; + case NID_aes_128_xts: + UADK_CIPHER_DESCR(aes_128_xts, 1, 32, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + 
EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_xts; + break; + case NID_aes_256_xts: + UADK_CIPHER_DESCR(aes_256_xts, 1, 64, 16, EVP_CIPH_XTS_MODE | EVP_CIPH_CUSTOM_IV, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_xts; + break; + case NID_sm4_cbc: + UADK_CIPHER_DESCR(sm4_cbc, 16, 16, 16, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_cbc; + break; + case NID_des_ede3_cbc: + UADK_CIPHER_DESCR(des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_des_ede3_cbc; + break; + case NID_des_ede3_ecb: + UADK_CIPHER_DESCR(des_ede3_ecb, 8, 24, 0, EVP_CIPH_ECB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_des_ede3_ecb; + break; + case NID_aes_128_ctr: + UADK_CIPHER_DESCR(aes_128_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ctr; + break; + case NID_aes_192_ctr: + UADK_CIPHER_DESCR(aes_192_ctr, 1, 24, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ctr; + break; + case NID_aes_256_ctr: + UADK_CIPHER_DESCR(aes_256_ctr, 1, 32, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = 
uadk_aes_256_ctr; + break; + case NID_aes_128_ofb128: + UADK_CIPHER_DESCR(aes_128_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_ofb128; + break; + case NID_aes_192_ofb128: + UADK_CIPHER_DESCR(aes_192_ofb128, 1, 24, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_ofb128; + break; + case NID_aes_256_ofb128: + UADK_CIPHER_DESCR(aes_256_ofb128, 1, 32, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_ofb128; + break; + case NID_aes_128_cfb128: + UADK_CIPHER_DESCR(aes_128_cfb128, 1, 16, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_128_cfb128; + break; + case NID_aes_192_cfb128: + UADK_CIPHER_DESCR(aes_192_cfb128, 1, 24, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_192_cfb128; + break; + case NID_aes_256_cfb128: + UADK_CIPHER_DESCR(aes_256_cfb128, 1, 32, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_aes_256_cfb128; + break; + case NID_sm4_ofb128: + UADK_CIPHER_DESCR(sm4_ofb128, 1, 16, 16, EVP_CIPH_OFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_ofb128; + break; + case 
NID_sm4_cfb128: + UADK_CIPHER_DESCR(sm4_cfb128, 1, 16, 16, EVP_CIPH_CFB_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_cfb128; + break; + case NID_sm4_ctr: + UADK_CIPHER_DESCR(sm4_ctr, 1, 16, 16, EVP_CIPH_CTR_MODE, + sizeof(struct cipher_priv_ctx), uadk_e_cipher_init, + uadk_e_do_cipher, uadk_e_cipher_cleanup, + EVP_CIPHER_set_asn1_iv, EVP_CIPHER_get_asn1_iv); + cipher = uadk_sm4_ctr; + break; + default: + cipher = NULL; + break; }
- bind_v2_cipher(); - if (platform > HW_V2) - bind_v3_cipher(); - - return ENGINE_set_ciphers(e, uadk_e_engine_ciphers); + return cipher; }
-static void destroy_v2_cipher(void) +static void destroy_cipher(struct engine_cipher_info *info, int num) { - EVP_CIPHER_meth_free(uadk_aes_128_cbc); - uadk_aes_128_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_192_cbc); - uadk_aes_192_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_256_cbc); - uadk_aes_256_cbc = 0; - EVP_CIPHER_meth_free(uadk_aes_128_ecb); - uadk_aes_128_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ecb); - uadk_aes_192_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ecb); - uadk_aes_256_ecb = 0; - EVP_CIPHER_meth_free(uadk_aes_128_xts); - uadk_aes_128_xts = 0; - EVP_CIPHER_meth_free(uadk_aes_256_xts); - uadk_aes_256_xts = 0; - EVP_CIPHER_meth_free(uadk_sm4_cbc); - uadk_sm4_cbc = 0; - EVP_CIPHER_meth_free(uadk_des_ede3_cbc); - uadk_des_ede3_cbc = 0; - EVP_CIPHER_meth_free(uadk_des_ede3_ecb); - uadk_des_ede3_ecb = 0; - EVP_CIPHER_meth_free(uadk_sm4_ecb); - uadk_sm4_ecb = 0; -} - -static void destroy_v3_cipher(void) -{ - EVP_CIPHER_meth_free(uadk_aes_128_ctr); - uadk_aes_128_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ctr); - uadk_aes_192_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ctr); - uadk_aes_256_ctr = 0; - EVP_CIPHER_meth_free(uadk_aes_128_ofb128); - uadk_aes_128_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_192_ofb128); - uadk_aes_192_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_256_ofb128); - uadk_aes_256_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_128_cfb128); - uadk_aes_128_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_192_cfb128); - uadk_aes_192_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_aes_256_cfb128); - uadk_aes_256_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_cfb128); - uadk_sm4_cfb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_ofb128); - uadk_sm4_ofb128 = 0; - EVP_CIPHER_meth_free(uadk_sm4_ctr); - uadk_sm4_ctr = 0; -} - -void uadk_e_destroy_cipher(void) -{ - __u32 i; - int ret; - - if (g_cipher_engine.pid == getpid()) { - ret = uadk_e_is_env_enabled("cipher"); - if (ret == ENV_ENABLED) { - wd_cipher_env_uninit(); - } else { - wd_cipher_uninit(); - for (i = 0; 
i < g_cipher_engine.ctx_cfg.ctx_num; i++) - wd_release_ctx(g_cipher_engine.ctx_cfg.ctxs[i].ctx); - free(g_cipher_engine.ctx_cfg.ctxs); + for (int i = 0; i != num; ++i) { + if (info[i].cipher != NULL) { + EVP_CIPHER_meth_free(info[i].cipher); + info[i].cipher = NULL; } - g_cipher_engine.pid = 0; } +}
- pthread_spin_destroy(&g_cipher_engine.lock); - - destroy_v2_cipher(); - if (platform > HW_V2) - destroy_v3_cipher(); +void uadk_e_destroy_cipher(struct engine_cipher_info *info, int num) +{ + destroy_cipher(info, num); }
void uadk_e_cipher_lock_init(void) diff --git a/src/uadk_engine_init.c b/src/uadk_engine_init.c index 0a9e3e6..d6503f8 100644 --- a/src/uadk_engine_init.c +++ b/src/uadk_engine_init.c @@ -24,15 +24,17 @@ #include <uadk/wd.h> #include "uadk.h" #include "uadk_async.h" +#include "sec_cipher_adapter.h" #ifdef KAE #include "v1/uadk_v1.h" #endif
#define UADK_CMD_ENABLE_CIPHER_ENV ENGINE_CMD_BASE -#define UADK_CMD_ENABLE_DIGEST_ENV (ENGINE_CMD_BASE + 1) -#define UADK_CMD_ENABLE_RSA_ENV (ENGINE_CMD_BASE + 2) -#define UADK_CMD_ENABLE_DH_ENV (ENGINE_CMD_BASE + 3) -#define UADK_CMD_ENABLE_ECC_ENV (ENGINE_CMD_BASE + 4) +#define UADK_CMD_ENABLE_AEAD_ENV (ENGINE_CMD_BASE + 1) +#define UADK_CMD_ENABLE_DIGEST_ENV (ENGINE_CMD_BASE + 2) +#define UADK_CMD_ENABLE_RSA_ENV (ENGINE_CMD_BASE + 3) +#define UADK_CMD_ENABLE_DH_ENV (ENGINE_CMD_BASE + 4) +#define UADK_CMD_ENABLE_ECC_ENV (ENGINE_CMD_BASE + 5)
/* Constants used when creating the ENGINE */ const char *engine_uadk_id = "uadk_engine"; @@ -60,6 +62,12 @@ static const ENGINE_CMD_DEFN g_uadk_cmd_defns[] = { "Enable or Disable cipher engine environment variable.", ENGINE_CMD_FLAG_NUMERIC }, + { + UADK_CMD_ENABLE_AEAD_ENV, + "UADK_CMD_ENABLE_AEAD_ENV", + "Enable or Disable aead engine environment variable.", + ENGINE_CMD_FLAG_NUMERIC + }, { UADK_CMD_ENABLE_DIGEST_ENV, "UADK_CMD_ENABLE_DIGEST_ENV", @@ -104,6 +112,7 @@ struct uadk_alg_env_enabled {
static struct uadk_alg_env_enabled uadk_env_enabled[] = { { "cipher", 0 }, + { "aead", 0 }, { "digest", 0 }, { "rsa", 0 }, { "dh", 0 }, @@ -210,7 +219,7 @@ static int uadk_destroy(ENGINE *e) #endif
if (uadk_cipher) - uadk_e_destroy_cipher(); + uadk_e_destroy_crypto(); if (uadk_digest) uadk_e_destroy_digest(); if (uadk_rsa) @@ -322,11 +331,12 @@ static void bind_fn_kae_alg(ENGINE *e)
static void bind_fn_uadk_alg(ENGINE *e) { - struct uacce_dev *dev; + struct uacce_dev *dev, *dev_aead;
+ dev_aead = wd_get_accel_dev("aead"); dev = wd_get_accel_dev("cipher"); - if (dev) { - if (!uadk_e_bind_cipher(e)) + if (dev && dev_aead) { + if (!uadk_e_bind_crypto(e)) fprintf(stderr, "uadk bind cipher failed\n"); else uadk_cipher = 1;