From: Wenkai Lin <linwenkai6@hisilicon.com>
Currently, multiple EVP contexts may share the same session, and a table is used to record the mapping between EVP contexts and sessions. The table is updated whenever the mapping changes (cleanup, copy context, or init). However, a shared session causes a problem: one data flow modifies the session state, and another data flow then uses the wrong state. Therefore, give each context its own session when a context is copied; the table is no longer needed.

Since the new session does not have any stream information, the engine needs to send a message carrying the total data length and the message state to uadk, so that uadk can configure the session correctly.
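For illustration only (not part of this patch): a minimal sketch of the copy scenario this change targets, written against the standard OpenSSL EVP API. The helper name digest_copy_example and the choice of SHA-256 are arbitrary; the point is that the copied context keeps hashing independently of the original, so it must not share the original context's uadk session.

#include <openssl/evp.h>

/*
 * A streaming digest context is copied mid-stream and both contexts
 * are finalized independently. With a shared session, finalizing the
 * original would corrupt the stream state seen by the copy.
 */
static int digest_copy_example(const unsigned char *part1, size_t len1,
			       const unsigned char *part2, size_t len2,
			       unsigned char *md1, unsigned char *md2)
{
	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
	EVP_MD_CTX *copy = EVP_MD_CTX_new();
	unsigned int mdlen;
	int ret = 0;

	if (!ctx || !copy)
		goto out;

	if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL) ||
	    !EVP_DigestUpdate(ctx, part1, len1))
		goto out;

	/* Fork the stream state; after this patch the copy gets its own session. */
	if (!EVP_MD_CTX_copy_ex(copy, ctx))
		goto out;

	/* The original finishes here ... */
	if (!EVP_DigestFinal_ex(ctx, md1, &mdlen))
		goto out;

	/* ... while the copy keeps hashing and finishes later. */
	if (!EVP_DigestUpdate(copy, part2, len2) ||
	    !EVP_DigestFinal_ex(copy, md2, &mdlen))
		goto out;

	ret = 1;
out:
	EVP_MD_CTX_free(ctx);
	EVP_MD_CTX_free(copy);
	return ret;
}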
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
---
 src/uadk_digest.c | 234 +++++++++++++++-------------------------
 1 file changed, 75 insertions(+), 159 deletions(-)

diff --git a/src/uadk_digest.c b/src/uadk_digest.c
index 52f4ef7..beb9f51 100644
--- a/src/uadk_digest.c
+++ b/src/uadk_digest.c
@@ -35,8 +35,6 @@
 #define CTX_SYNC		0
 #define CTX_ASYNC		1
 #define CTX_NUM			2
-#define DIGEST_DOING		1
-#define DIGEST_END		0
 #define ENV_ENABLED		1
 
 /* The max BD data length is 16M-512B */
@@ -99,7 +97,9 @@ struct digest_priv_ctx {
 	uint32_t state;
 	uint32_t switch_threshold;
 	int switch_flag;
-	bool copy;
+	uint32_t app_datasize;
+	bool is_stream_copy;
+	size_t total_data_len;
 };
 
 struct digest_info {
@@ -148,78 +148,6 @@ static EVP_MD *uadk_sha256;
 static EVP_MD *uadk_sha384;
 static EVP_MD *uadk_sha512;
 
-struct ctx_info {
-	/* Use md_ctx as the key */
-	EVP_MD_CTX *md_ctx;
-	/* Multiple ctxs may share the same session and data */
-	handle_t sess;
-	unsigned char *data;
-	struct ctx_info *next;
-};
-
-/* Global table to record ctx and its session */
-static struct ctx_info *digest_ctx_info;
-/* Lock for global ctx_info table */
-static pthread_mutex_t digest_ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static void find_ctx_info(EVP_MD_CTX *ctx, struct ctx_info **node)
-{
-	struct ctx_info *pnext = digest_ctx_info;
-
-	while (pnext) {
-		if (pnext->md_ctx == ctx) {
-			*node = pnext;
-			return;
-		}
-		pnext = pnext->next;
-	}
-}
-
-static void del_ctx_info(struct ctx_info *node)
-{
-	struct ctx_info *pnext = digest_ctx_info;
-
-	if (pnext == node) {
-		digest_ctx_info = pnext->next;
-		return;
-	}
-
-	while (pnext) {
-		if (pnext->next == node) {
-			pnext->next = pnext->next->next;
-			return;
-		}
-		pnext = pnext->next;
-	}
-}
-
-static void add_ctx_info(struct ctx_info *node)
-{
-	struct ctx_info *pnext = digest_ctx_info;
-
-	if (!pnext) {
-		digest_ctx_info = node;
-		node->next = NULL;
-	} else {
-		node->next = digest_ctx_info;
-		digest_ctx_info = node;
-	}
-}
-
-static bool is_ctx_sess_shared(struct ctx_info *node)
-{
-	struct ctx_info *pnext = digest_ctx_info;
-
-	while (pnext) {
-		if ((pnext != node) && (pnext->sess == node->sess))
-			return true;
-
-		pnext = pnext->next;
-	}
-
-	return false;
-}
-
 static const EVP_MD *uadk_e_digests_soft_md(uint32_t e_nid)
 {
 	const EVP_MD *digest_md = NULL;
@@ -271,7 +199,7 @@ static int digest_soft_init(struct digest_priv_ctx *md_ctx)
 	uint32_t e_nid = md_ctx->e_nid;
 	const EVP_MD *digest_md = NULL;
 	EVP_MD_CTX *ctx = NULL;
-	int ctx_len;
+	int app_datasize;
 
 	/* Allocate a soft ctx for hardware engine */
 	if (md_ctx->soft_ctx == NULL)
@@ -285,13 +213,15 @@ static int digest_soft_init(struct digest_priv_ctx *md_ctx)
 		return 0;
 	}
 
-	ctx_len = EVP_MD_meth_get_app_datasize(digest_md);
+	app_datasize = EVP_MD_meth_get_app_datasize(digest_md);
 	if (ctx->md_data == NULL) {
-		ctx->md_data = OPENSSL_malloc(ctx_len);
+		ctx->md_data = OPENSSL_malloc(app_datasize);
 		if (ctx->md_data == NULL)
 			return 0;
 	}
 
+	md_ctx->app_datasize = app_datasize;
+
 	return EVP_MD_meth_get_init(digest_md)(ctx);
 }
 
@@ -339,17 +269,14 @@ static void digest_soft_cleanup(struct digest_priv_ctx *md_ctx)
 {
 	EVP_MD_CTX *ctx = md_ctx->soft_ctx;
 
-	/* Prevent double-free after the copy is used */
-	if (md_ctx->copy)
-		return;
-
 	if (ctx != NULL) {
 		if (ctx->md_data) {
 			OPENSSL_free(ctx->md_data);
 			ctx->md_data = NULL;
 		}
 		EVP_MD_CTX_free(ctx);
-		ctx = NULL;
+		md_ctx->soft_ctx = NULL;
+		md_ctx->app_datasize = 0;
 	}
 }
 
@@ -563,7 +490,6 @@ static int uadk_e_init_digest(void)
 			goto err_unlock;
 
 		g_digest_engine.pid = getpid();
-		digest_ctx_info = NULL;
 		pthread_spin_unlock(&g_digest_engine.lock);
 		free(dev);
 	}
@@ -595,30 +521,9 @@ static void digest_priv_ctx_reset(struct digest_priv_ctx *priv)
 	priv->last_update_bufflen = 0;
 	priv->switch_threshold = 0;
 	priv->switch_flag = 0;
-}
-
-static int alloc_ctx_info(EVP_MD_CTX *ctx)
-{
-	struct digest_priv_ctx *priv =
-		(struct digest_priv_ctx *) EVP_MD_CTX_md_data(ctx);
-	struct ctx_info *node = NULL;
-
-	pthread_mutex_lock(&digest_ctx_mutex);
-	find_ctx_info(ctx, &node);
-	if (node == NULL) {
-		node = malloc(sizeof(*node));
-		if (node == NULL) {
-			pthread_mutex_unlock(&digest_ctx_mutex);
-			return 0;
-		}
-		node->md_ctx = ctx;
-		add_ctx_info(node);
-	}
-	node->sess = priv->sess;
-	node->data = priv->data;
-	pthread_mutex_unlock(&digest_ctx_mutex);
-
-	return 1;
+	priv->total_data_len = 0;
+	priv->app_datasize = 0;
+	priv->is_stream_copy = false;
 }
 
 static int uadk_e_digest_init(EVP_MD_CTX *ctx)
@@ -665,12 +570,6 @@ static int uadk_e_digest_init(EVP_MD_CTX *ctx)
 		priv->data = malloc(DIGEST_BLOCK_SIZE);
 		if (unlikely(!priv->data))
 			goto out;
-
-		if (!alloc_ctx_info(ctx)) {
-			free(priv->data);
-			priv->data = NULL;
-			goto out;
-		}
 	}
 
 	priv->switch_threshold = sec_digest_get_sw_threshold(priv->e_nid);
@@ -696,6 +595,16 @@ static void digest_update_out_length(EVP_MD_CTX *ctx)
 		priv->req.out_bytes = WD_DIGEST_SHA384_FULL_LEN;
 }
 
+static void digest_set_msg_state(struct digest_priv_ctx *priv, bool is_end)
+{
+	if (unlikely(priv->is_stream_copy)) {
+		priv->req.has_next = is_end ? WD_DIGEST_STREAM_END : WD_DIGEST_STREAM_DOING;
+		priv->is_stream_copy = false;
+	} else {
+		priv->req.has_next = is_end ? WD_DIGEST_END : WD_DIGEST_DOING;
+	}
+}
+
 static int digest_update_inner(EVP_MD_CTX *ctx, const void *data, size_t data_len)
 {
 	struct digest_priv_ctx *priv =
@@ -706,8 +615,7 @@ static int digest_update_inner(EVP_MD_CTX *ctx, const void *data, size_t data_le
 	int ret;
 
 	digest_update_out_length(ctx);
-
-	priv->req.has_next = DIGEST_DOING;
+	digest_set_msg_state(priv, false);
 
 	while (priv->last_update_bufflen + left_len > DIGEST_BLOCK_SIZE) {
 		copy_to_bufflen = DIGEST_BLOCK_SIZE - priv->last_update_bufflen;
@@ -766,6 +674,8 @@ static int uadk_e_digest_update(EVP_MD_CTX *ctx, const void *data, size_t data_l
 	if (unlikely(priv->switch_flag == UADK_DO_SOFT))
 		goto soft_update;
 
+	priv->total_data_len += data_len;
+
 	if (priv->last_update_bufflen + data_len <= DIGEST_BLOCK_SIZE) {
 		uadk_memcpy(priv->data + priv->last_update_bufflen, data, data_len);
 		priv->last_update_bufflen += data_len;
@@ -857,9 +767,10 @@ static int uadk_e_digest_final(EVP_MD_CTX *ctx, unsigned char *digest)
 {
 	struct digest_priv_ctx *priv =
 		(struct digest_priv_ctx *)EVP_MD_CTX_md_data(ctx);
-	int ret = 1;
 	struct async_op op;
-	priv->req.has_next = DIGEST_END;
+	int ret = 1;
+
+	digest_set_msg_state(priv, true);
 	priv->req.in = priv->data;
 	priv->req.out = priv->out;
 	priv->req.in_bytes = priv->last_update_bufflen;
@@ -913,22 +824,10 @@ static int uadk_e_digest_cleanup(EVP_MD_CTX *ctx)
 {
 	struct digest_priv_ctx *priv =
 		(struct digest_priv_ctx *)EVP_MD_CTX_md_data(ctx);
-	struct ctx_info *node = NULL;
 
 	if (!priv)
 		return 1;
 
-	pthread_mutex_lock(&digest_ctx_mutex);
-	find_ctx_info(ctx, &node);
-	if (node == NULL) {
-		pthread_mutex_unlock(&digest_ctx_mutex);
-		return 1;
-	}
-
-	/* If another ctx uses the same session, the session is not released */
-	if (is_ctx_sess_shared(node))
-		goto out;
-
 	if (priv->data) {
 		free(priv->data);
 		priv->data = NULL;
@@ -939,10 +838,9 @@ static int uadk_e_digest_cleanup(EVP_MD_CTX *ctx)
 		priv->sess = 0;
 	}
 
-out:
-	del_ctx_info(node);
-	free(node);
-	pthread_mutex_unlock(&digest_ctx_mutex);
+	if (priv->soft_ctx)
+		digest_soft_cleanup(priv);
+
 	return 1;
 }
 
@@ -952,40 +850,58 @@ static int uadk_e_digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
 		(struct digest_priv_ctx *)EVP_MD_CTX_md_data(from);
 	struct digest_priv_ctx *t =
 		(struct digest_priv_ctx *)EVP_MD_CTX_md_data(to);
-	struct ctx_info *node = NULL;
+	struct sched_params params = {0};
+	int ret;
 
-	if (!f || !t)
+	if (!t)
 		return 1;
-	/*
-	 * EVP_MD_CTX_copy will copy from->priv to to->priv,
-	 * including data pointer. Instead of coping data contents,
-	 * add a flag to prevent double-free.
-	 */
-
-	pthread_mutex_lock(&digest_ctx_mutex);
-	find_ctx_info(to, &node);
-	if (node == NULL) {
-		node = malloc(sizeof(*node));
-		if (node == NULL) {
-			pthread_mutex_unlock(&digest_ctx_mutex);
+
+	if (t->sess) {
+		params.numa_id = -1;
+		t->setup.sched_param = &params;
+		t->sess = wd_digest_alloc_sess(&t->setup);
+		if (!t->sess) {
+			fprintf(stderr, "failed to alloc session for digest ctx copy.\n");
 			return 0;
 		}
-		node->md_ctx = to;
-		add_ctx_info(node);
-	} else {
-		if (!is_ctx_sess_shared(node)) {
-			wd_digest_free_sess(node->sess);
-			free(node->data);
+
+		t->data = malloc(DIGEST_BLOCK_SIZE);
+		if (!t->data)
+			goto free_sess;
+
+		if (t->state != SEC_DIGEST_INIT) {
+			t->is_stream_copy = true;
+			/* Length that the hardware has processed should be equal to
+			 * total input data length minus software cache data length.
+			 */
+			t->req.long_data_len = t->total_data_len - t->last_update_bufflen;
 		}
+
+		memcpy(t->data, f->data, f->last_update_bufflen);
 	}
-	node->sess = t->sess;
-	node->data = t->data;
-	pthread_mutex_unlock(&digest_ctx_mutex);
 
-	if (f->data)
-		t->copy = true;
+	if (t->soft_ctx) {
+		t->soft_ctx = NULL;
+		ret = digest_soft_init(t);
+		if (!ret)
+			goto free_data;
+
+		memcpy(t->soft_ctx->md_data, f->soft_ctx->md_data, t->app_datasize);
+	}
 
 	return 1;
+
+free_data:
+	if (t->data) {
+		free(t->data);
+		t->data = NULL;
+	}
+free_sess:
+	if (t->sess) {
+		wd_digest_free_sess(t->sess);
+		t->sess = 0;
+	}
+	return 0;
 }