From: Zhushuai Yin <yinzhushuai@huawei.com> The SEC v2 interface needs to support the no-sva business model within the new evolution framework. This ensures that users switching to the v2 interface can utilize both the sva mode and the no-sva mode. Signed-off-by: Zhushuai Yin <yinzhushuai@huawei.com> Signed-off-by: Zongyu Wu <wuzongyu1@huawei.com> --- drv/hisi_sec.c | 1081 ++++++++++++++++++++++++++++++----- include/drv/wd_aead_drv.h | 23 +- include/drv/wd_cipher_drv.h | 2 + include/drv/wd_digest_drv.h | 2 + include/wd_aead.h | 2 + include/wd_cipher.h | 2 + include/wd_digest.h | 2 + wd_aead.c | 179 +++++- wd_cipher.c | 40 +- wd_digest.c | 42 +- 10 files changed, 1207 insertions(+), 168 deletions(-) diff --git a/drv/hisi_sec.c b/drv/hisi_sec.c index 246edf4..0074952 100644 --- a/drv/hisi_sec.c +++ b/drv/hisi_sec.c @@ -91,6 +91,7 @@ #define AUTH_ALG_OFFSET 11 #define WD_CIPHER_THEN_DIGEST 0x0 #define WD_DIGEST_THEN_CIPHER 0x1 +#define AEAD_AIV_OFFSET 0x6 #define SEC_CTX_Q_NUM_DEF 1 @@ -602,6 +603,157 @@ static int hisi_sec_get_usage(void *param) return 0; } +static int eops_param_check(struct wd_alg_driver *drv, struct wd_mm_ops *mm_ops) +{ + if (!drv || !drv->priv) { + WD_ERR("invalid: aead drv or priv is NULL!\n"); + return -WD_EINVAL; + } + + if (!mm_ops) { + WD_ERR("invalid: mm_ops is NULL!\n"); + return -WD_EINVAL; + } + + return WD_SUCCESS; +} + +static int aead_sess_eops_init(struct wd_alg_driver *drv, + struct wd_mm_ops *mm_ops, void **params) +{ + struct wd_aead_aiv_addr *aiv_addr; + struct hisi_sec_ctx *sec_ctx; + struct hisi_qp *qp; + __u16 sq_depth; + int ret; + + ret = eops_param_check(drv, mm_ops); + if (ret) + return ret; + + if (!params) { + WD_ERR("invalid: extend ops init params address is NULL!\n"); + return -WD_EINVAL; + } + + if (*params) { + WD_ERR("invalid: extend ops init params repeatedly!\n"); + return -WD_EINVAL; + } + + aiv_addr = calloc(1, sizeof(struct wd_aead_aiv_addr)); + if (!aiv_addr) { + WD_ERR("aead failed to alloc aiv_addr memory!\n"); + return -WD_ENOMEM; + } + + sec_ctx = (struct hisi_sec_ctx *)drv->priv; + qp = (struct hisi_qp *)wd_ctx_get_priv(sec_ctx->config.ctxs[0].ctx); + sq_depth = qp->q_info.sq_depth; + aiv_addr->aiv = mm_ops->alloc(mm_ops->usr, (__u32)sq_depth << AEAD_AIV_OFFSET); + if (!aiv_addr->aiv) { + WD_ERR("aead failed to alloc aiv memory!\n"); + goto aiv_err; + } + memset(aiv_addr->aiv, 0, (__u32)sq_depth << AEAD_AIV_OFFSET); + if (!mm_ops->sva_mode) { + aiv_addr->aiv_nosva = mm_ops->iova_map(mm_ops->usr, aiv_addr->aiv, + (__u32)sq_depth << AEAD_AIV_OFFSET); + if (!aiv_addr->aiv_nosva) + goto aiv_nosva_err; + } + + aiv_addr->aiv_status = calloc(1, sq_depth); + if (!aiv_addr->aiv_status) { + WD_ERR("aead failed to alloc aiv_status memory!\n"); + goto aiv_status_err; + } + + *params = aiv_addr; + + return WD_SUCCESS; + +aiv_status_err: + if (!mm_ops->sva_mode) + mm_ops->iova_unmap(mm_ops->usr, aiv_addr->aiv, (void *)aiv_addr->aiv_nosva, + (__u32)sq_depth << AEAD_AIV_OFFSET); +aiv_nosva_err: + mm_ops->free(mm_ops->usr, aiv_addr->aiv); +aiv_err: + free(aiv_addr); + return -WD_ENOMEM; +} + +static void aead_sess_eops_uninit(struct wd_alg_driver *drv, + struct wd_mm_ops *mm_ops, void *params) +{ + struct wd_aead_aiv_addr *aiv_addr; + struct hisi_sec_ctx *sec_ctx; + struct hisi_qp *qp; + __u16 sq_depth; + int ret; + + ret = eops_param_check(drv, mm_ops); + if (ret) + return; + + if (!params) { + WD_ERR("invalid: extend ops uninit params address is NULL!\n"); + return; + } + + sec_ctx = (struct hisi_sec_ctx *)drv->priv; + qp = (struct hisi_qp 
*)wd_ctx_get_priv(sec_ctx->config.ctxs[0].ctx); + sq_depth = qp->q_info.sq_depth; + + aiv_addr = (struct wd_aead_aiv_addr *)params; + if (!mm_ops->sva_mode) + mm_ops->iova_unmap(mm_ops->usr, aiv_addr->aiv, (void *)aiv_addr->aiv_nosva, + (__u32)sq_depth << AEAD_AIV_OFFSET); + mm_ops->free(mm_ops->usr, aiv_addr->aiv); + free(aiv_addr->aiv_status); + free(params); +} + +static int aead_get_aiv_addr(struct hisi_qp *qp, struct wd_aead_msg *msg) +{ + struct wd_aead_aiv_addr *aiv_addr = (struct wd_aead_aiv_addr *)msg->drv_cfg; + __u16 sq_depth = qp->q_info.sq_depth; + int i; + + for (i = 0; i < sq_depth; i++) { + if (!__atomic_test_and_set(&aiv_addr->aiv_status[i], __ATOMIC_ACQUIRE)) { + msg->aiv = aiv_addr->aiv + i * AIV_STREAM_LEN; + return i; + } + } + + return -WD_EBUSY; +} + +static void aead_free_aiv_addr(struct wd_aead_msg *msg) +{ + struct wd_aead_aiv_addr *aiv_addr = (struct wd_aead_aiv_addr *)msg->drv_cfg; + __u32 aiv_idx; + + aiv_idx = (msg->aiv - aiv_addr->aiv) >> AEAD_AIV_OFFSET; + __atomic_clear(&aiv_addr->aiv_status[aiv_idx], __ATOMIC_RELEASE); +} + +static int sec_aead_get_extend_ops(void *ops) +{ + struct wd_aead_extend_ops *aead_ops = (struct wd_aead_extend_ops *)ops; + + if (!aead_ops) + return -WD_EINVAL; + + aead_ops->params = NULL; + aead_ops->eops_aiv_init = aead_sess_eops_init; + aead_ops->eops_aiv_uninit = aead_sess_eops_uninit; + + return WD_SUCCESS; +} + #define GEN_SEC_ALG_DRIVER(sec_alg_name, alg_type) \ {\ .drv_name = "hisi_sec2",\ @@ -616,6 +768,7 @@ static int hisi_sec_get_usage(void *param) .send = alg_type##_send,\ .recv = alg_type##_recv,\ .get_usage = hisi_sec_get_usage,\ + .get_extend_ops = sec_aead_get_extend_ops,\ } static struct wd_alg_driver cipher_alg_driver[] = { @@ -914,13 +1067,83 @@ static int fill_cipher_bd2_mode(struct wd_cipher_msg *msg, return 0; } -static void fill_cipher_bd2_addr(struct wd_cipher_msg *msg, - struct hisi_sec_sqe *sqe) +static void destroy_cipher_bd2_addr(struct wd_cipher_msg *msg, struct hisi_sec_sqe *sqe) { - sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->in; - sqe->type2.data_dst_addr = (__u64)(uintptr_t)msg->out; - sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->type2.c_key_addr = (__u64)(uintptr_t)msg->key; + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool; + + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("cipher failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + if (sqe->type2.data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->type2.data_src_addr, + msg->in_bytes); + + if (sqe->type2.data_dst_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->type2.data_dst_addr, + msg->out_bytes); + + if (sqe->type2.c_key_addr) + mm_ops->iova_unmap(mempool, msg->key, (void *)(uintptr_t)sqe->type2.c_key_addr, + msg->key_bytes); + + if (sqe->type2.c_ivin_addr) + mm_ops->iova_unmap(mempool, msg->iv, (void *)(uintptr_t)sqe->type2.c_ivin_addr, + msg->iv_bytes); +} + +static int fill_cipher_bd2_addr(struct wd_cipher_msg *msg, struct hisi_sec_sqe *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool, *phy_addr; + + if (mm_ops->sva_mode) { + sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->in; + sqe->type2.data_dst_addr = (__u64)(uintptr_t)msg->out; + sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; + sqe->type2.c_key_addr = (__u64)(uintptr_t)msg->key; + return 0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + WD_ERR("cipher failed to check memory type!\n"); + return -WD_EINVAL; + } + 
+ /* No-SVA mode and Memory is USER mode or PROXY mode */ + mempool = mm_ops->usr; + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->type2.data_src_addr = (__u64)(uintptr_t)phy_addr; + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.data_dst_addr = (__u64)(uintptr_t)phy_addr; + if (msg->iv_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->iv, msg->iv_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.c_ivin_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->key_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->key, msg->key_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.c_key_addr = (__u64)(uintptr_t)phy_addr; + } + + return 0; + +map_err: + destroy_cipher_bd2_addr(msg, sqe); + return -WD_ENOMEM; } static void parse_cipher_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, @@ -945,8 +1168,6 @@ static void parse_cipher_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_CIPHER; recv_msg->data_fmt = get_data_fmt_v2(sqe->sds_sa_type); - recv_msg->in = (__u8 *)(uintptr_t)sqe->type2.data_src_addr; - recv_msg->out = (__u8 *)(uintptr_t)sqe->type2.data_dst_addr; temp_msg = wd_cipher_get_msg(qp->q_info.idx, tag); if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -954,6 +1175,9 @@ static void parse_cipher_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, qp->q_info.idx, tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->out = temp_msg->out; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; @@ -964,6 +1188,8 @@ static void parse_cipher_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, else update_iv_sgl(temp_msg); + destroy_cipher_bd2_addr(temp_msg, sqe); + if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "cipher"); } @@ -1024,11 +1250,12 @@ static int cipher_len_check(struct wd_cipher_msg *msg) return 0; } -static void hisi_sec_put_sgl(handle_t h_qp, __u8 alg_type, void *in, void *out) +static void hisi_sec_put_sgl(handle_t h_qp, __u8 alg_type, void *in, void *out, + struct wd_mm_ops *mm_ops) { handle_t h_sgl_pool; - h_sgl_pool = hisi_qm_get_sglpool(h_qp, NULL); + h_sgl_pool = hisi_qm_get_sglpool(h_qp, mm_ops); if (!h_sgl_pool) return; @@ -1038,19 +1265,12 @@ static void hisi_sec_put_sgl(handle_t h_qp, __u8 alg_type, void *in, void *out) hisi_qm_put_hw_sgl(h_sgl_pool, out); } -static int hisi_sec_fill_sgl(handle_t h_qp, __u8 **in, __u8 **out, - struct hisi_sec_sqe *sqe, __u8 type) +static int hisi_sec_fill_sgl(handle_t h_sgl_pool, __u8 **in, __u8 **out, + struct hisi_sec_sqe *sqe, __u8 type) { - handle_t h_sgl_pool; void *hw_sgl_in; void *hw_sgl_out; - h_sgl_pool = hisi_qm_get_sglpool(h_qp, NULL); - if (!h_sgl_pool) { - WD_ERR("failed to get sglpool for hw_v2!\n"); - return -WD_EINVAL; - } - hw_sgl_in = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*in)); if (!hw_sgl_in) { WD_ERR("failed to get sgl in for hw_v2!\n"); @@ -1078,19 +1298,12 @@ static int hisi_sec_fill_sgl(handle_t h_qp, __u8 **in, __u8 **out, return 0; } -static int hisi_sec_fill_sgl_v3(handle_t h_qp, __u8 **in, __u8 **out, +static int hisi_sec_fill_sgl_v3(handle_t h_sgl_pool, __u8 **in, __u8 **out, struct hisi_sec_sqe3 *sqe, __u8 type) { - handle_t h_sgl_pool; void *hw_sgl_in; void *hw_sgl_out; - h_sgl_pool = hisi_qm_get_sglpool(h_qp, NULL); - if (!h_sgl_pool) { - WD_ERR("failed to get sglpool for hw_v3!\n"); - return -WD_EINVAL; 
- } - hw_sgl_in = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*in)); if (!hw_sgl_in) { WD_ERR("failed to get sgl in for hw_v3!\n"); @@ -1165,6 +1378,7 @@ static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *w handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_cipher_msg *msg = wd_msg; struct hisi_sec_sqe sqe; + handle_t h_sgl_pool; __u16 count = 0; int ret; @@ -1179,7 +1393,13 @@ static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *w return ret; if (msg->data_fmt == WD_SGL_BUF) { - ret = hisi_sec_fill_sgl(h_qp, &msg->in, &msg->out, &sqe, + h_sgl_pool = hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("cipher failed to get sglpool for hw_v2!\n"); + return -WD_EINVAL; + } + + ret = hisi_sec_fill_sgl(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; @@ -1188,7 +1408,11 @@ static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *w hisi_set_msg_id(h_qp, &msg->tag); sqe.type2.clen_ivhlen |= (__u32)msg->in_bytes; sqe.type2.tag = (__u16)msg->tag; - fill_cipher_bd2_addr(msg, &sqe); + ret = fill_cipher_bd2_addr(msg, &sqe); + if (ret < 0) { + WD_ERR("cipher map memory is err(%d)!\n", ret); + return ret; + } ret = hisi_qm_send(h_qp, &sqe, 1, &count); if (ret < 0) { @@ -1197,8 +1421,8 @@ static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *w if (msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, - msg->out); - + msg->out, msg->mm_ops); + destroy_cipher_bd2_addr(msg, &sqe); return ret; } @@ -1225,7 +1449,7 @@ static int hisi_sec_cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *w if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, recv_msg->in, - recv_msg->out); + recv_msg->out, recv_msg->mm_ops); return 0; } @@ -1316,13 +1540,83 @@ static int fill_cipher_bd3_mode(struct wd_cipher_msg *msg, return 0; } -static void fill_cipher_bd3_addr(struct wd_cipher_msg *msg, - struct hisi_sec_sqe3 *sqe) +static void destroy_cipher_bd3_addr(struct wd_cipher_msg *msg, struct hisi_sec_sqe3 *sqe) { - sqe->data_src_addr = (__u64)(uintptr_t)msg->in; - sqe->data_dst_addr = (__u64)(uintptr_t)msg->out; - sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->c_key_addr = (__u64)(uintptr_t)msg->key; + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool; + + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("cipher failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + if (sqe->data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->data_src_addr, + msg->in_bytes); + + if (sqe->data_dst_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->data_dst_addr, + msg->out_bytes); + + if (sqe->c_key_addr) + mm_ops->iova_unmap(mempool, msg->key, (void *)(uintptr_t)sqe->c_key_addr, + msg->key_bytes); + + if (sqe->no_scene.c_ivin_addr) + mm_ops->iova_unmap(mempool, msg->iv, + (void *)(uintptr_t)sqe->no_scene.c_ivin_addr, msg->iv_bytes); +} + +static int fill_cipher_bd3_addr(struct wd_cipher_msg *msg, struct hisi_sec_sqe3 *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool, *phy_addr; + + if (mm_ops->sva_mode) { + sqe->data_src_addr = (__u64)(uintptr_t)msg->in; + sqe->data_dst_addr = (__u64)(uintptr_t)msg->out; + sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; + sqe->c_key_addr = (__u64)(uintptr_t)msg->key; + return 0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + 
WD_ERR("cipher failed to check memory type!\n"); + return -WD_EINVAL; + } + + /* No-SVA mode and Memory is USER mode or PROXY mode */ + mempool = mm_ops->usr; + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->data_src_addr = (__u64)(uintptr_t)phy_addr; + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->data_dst_addr = (__u64)(uintptr_t)phy_addr; + if (msg->iv_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->iv, msg->iv_bytes); + if (!phy_addr) + goto map_err; + sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->key_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->key, msg->key_bytes); + if (!phy_addr) + goto map_err; + sqe->c_key_addr = (__u64)(uintptr_t)phy_addr; + } + + return 0; + +map_err: + destroy_cipher_bd3_addr(msg, sqe); + return -WD_ENOMEM; } static int fill_cipher_bd3(struct wd_cipher_msg *msg, struct hisi_sec_sqe3 *sqe) @@ -1361,8 +1655,12 @@ static int fill_cipher_bd3(struct wd_cipher_msg *msg, struct hisi_sec_sqe3 *sqe) return 0; } -static void fill_sec_prefetch(__u8 data_fmt, __u32 len, __u16 hw_type, struct hisi_sec_sqe3 *sqe) +static void fill_sec_prefetch(__u8 data_fmt, __u32 len, __u16 hw_type, struct hisi_sec_sqe3 *sqe, + bool sva_mode) { + if (!sva_mode) + return; + if (hw_type >= HISI_QM_API_VER5_BASE || (data_fmt == WD_FLAT_BUF && len <= SEC_SVA_PREFETCH_MAX_LEN)) sqe->auth_mac_key |= (__u32)SEC_ENABLE_SVA_PREFETCH << SEC_SVA_PREFETCH_OFFSET; @@ -1374,6 +1672,7 @@ static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void struct hisi_qp *qp = (struct hisi_qp *)h_qp; struct wd_cipher_msg *msg = wd_msg; struct hisi_sec_sqe3 sqe; + handle_t h_sgl_pool; __u16 count = 0; int ret; @@ -1387,10 +1686,16 @@ static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void if (ret) return ret; - fill_sec_prefetch(msg->data_fmt, msg->in_bytes, qp->q_info.hw_type, &sqe); + fill_sec_prefetch(msg->data_fmt, msg->in_bytes, qp->q_info.hw_type, &sqe, + msg->mm_ops->sva_mode); if (msg->data_fmt == WD_SGL_BUF) { - ret = hisi_sec_fill_sgl_v3(h_qp, &msg->in, &msg->out, &sqe, + h_sgl_pool = hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("cipher failed to get sglpool for hw_v3!\n"); + return -WD_EINVAL; + } + ret = hisi_sec_fill_sgl_v3(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; @@ -1399,7 +1704,11 @@ static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void hisi_set_msg_id(h_qp, &msg->tag); sqe.c_len_ivin = (__u32)msg->in_bytes; sqe.tag = (__u64)(uintptr_t)msg->tag; - fill_cipher_bd3_addr(msg, &sqe); + ret = fill_cipher_bd3_addr(msg, &sqe); + if (ret < 0) { + WD_ERR("cipher map memory is err(%d)!\n", ret); + return ret; + } ret = hisi_qm_send(h_qp, &sqe, 1, &count); if (ret < 0) { @@ -1408,8 +1717,8 @@ static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void if (msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, - msg->out); - + msg->out, msg->mm_ops); + destroy_cipher_bd3_addr(msg, &sqe); return ret; } @@ -1438,8 +1747,6 @@ static void parse_cipher_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_CIPHER; recv_msg->data_fmt = get_data_fmt_v3(sqe->bd_param); - recv_msg->in = (__u8 *)(uintptr_t)sqe->data_src_addr; - recv_msg->out = (__u8 *)(uintptr_t)sqe->data_dst_addr; temp_msg = wd_cipher_get_msg(qp->q_info.idx, tag); 
if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -1447,6 +1754,9 @@ static void parse_cipher_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, qp->q_info.idx, tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->out = temp_msg->out; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; @@ -1457,6 +1767,8 @@ static void parse_cipher_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, else update_iv_sgl(temp_msg); + destroy_cipher_bd3_addr(temp_msg, sqe); + if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "cipher"); } @@ -1481,7 +1793,7 @@ static int hisi_sec_cipher_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, recv_msg->in, - recv_msg->out); + recv_msg->out, recv_msg->mm_ops); return 0; } @@ -1519,7 +1831,6 @@ static int fill_digest_bd2_alg(struct wd_digest_msg *msg, return -WD_EINVAL; } sqe->type2.mac_key_alg |= (__u32)BYTES_TO_WORDS(msg->key_bytes) << MAC_LEN_OFFSET; - sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->key; sqe->type2.mac_key_alg |= g_hmac_a_alg[msg->alg] << AUTH_ALG_OFFSET; @@ -1586,6 +1897,83 @@ static int fill_digest_long_hash(handle_t h_qp, struct wd_digest_msg *msg, return 0; } +static void destroy_digest_bd2_addr(struct wd_digest_msg *msg, struct hisi_sec_sqe *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool; + + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("digest failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + + if (sqe->type2.data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->type2.data_src_addr, + msg->in_bytes); + + if (sqe->type2.mac_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->type2.mac_addr, + msg->out_bytes); + + if (sqe->type2.a_key_addr && msg->mode == WD_DIGEST_HMAC) + mm_ops->iova_unmap(mempool, msg->key, (void *)(uintptr_t)sqe->type2.a_key_addr, + msg->key_bytes); +} + +static int fill_digest_bd2_addr(struct wd_digest_msg *msg, struct hisi_sec_sqe *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool, *phy_addr; + + if (mm_ops->sva_mode) { + /* avoid HW accessing address 0 when the pointer is NULL */ + if (msg->in) + sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->in; + else + sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->out; + sqe->type2.mac_addr = (__u64)(uintptr_t)msg->out; + if (msg->mode == WD_DIGEST_HMAC) + sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->key; + return 0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + WD_ERR("digest failed to check memory type!\n"); + return -WD_EINVAL; + } + + /* No-SVA mode and Memory is USER mode or PROXY mode */ + mempool = mm_ops->usr; + if (msg->in_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->type2.data_src_addr = (__u64)(uintptr_t)phy_addr; + } + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.mac_addr = (__u64)(uintptr_t)phy_addr; + + if (msg->key_bytes != 0 && msg->mode == WD_DIGEST_HMAC) { + phy_addr = mm_ops->iova_map(mempool, msg->key, msg->key_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.a_key_addr = (__u64)(uintptr_t)phy_addr; + } + + return 0; + +map_err: + destroy_digest_bd2_addr(msg, sqe); + return -WD_ENOMEM; +} + static void parse_digest_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, struct wd_digest_msg 
*recv_msg) { @@ -1606,7 +1994,6 @@ static void parse_digest_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_DIGEST; recv_msg->data_fmt = get_data_fmt_v2(sqe->sds_sa_type); - recv_msg->in = (__u8 *)(uintptr_t)sqe->type2.data_src_addr; temp_msg = wd_digest_get_msg(qp->q_info.idx, recv_msg->tag); if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -1614,11 +2001,15 @@ static void parse_digest_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, qp->q_info.idx, recv_msg->tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; } + destroy_digest_bd2_addr(temp_msg, sqe); + if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "digest"); } @@ -1719,6 +2110,7 @@ static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *w handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_digest_msg *msg = wd_msg; struct hisi_sec_sqe sqe; + handle_t h_sgl_pool; __u16 count = 0; __u8 scene; __u8 de; @@ -1743,7 +2135,12 @@ static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *w de = DATA_DST_ADDR_DISABLE << SEC_DE_OFFSET; if (msg->data_fmt == WD_SGL_BUF) { - ret = hisi_sec_fill_sgl(h_qp, &msg->in, &msg->out, &sqe, + h_sgl_pool = hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("digest failed to get sglpool for hw_v2!\n"); + return -WD_EINVAL; + } + ret = hisi_sec_fill_sgl(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; @@ -1751,16 +2148,19 @@ static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *w sqe.sds_sa_type |= (__u8)(de | scene); sqe.type2.alen_ivllen |= (__u32)msg->in_bytes; - sqe.type2.data_src_addr = (__u64)(uintptr_t)msg->in; - sqe.type2.mac_addr = (__u64)(uintptr_t)msg->out; + ret = fill_digest_bd2_addr(msg, &sqe); + if (ret) { + WD_ERR("digest map memory is err(%d)!\n", ret); + goto put_sgl; + } ret = fill_digest_bd2_alg(msg, &sqe); if (ret) - goto put_sgl; + goto destroy_addr; ret = fill_digest_long_hash(h_qp, msg, &sqe); if (ret) - goto put_sgl; + goto destroy_addr; hisi_set_msg_id(h_qp, &msg->tag); sqe.type2.tag = (__u16)msg->tag; @@ -1769,15 +2169,16 @@ static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *w if (ret != -WD_EBUSY) WD_ERR("digest send sqe is err(%d)!\n", ret); - goto put_sgl; + goto destroy_addr; } return 0; +destroy_addr: + destroy_digest_bd2_addr(msg, &sqe); put_sgl: if (msg->data_fmt == WD_SGL_BUF) - hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out); - + hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out, msg->mm_ops); return ret; } @@ -1801,7 +2202,7 @@ static int hisi_sec_digest_recv(struct wd_alg_driver *drv, handle_t ctx, void *w if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, recv_msg->in, - recv_msg->out); + recv_msg->out, recv_msg->mm_ops); return 0; } @@ -1831,7 +2232,7 @@ static int hmac_key_len_check(struct wd_digest_msg *msg) } static int fill_digest_bd3_alg(struct wd_digest_msg *msg, - struct hisi_sec_sqe3 *sqe) + struct hisi_sec_sqe3 *sqe) { int ret; @@ -1866,13 +2267,11 @@ static int fill_digest_bd3_alg(struct wd_digest_msg *msg, return ret; sqe->auth_mac_key |= (__u32)BYTES_TO_WORDS(msg->key_bytes) << SEC_AKEY_OFFSET_V3; - sqe->a_key_addr = (__u64)(uintptr_t)msg->key; sqe->auth_mac_key |= g_hmac_a_alg[msg->alg] << SEC_AUTH_ALG_OFFSET_V3; if (msg->alg == WD_DIGEST_AES_GMAC) { 
sqe->auth_mac_key |= AI_GEN_IVIN_ADDR << SEC_AI_GEN_OFFSET_V3; - sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)msg->iv; } } else { WD_ERR("failed to check digest mode, mode = %u\n", msg->mode); @@ -1956,12 +2355,104 @@ static void fill_digest_v3_scene(struct hisi_sec_sqe3 *sqe, sqe->bd_param |= (__u16)(de | scene); } +static void destroy_digest_bd3_addr(struct wd_digest_msg *msg, struct hisi_sec_sqe3 *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool; + + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("digest failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + + if (sqe->data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->data_src_addr, + msg->in_bytes); + + if (sqe->mac_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->mac_addr, + msg->out_bytes); + + if (sqe->a_key_addr && msg->mode == WD_DIGEST_HMAC) + mm_ops->iova_unmap(mempool, msg->key, (void *)(uintptr_t)sqe->a_key_addr, + msg->key_bytes); + + if (sqe->auth_ivin.a_ivin_addr && msg->mode == WD_DIGEST_HMAC && + msg->alg == WD_DIGEST_AES_GMAC) + mm_ops->iova_unmap(mempool, msg->iv, (void *)(uintptr_t)sqe->auth_ivin.a_ivin_addr, + MAX_IV_SIZE); +} + +static int fill_digest_bd3_addr(struct wd_digest_msg *msg, struct hisi_sec_sqe3 *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool, *phy_addr; + + if (msg->mm_ops->sva_mode) { + /* avoid HW accessing address 0 when the pointer is NULL */ + if (msg->in) + sqe->data_src_addr = (__u64)(uintptr_t)msg->in; + else + sqe->data_src_addr = (__u64)(uintptr_t)msg->out; + sqe->mac_addr = (__u64)(uintptr_t)msg->out; + if (msg->mode == WD_DIGEST_HMAC) + sqe->a_key_addr = (__u64)(uintptr_t)msg->key; + if (msg->mode == WD_DIGEST_HMAC && msg->alg == WD_DIGEST_AES_GMAC) + sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)msg->iv; + return 0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + WD_ERR("digest failed to check memory type!\n"); + return -WD_EINVAL; + } + + /* No-SVA mode and Memory is USER mode or PROXY mode */ + mempool = mm_ops->usr; + if (msg->in_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->data_src_addr = (__u64)(uintptr_t)phy_addr; + } + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->mac_addr = (__u64)(uintptr_t)phy_addr; + + if (msg->iv && msg->mode == WD_DIGEST_HMAC && + msg->alg == WD_DIGEST_AES_GMAC) { + phy_addr = mm_ops->iova_map(mempool, msg->iv, MAX_IV_SIZE); + if (!phy_addr) + goto map_err; + sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->key_bytes != 0 && msg->mode == WD_DIGEST_HMAC) { + phy_addr = mm_ops->iova_map(mempool, msg->key, msg->key_bytes); + if (!phy_addr) + goto map_err; + sqe->a_key_addr = (__u64)(uintptr_t)phy_addr; + } + + return 0; + +map_err: + destroy_digest_bd3_addr(msg, sqe); + return -WD_ENOMEM; +} + static int hisi_sec_digest_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; struct wd_digest_msg *msg = wd_msg; struct hisi_sec_sqe3 sqe; + handle_t h_sgl_pool; __u16 count = 0; int ret; @@ -1980,43 +2471,53 @@ static int hisi_sec_digest_send_v3(struct wd_alg_driver *drv, handle_t ctx, void sqe.auth_mac_key = AUTH_HMAC_CALCULATE; if (msg->data_fmt == WD_SGL_BUF) { - ret = hisi_sec_fill_sgl_v3(h_qp, &msg->in, &msg->out, &sqe, + h_sgl_pool = 
hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("digest failed to get sglpool for hw_v3!\n"); + return -WD_EINVAL; + } + ret = hisi_sec_fill_sgl_v3(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; } sqe.a_len_key = (__u32)msg->in_bytes; - sqe.data_src_addr = (__u64)(uintptr_t)msg->in; - sqe.mac_addr = (__u64)(uintptr_t)msg->out; + ret = fill_digest_bd3_addr(msg, &sqe); + if (ret < 0) { + WD_ERR("digest map memory is err(%d)!\n", ret); + goto put_sgl; + } ret = fill_digest_bd3_alg(msg, &sqe); if (ret) - goto put_sgl; + goto destroy_addr; ret = fill_digest_long_hash3(h_qp, msg, &sqe); if (ret) - goto put_sgl; + goto destroy_addr; hisi_set_msg_id(h_qp, &msg->tag); sqe.tag = (__u64)(uintptr_t)msg->tag; - fill_sec_prefetch(msg->data_fmt, msg->in_bytes, qp->q_info.hw_type, &sqe); + fill_sec_prefetch(msg->data_fmt, msg->in_bytes, qp->q_info.hw_type, &sqe, + msg->mm_ops->sva_mode); ret = hisi_qm_send(h_qp, &sqe, 1, &count); if (ret < 0) { if (ret != -WD_EBUSY) WD_ERR("digest send sqe is err(%d)!\n", ret); - goto put_sgl; + goto destroy_addr; } return 0; +destroy_addr: + destroy_digest_bd3_addr(msg, &sqe); put_sgl: if (msg->data_fmt == WD_SGL_BUF) - hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out); - + hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out, msg->mm_ops); return ret; } @@ -2040,7 +2541,6 @@ static void parse_digest_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_DIGEST; recv_msg->data_fmt = get_data_fmt_v3(sqe->bd_param); - recv_msg->in = (__u8 *)(uintptr_t)sqe->data_src_addr; temp_msg = wd_digest_get_msg(qp->q_info.idx, recv_msg->tag); if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -2048,10 +2548,13 @@ static void parse_digest_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, qp->q_info.idx, recv_msg->tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; } + destroy_digest_bd3_addr(temp_msg, sqe); if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "digest"); @@ -2077,7 +2580,7 @@ static int hisi_sec_digest_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, recv_msg->in, - recv_msg->out); + recv_msg->out, recv_msg->mm_ops); return 0; } @@ -2253,22 +2756,6 @@ static void set_aead_auth_iv(struct wd_aead_msg *msg) } } -static void fill_aead_bd2_addr(struct wd_aead_msg *msg, - struct hisi_sec_sqe *sqe) -{ - sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->in; - sqe->type2.data_dst_addr = (__u64)(uintptr_t)msg->out; - sqe->type2.mac_addr = (__u64)(uintptr_t)msg->mac; - sqe->type2.c_key_addr = (__u64)(uintptr_t)msg->ckey; - sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->akey; - sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - - /* CCM/GCM should init a_iv */ - set_aead_auth_iv(msg); - - sqe->type2.a_ivin_addr = (__u64)(uintptr_t)msg->aiv; -} - static int aead_len_check(struct wd_aead_msg *msg, enum sec_bd_type type) { if (msg->msg_state == AEAD_MSG_MIDDLE) { @@ -2291,6 +2778,11 @@ static int aead_len_check(struct wd_aead_msg *msg, enum sec_bd_type type) return -WD_EINVAL; } + if (unlikely(msg->in_bytes == 0 && msg->assoc_bytes == 0)) { + WD_ERR("aead input data length is 0\n"); + return -WD_EINVAL; + } + if (unlikely(msg->cmode == WD_CIPHER_CCM && msg->assoc_bytes > MAX_CCM_AAD_LEN)) { WD_ERR("aead ccm aad length is too long, size = 
%u\n", @@ -2323,14 +2815,14 @@ static void gcm_auth_ivin(struct wd_aead_msg *msg) __u32 final_counter = GCM_FINAL_COUNTER; /* auth_ivin = {cipher_ivin(16B), null(16B), auth_mac(16B), null(16B)} */ - memset(msg->aiv_stream, 0, AIV_STREAM_LEN); + memset(msg->aiv, 0, AIV_STREAM_LEN); - memcpy(msg->aiv_stream, msg->iv, GCM_IV_SIZE); + memcpy(msg->aiv, msg->iv, GCM_IV_SIZE); /* The last 4 bytes of c_ivin are counters */ - memcpy(msg->aiv_stream + GCM_IV_SIZE, &final_counter, GCM_FINAL_COUNTER_LEN); + memcpy(msg->aiv + GCM_IV_SIZE, &final_counter, GCM_FINAL_COUNTER_LEN); /* Fill auth_ivin with the mac of last MIDDLE BD */ - memcpy(msg->aiv_stream + GCM_STREAM_MAC_OFFSET, msg->mac, GCM_FULL_MAC_LEN); + memcpy(msg->aiv + GCM_STREAM_MAC_OFFSET, msg->mac, GCM_FULL_MAC_LEN); /* Use the user's origin mac for decrypt icv check */ if (msg->op_type == WD_CIPHER_DECRYPTION_DIGEST) @@ -2353,8 +2845,6 @@ static void fill_gcm_first_bd2(struct wd_aead_msg *msg, struct hisi_sec_sqe *sqe sqe->type2.c_alg = 0; sqe->type2.auth_src_offset = 0; sqe->type2.alen_ivllen = msg->assoc_bytes; - sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->ckey; } static void fill_gcm_middle_bd2(struct wd_aead_msg *msg, struct hisi_sec_sqe *sqe) @@ -2367,8 +2857,6 @@ static void fill_gcm_middle_bd2(struct wd_aead_msg *msg, struct hisi_sec_sqe *sq fill_gcm_akey_len(msg, sqe, BD_TYPE2); sqe->type2.alen_ivllen = 0; sqe->type2.a_ivin_addr = sqe->type2.mac_addr; - sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->ckey; } static void get_galois_vector_s(struct wd_aead_msg *msg, __u8 *s) @@ -2385,7 +2873,7 @@ static void get_galois_vector_s(struct wd_aead_msg *msg, __u8 *s) /* Based the little-endian operation */ for (i = 0; i < GCM_BLOCK_SIZE; i++) - s[i] = a_c[i] ^ msg->aiv_stream[(__u8)(GCM_AUTH_MAC_OFFSET - i)]; + s[i] = a_c[i] ^ msg->aiv[(__u8)(GCM_AUTH_MAC_OFFSET - i)]; } static int gcm_do_soft_mac(struct wd_aead_msg *msg) @@ -2424,9 +2912,9 @@ static int gcm_do_soft_mac(struct wd_aead_msg *msg) */ for (i = 0; i < GCM_BLOCK_SIZE; i++) G[i] = data[GCM_BLOCK_OFFSET - i] ^ - msg->aiv_stream[(__u8)(GCM_AUTH_MAC_OFFSET - i)]; + msg->aiv[(__u8)(GCM_AUTH_MAC_OFFSET - i)]; - galois_compute(G, H, msg->aiv_stream + GCM_STREAM_MAC_OFFSET, GCM_BLOCK_SIZE); + galois_compute(G, H, msg->aiv + GCM_STREAM_MAC_OFFSET, GCM_BLOCK_SIZE); len -= block; offset += block; } @@ -2436,7 +2924,7 @@ static int gcm_do_soft_mac(struct wd_aead_msg *msg) galois_compute(S, H, g, GCM_BLOCK_SIZE); /* Encrypt ctr0 based on AES_ECB */ - aes_encrypt(msg->ckey, msg->ckey_bytes, msg->aiv_stream, ctr_r); + aes_encrypt(msg->ckey, msg->ckey_bytes, msg->aiv, ctr_r); /* Get the GMAC tag final */ for (i = 0; i < GCM_BLOCK_SIZE; i++) @@ -2556,11 +3044,159 @@ static int aead_msg_state_check(struct wd_aead_msg *msg) return 0; } +static void destroy_aead_bd2_addr(struct wd_aead_msg *msg, struct hisi_sec_sqe *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + __u64 dma_addr; + void *mempool; + + aead_free_aiv_addr(msg); + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("aead failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + if (sqe->type2.data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->type2.data_src_addr, + msg->in_bytes); + + if (sqe->type2.data_dst_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->type2.data_dst_addr, + msg->out_bytes); + + if 
(sqe->type2.c_ivin_addr) + mm_ops->iova_unmap(mempool, msg->iv, (void *)(uintptr_t)sqe->type2.c_ivin_addr, + msg->iv_bytes); + + if (sqe->type2.a_key_addr) { + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE) + && msg->cmode == WD_CIPHER_GCM) + mm_ops->iova_unmap(mempool, msg->ckey, + (void *)(uintptr_t)sqe->type2.a_key_addr, + msg->ckey_bytes); + else + mm_ops->iova_unmap(mempool, msg->akey, + (void *)(uintptr_t)sqe->type2.a_key_addr, + msg->akey_bytes); + } + + if (sqe->type2.c_key_addr && !((msg->msg_state == AEAD_MSG_FIRST || + msg->msg_state == AEAD_MSG_MIDDLE) && msg->cmode == WD_CIPHER_GCM)) + mm_ops->iova_unmap(mempool, msg->ckey, (void *)(uintptr_t)sqe->type2.c_key_addr, + msg->ckey_bytes); + + if (sqe->type2.mac_addr) + mm_ops->iova_unmap(mempool, msg->mac, (void *)(uintptr_t)sqe->type2.mac_addr, + msg->auth_bytes); +} + +static int aead_mem_nosva_map(struct wd_aead_msg *msg, struct hisi_sec_sqe *sqe, int idx) +{ + struct wd_aead_aiv_addr *aiv_addr = (struct wd_aead_aiv_addr *)msg->drv_cfg; + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool, *phy_addr; + + /* No-SVA mode and Memory is USER mode or PROXY mode */ + mempool = mm_ops->usr; + + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes + msg->assoc_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->type2.data_src_addr = (__u64)(uintptr_t)phy_addr; + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.data_dst_addr = (__u64)(uintptr_t)phy_addr; + if (msg->iv_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->iv, msg->iv_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.c_ivin_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->akey_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->akey, msg->akey_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.a_key_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->ckey_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->ckey, msg->ckey_bytes); + if (!phy_addr) + goto map_err; + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE) + && msg->cmode == WD_CIPHER_GCM) + sqe->type2.a_key_addr = (__u64)(uintptr_t)phy_addr; + else + sqe->type2.c_key_addr = (__u64)(uintptr_t)phy_addr; + } + if (msg->auth_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->mac, msg->auth_bytes); + if (!phy_addr) + goto map_err; + sqe->type2.mac_addr = (__u64)(uintptr_t)phy_addr; + } + + /* CCM/GCM should init a_iv */ + set_aead_auth_iv(msg); + phy_addr = aiv_addr->aiv_nosva + (idx << AEAD_AIV_OFFSET); + sqe->type2.a_ivin_addr = (__u64)(uintptr_t)phy_addr; + + return 0; + +map_err: + destroy_aead_bd2_addr(msg, sqe); + return -WD_ENOMEM; +} + +static int fill_aead_bd2_addr(struct wd_aead_msg *msg, struct hisi_sec_sqe *sqe, + struct hisi_qp *qp) +{ + int idx; + + idx = aead_get_aiv_addr(qp, msg); + if (idx < 0) + return idx; + + /* sva mode */ + if (msg->mm_ops->sva_mode) { + sqe->type2.data_src_addr = (__u64)(uintptr_t)msg->in; + sqe->type2.data_dst_addr = (__u64)(uintptr_t)msg->out; + sqe->type2.mac_addr = (__u64)(uintptr_t)msg->mac; + sqe->type2.c_key_addr = (__u64)(uintptr_t)msg->ckey; + sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->akey; + sqe->type2.c_ivin_addr = (__u64)(uintptr_t)msg->iv; + /* CCM/GCM should init a_iv */ + set_aead_auth_iv(msg); + sqe->type2.a_ivin_addr = (__u64)(uintptr_t)msg->aiv; + + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE) + && msg->cmode == WD_CIPHER_GCM) + sqe->type2.a_key_addr = (__u64)(uintptr_t)msg->ckey; + + return 
0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + WD_ERR("aead failed to check memory type!\n"); + aead_free_aiv_addr(msg); + return -WD_EINVAL; + } + + /* aiv addr is freed in destroy addr interface */ + return aead_mem_nosva_map(msg, sqe, idx); +} + static int hisi_sec_aead_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); + struct hisi_qp *qp = (struct hisi_qp *)h_qp; struct wd_aead_msg *msg = wd_msg; struct hisi_sec_sqe sqe; + handle_t h_sgl_pool; __u16 count = 0; int ret; @@ -2583,17 +3219,27 @@ static int hisi_sec_aead_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_ return ret; if (msg->data_fmt == WD_SGL_BUF) { - ret = hisi_sec_fill_sgl(h_qp, &msg->in, &msg->out, + h_sgl_pool = hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("aead failed to get sglpool for hw_v2!\n"); + return -WD_EINVAL; + } + ret = hisi_sec_fill_sgl(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; } - fill_aead_bd2_addr(msg, &sqe); + ret = fill_aead_bd2_addr(msg, &sqe, qp); + if (ret < 0) { + if (ret != -WD_EBUSY) + WD_ERR("aead map memory is err(%d)!\n", ret); + goto put_sgl; + } ret = fill_stream_bd2(msg, &sqe); if (unlikely(ret)) - goto put_sgl; + goto destroy_addr; hisi_set_msg_id(h_qp, &msg->tag); sqe.type2.tag = (__u16)msg->tag; @@ -2603,15 +3249,16 @@ static int hisi_sec_aead_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_ if (ret != -WD_EBUSY) WD_ERR("aead send sqe is err(%d)!\n", ret); - goto put_sgl; + goto destroy_addr; } return 0; +destroy_addr: + destroy_aead_bd2_addr(msg, &sqe); put_sgl: if (msg->data_fmt == WD_SGL_BUF) - hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out); - + hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out, msg->mm_ops); return ret; } @@ -2652,8 +3299,6 @@ static void parse_aead_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_AEAD; recv_msg->data_fmt = get_data_fmt_v2(sqe->sds_sa_type); - recv_msg->in = (__u8 *)(uintptr_t)sqe->type2.data_src_addr; - recv_msg->out = (__u8 *)(uintptr_t)sqe->type2.data_dst_addr; temp_msg = wd_aead_get_msg(qp->q_info.idx, recv_msg->tag); if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -2661,12 +3306,16 @@ static void parse_aead_bd2(struct hisi_qp *qp, struct hisi_sec_sqe *sqe, qp->q_info.idx, recv_msg->tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->out = temp_msg->out; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; } update_stream_counter(temp_msg); + destroy_aead_bd2_addr(temp_msg, sqe); if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "aead"); @@ -2692,7 +3341,7 @@ static int hisi_sec_aead_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_ if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, recv_msg->in, - recv_msg->out); + recv_msg->out, recv_msg->mm_ops); return 0; } @@ -2789,23 +3438,6 @@ static int fill_aead_bd3_mode(struct wd_aead_msg *msg, return 0; } -static void fill_aead_bd3_addr(struct wd_aead_msg *msg, - struct hisi_sec_sqe3 *sqe) -{ - sqe->data_src_addr = (__u64)(uintptr_t)msg->in; - sqe->data_dst_addr = (__u64)(uintptr_t)msg->out; - - sqe->mac_addr = (__u64)(uintptr_t)msg->mac; - sqe->c_key_addr = (__u64)(uintptr_t)msg->ckey; - sqe->a_key_addr = (__u64)(uintptr_t)msg->akey; - sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - - /* CCM/GCM should init a_iv */ - set_aead_auth_iv(msg); - - 
sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)msg->aiv; -} - static void fill_gcm_first_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) { sqe->auth_mac_key |= AI_GEN_INNER << SEC_AI_GEN_OFFSET_V3; @@ -2822,8 +3454,6 @@ static void fill_gcm_first_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sq sqe->c_mode_alg &= ~(0x7 << SEC_CALG_OFFSET_V3); sqe->auth_src_offset = 0; sqe->a_len_key = msg->assoc_bytes; - sqe->stream_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->a_key_addr = (__u64)(uintptr_t)msg->ckey; } static void fill_gcm_middle_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) @@ -2837,8 +3467,6 @@ static void fill_gcm_middle_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *s fill_gcm_akey_len(msg, sqe, BD_TYPE3); sqe->a_len_key = 0; sqe->auth_ivin.a_ivin_addr = sqe->mac_addr; - sqe->stream_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->a_key_addr = (__u64)(uintptr_t)msg->ckey; } static void fill_gcm_final_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) @@ -2853,9 +3481,6 @@ static void fill_gcm_final_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sq sqe->a_len_key = 0; sqe->stream_scene.long_a_data_len = msg->assoc_bytes; sqe->stream_scene.long_a_data_len |= msg->long_data_len << LONG_AUTH_DATA_OFFSET; - sqe->stream_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; - sqe->a_key_addr = (__u64)(uintptr_t)msg->ckey; - sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)msg->aiv_stream; } static int fill_stream_bd3(handle_t h_qp, struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) @@ -2939,12 +3564,167 @@ static int fill_aead_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) return 0; } +static void destroy_aead_bd3_addr(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) +{ + struct wd_mm_ops *mm_ops = msg->mm_ops; + void *mempool; + + aead_free_aiv_addr(msg); + /* SVA mode and skip */ + if (!mm_ops || mm_ops->sva_mode) + return; + + if (!mm_ops->usr) { + WD_ERR("aead failed to check memory pool!\n"); + return; + } + + mempool = mm_ops->usr; + if (sqe->data_src_addr) + mm_ops->iova_unmap(mempool, msg->in, (void *)(uintptr_t)sqe->data_src_addr, + msg->in_bytes); + + if (sqe->data_dst_addr) + mm_ops->iova_unmap(mempool, msg->out, (void *)(uintptr_t)sqe->data_dst_addr, + msg->out_bytes); + + if (sqe->no_scene.c_ivin_addr) + mm_ops->iova_unmap(mempool, msg->iv, (void *)(uintptr_t)sqe->no_scene.c_ivin_addr, + msg->iv_bytes); + else if (sqe->stream_scene.c_ivin_addr) + mm_ops->iova_unmap(mempool, msg->iv, + (void *)(uintptr_t)sqe->stream_scene.c_ivin_addr, + msg->iv_bytes); + + if (sqe->a_key_addr) { + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE || + msg->msg_state == AEAD_MSG_END) && msg->cmode == WD_CIPHER_GCM) + mm_ops->iova_unmap(mempool, msg->ckey, (void *)(uintptr_t)sqe->a_key_addr, + msg->ckey_bytes); + else + mm_ops->iova_unmap(mempool, msg->akey, (void *)(uintptr_t)sqe->a_key_addr, + msg->akey_bytes); + } + + if (sqe->c_key_addr && !((msg->msg_state == AEAD_MSG_FIRST || + msg->msg_state == AEAD_MSG_MIDDLE || msg->msg_state == AEAD_MSG_END) && + msg->cmode == WD_CIPHER_GCM)) + mm_ops->iova_unmap(mempool, msg->ckey, (void *)(uintptr_t)sqe->c_key_addr, + msg->ckey_bytes); + + if (sqe->mac_addr) + mm_ops->iova_unmap(mempool, msg->mac, (void *)(uintptr_t)sqe->mac_addr, + msg->auth_bytes); +} + +static int aead_mem_nosva_map_v3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe, int idx) +{ + struct wd_aead_aiv_addr *aiv_addr = (struct wd_aead_aiv_addr *)msg->drv_cfg; + struct wd_mm_ops *mm_ops = msg->mm_ops; + 
void *mempool = mm_ops->usr; + void *phy_addr; + + phy_addr = mm_ops->iova_map(mempool, msg->in, msg->in_bytes + msg->assoc_bytes); + if (!phy_addr) + return -WD_ENOMEM; + sqe->data_src_addr = (__u64)(uintptr_t)phy_addr; + + phy_addr = mm_ops->iova_map(mempool, msg->out, msg->out_bytes); + if (!phy_addr) + goto map_err; + sqe->data_dst_addr = (__u64)(uintptr_t)phy_addr; + + if (msg->iv_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->iv, msg->iv_bytes); + if (!phy_addr) + goto map_err; + sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)phy_addr; + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE || + msg->msg_state == AEAD_MSG_END) && msg->cmode == WD_CIPHER_GCM) + sqe->stream_scene.c_ivin_addr = (__u64)(uintptr_t)phy_addr; + } + + if (msg->akey_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->akey, msg->akey_bytes); + if (!phy_addr) + goto map_err; + sqe->a_key_addr = (__u64)(uintptr_t)phy_addr; + } + + if (msg->ckey_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->ckey, msg->ckey_bytes); + if (!phy_addr) + goto map_err; + sqe->c_key_addr = (__u64)(uintptr_t)phy_addr; + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE || + msg->msg_state == AEAD_MSG_END) && msg->cmode == WD_CIPHER_GCM) + sqe->a_key_addr = (__u64)(uintptr_t)phy_addr; + } + + if (msg->auth_bytes) { + phy_addr = mm_ops->iova_map(mempool, msg->mac, msg->auth_bytes); + if (!phy_addr) + goto map_err; + sqe->mac_addr = (__u64)(uintptr_t)phy_addr; + } + + /* CCM/GCM should init a_iv */ + set_aead_auth_iv(msg); + phy_addr = aiv_addr->aiv_nosva + (idx << AEAD_AIV_OFFSET); + sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)phy_addr; + + return 0; + +map_err: + destroy_aead_bd3_addr(msg, sqe); + return -WD_ENOMEM; +} + +static int fill_aead_bd3_addr(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe, + struct hisi_qp *qp) +{ + int idx; + + idx = aead_get_aiv_addr(qp, msg); + if (idx < 0) + return idx; + + /* sva mode */ + if (msg->mm_ops->sva_mode) { + sqe->data_src_addr = (__u64)(uintptr_t)msg->in; + sqe->data_dst_addr = (__u64)(uintptr_t)msg->out; + sqe->no_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; + sqe->c_key_addr = (__u64)(uintptr_t)msg->ckey; + sqe->a_key_addr = (__u64)(uintptr_t)msg->akey; + sqe->mac_addr = (__u64)(uintptr_t)msg->mac; + + /* CCM/GCM should init a_iv */ + set_aead_auth_iv(msg); + sqe->auth_ivin.a_ivin_addr = (__u64)(uintptr_t)msg->aiv; + if ((msg->msg_state == AEAD_MSG_FIRST || msg->msg_state == AEAD_MSG_MIDDLE || + msg->msg_state == AEAD_MSG_END) && msg->cmode == WD_CIPHER_GCM) { + sqe->stream_scene.c_ivin_addr = (__u64)(uintptr_t)msg->iv; + sqe->a_key_addr = (__u64)(uintptr_t)msg->ckey; + } + return 0; + } + if (msg->mm_type > UADK_MEM_PROXY) { + WD_ERR("aead failed to check memory type!\n"); + aead_free_aiv_addr(msg); + return -WD_EINVAL; + } + + /* aiv addr is freed in destroy addr interface */ + return aead_mem_nosva_map_v3(msg, sqe, idx); +} + static int hisi_sec_aead_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; struct wd_aead_msg *msg = wd_msg; struct hisi_sec_sqe3 sqe; + handle_t h_sgl_pool; __u16 count = 0; int ret; @@ -2967,19 +3747,30 @@ static int hisi_sec_aead_send_v3(struct wd_alg_driver *drv, handle_t ctx, void * return ret; fill_sec_prefetch(msg->data_fmt, msg->in_bytes + msg->assoc_bytes, - qp->q_info.hw_type, &sqe); + qp->q_info.hw_type, &sqe, msg->mm_ops->sva_mode); if (msg->data_fmt == WD_SGL_BUF) { - ret = 
hisi_sec_fill_sgl_v3(h_qp, &msg->in, &msg->out, &sqe, + h_sgl_pool = hisi_qm_get_sglpool(h_qp, msg->mm_ops); + if (!h_sgl_pool) { + WD_ERR("aead failed to get sglpool for hw_v3!\n"); + return -WD_EINVAL; + } + ret = hisi_sec_fill_sgl_v3(h_sgl_pool, &msg->in, &msg->out, &sqe, msg->alg_type); if (ret) return ret; } - fill_aead_bd3_addr(msg, &sqe); + ret = fill_aead_bd3_addr(msg, &sqe, qp); + if (ret < 0) { + if (ret != -WD_EBUSY) + WD_ERR("aead map memory is err(%d)!\n", ret); + goto put_sgl; + } + ret = fill_stream_bd3(h_qp, msg, &sqe); if (unlikely(ret)) - goto put_sgl; + goto destroy_addr; hisi_set_msg_id(h_qp, &msg->tag); sqe.tag = msg->tag; @@ -2988,15 +3779,16 @@ static int hisi_sec_aead_send_v3(struct wd_alg_driver *drv, handle_t ctx, void * if (ret != -WD_EBUSY) WD_ERR("aead send sqe is err(%d)!\n", ret); - goto put_sgl; + goto destroy_addr; } return 0; +destroy_addr: + destroy_aead_bd3_addr(msg, &sqe); put_sgl: if (msg->data_fmt == WD_SGL_BUF) - hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out); - + hisi_sec_put_sgl(h_qp, msg->alg_type, msg->in, msg->out, msg->mm_ops); return ret; } @@ -3023,8 +3815,6 @@ static void parse_aead_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, if (qp->q_info.qp_mode == CTX_MODE_ASYNC) { recv_msg->alg_type = WD_AEAD; recv_msg->data_fmt = get_data_fmt_v3(sqe->bd_param); - recv_msg->in = (__u8 *)(uintptr_t)sqe->data_src_addr; - recv_msg->out = (__u8 *)(uintptr_t)sqe->data_dst_addr; temp_msg = wd_aead_get_msg(qp->q_info.idx, recv_msg->tag); if (!temp_msg) { recv_msg->result = WD_IN_EPARA; @@ -3032,12 +3822,16 @@ static void parse_aead_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, qp->q_info.idx, recv_msg->tag); return; } + recv_msg->in = temp_msg->in; + recv_msg->out = temp_msg->out; + recv_msg->mm_ops = temp_msg->mm_ops; } else { /* The synchronization mode uses the same message */ temp_msg = recv_msg; } update_stream_counter(temp_msg); + destroy_aead_bd3_addr(temp_msg, sqe); if (unlikely(recv_msg->result != WD_SUCCESS)) dump_sec_msg(temp_msg, "aead"); @@ -3063,7 +3857,7 @@ static int hisi_sec_aead_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void * if (recv_msg->data_fmt == WD_SGL_BUF) hisi_sec_put_sgl(h_qp, recv_msg->alg_type, - recv_msg->in, recv_msg->out); + recv_msg->in, recv_msg->out, recv_msg->mm_ops); return 0; } @@ -3128,6 +3922,7 @@ static void hisi_sec_exit(struct wd_alg_driver *drv) priv = (struct hisi_sec_ctx *)drv->priv; config = &priv->config; + for (i = 0; i < config->ctx_num; i++) { h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx); hisi_qm_free_qp(h_qp); diff --git a/include/drv/wd_aead_drv.h b/include/drv/wd_aead_drv.h index a9c0e7c..2c53217 100644 --- a/include/drv/wd_aead_drv.h +++ b/include/drv/wd_aead_drv.h @@ -54,9 +54,7 @@ struct wd_aead_msg { /* input iv pointer */ __u8 *iv; /* input auth iv pointer */ - __u8 aiv[MAX_IV_SIZE]; - /* input auth iv pointer for stream mode */ - __u8 aiv_stream[AIV_STREAM_LEN]; + __u8 *aiv; /* input data pointer */ __u8 *in; /* output data pointer */ @@ -68,6 +66,25 @@ struct wd_aead_msg { /* total of data for stream mode */ __u64 long_data_len; enum wd_aead_msg_state msg_state; + struct wd_mm_ops *mm_ops; + enum wd_mem_type mm_type; + void *drv_cfg; /* internal driver configuration */ +}; + +struct wd_aead_aiv_addr { + __u8 *aiv; + __u8 *aiv_status; + __u8 *aiv_nosva; +}; + +struct wd_aead_extend_ops { + void *params; + int (*eops_aiv_init)(struct wd_alg_driver *drv, + struct wd_mm_ops *mm_ops, + void **params); + void (*eops_aiv_uninit)(struct wd_alg_driver *drv, + struct 
wd_mm_ops *mm_ops, + void *params); }; struct wd_aead_msg *wd_aead_get_msg(__u32 idx, __u32 tag); diff --git a/include/drv/wd_cipher_drv.h b/include/drv/wd_cipher_drv.h index c6d8ddf..c0be0c3 100644 --- a/include/drv/wd_cipher_drv.h +++ b/include/drv/wd_cipher_drv.h @@ -48,6 +48,8 @@ struct wd_cipher_msg { __u8 *in; /* output data pointer */ __u8 *out; + struct wd_mm_ops *mm_ops; + enum wd_mem_type mm_type; }; struct wd_cipher_msg *wd_cipher_get_msg(__u32 idx, __u32 tag); diff --git a/include/drv/wd_digest_drv.h b/include/drv/wd_digest_drv.h index a55ef5b..12398f2 100644 --- a/include/drv/wd_digest_drv.h +++ b/include/drv/wd_digest_drv.h @@ -59,6 +59,8 @@ struct wd_digest_msg { __u8 *partial_block; /* total of data for stream mode */ __u64 long_data_len; + struct wd_mm_ops *mm_ops; + enum wd_mem_type mm_type; }; static inline enum hash_block_type get_hash_block_type(struct wd_digest_msg *msg) diff --git a/include/wd_aead.h b/include/wd_aead.h index 01f6980..4b5095f 100644 --- a/include/wd_aead.h +++ b/include/wd_aead.h @@ -41,6 +41,8 @@ struct wd_aead_sess_setup { enum wd_digest_type dalg; enum wd_digest_mode dmode; void *sched_param; + struct wd_mm_ops mm_ops; + enum wd_mem_type mm_type; }; /** diff --git a/include/wd_cipher.h b/include/wd_cipher.h index d54f7fe..1d82eac 100644 --- a/include/wd_cipher.h +++ b/include/wd_cipher.h @@ -73,6 +73,8 @@ struct wd_cipher_sess_setup { enum wd_cipher_alg alg; enum wd_cipher_mode mode; void *sched_param; + struct wd_mm_ops mm_ops; + enum wd_mem_type mm_type; }; struct wd_cipher_req; diff --git a/include/wd_digest.h b/include/wd_digest.h index 6ce31f2..42a95db 100644 --- a/include/wd_digest.h +++ b/include/wd_digest.h @@ -100,6 +100,8 @@ struct wd_digest_sess_setup { enum wd_digest_type alg; enum wd_digest_mode mode; void *sched_param; + struct wd_mm_ops mm_ops; + enum wd_mem_type mm_type; }; typedef void *wd_digest_cb_t(void *cb_param); diff --git a/wd_aead.c b/wd_aead.c index 373b6fe..8467409 100644 --- a/wd_aead.c +++ b/wd_aead.c @@ -44,19 +44,22 @@ struct wd_aead_sess { enum wd_cipher_mode cmode; enum wd_digest_type dalg; enum wd_digest_mode dmode; - unsigned char ckey[MAX_CIPHER_KEY_SIZE]; - unsigned char akey[MAX_HMAC_KEY_SIZE]; + unsigned char *ckey; + unsigned char *akey; /* Mac data pointer for decrypto as stream mode */ - unsigned char mac_bak[WD_AEAD_CCM_GCM_MAX]; + unsigned char *mac_bak; __u16 ckey_bytes; __u16 akey_bytes; __u16 auth_bytes; void *priv; void *sched_key; /* Stored the counter for gcm stream mode */ - __u8 iv[MAX_IV_SIZE]; + __u8 *iv; /* Total of data for stream mode */ __u64 long_data_len; + struct wd_mm_ops mm_ops; + enum wd_mem_type mm_type; + struct wd_aead_extend_ops eops; }; struct wd_env_config wd_aead_env_config; @@ -302,26 +305,26 @@ int wd_aead_get_maxauthsize(handle_t h_sess) return g_aead_mac_len[sess->dalg]; } -handle_t wd_aead_alloc_sess(struct wd_aead_sess_setup *setup) +static struct wd_aead_sess *check_and_init_sess(struct wd_aead_sess_setup *setup) { - struct wd_aead_sess *sess = NULL; + struct wd_aead_sess *sess; bool ret; if (unlikely(!setup)) { WD_ERR("failed to check session input parameter!\n"); - return (handle_t)0; + return NULL; } if (setup->calg >= WD_CIPHER_ALG_TYPE_MAX || - setup->cmode >= WD_CIPHER_MODE_TYPE_MAX) { + setup->cmode >= WD_CIPHER_MODE_TYPE_MAX) { WD_ERR("failed to check algorithm setup!\n"); - return (handle_t)0; + return NULL; } sess = malloc(sizeof(struct wd_aead_sess)); if (!sess) { WD_ERR("failed to alloc session memory!\n"); - return (handle_t)0; + return NULL; } memset(sess, 
0, sizeof(struct wd_aead_sess)); @@ -330,24 +333,160 @@ handle_t wd_aead_alloc_sess(struct wd_aead_sess_setup *setup) sess->cmode = setup->cmode; sess->dalg = setup->dalg; sess->dmode = setup->dmode; + ret = wd_drv_alg_support(sess->alg_name, wd_aead_setting.driver); if (!ret) { WD_ERR("failed to support this algorithm: %s!\n", sess->alg_name); - goto err_sess; + free(sess); + return NULL; + } + + return sess; +} + +static int aead_setup_memory_and_buffers(struct wd_aead_sess *sess, + struct wd_aead_sess_setup *setup) +{ + wd_alloc aead_alloc_func; + wd_free aead_free_func; + void *mempool; + int ret; + + ret = wd_mem_ops_init(wd_aead_setting.config.ctxs[0].ctx, + &setup->mm_ops, setup->mm_type); + if (ret) { + WD_ERR("failed to init memory ops!\n"); + return -WD_EINVAL; + } + + memcpy(&sess->mm_ops, &setup->mm_ops, sizeof(struct wd_mm_ops)); + sess->mm_type = setup->mm_type; + + aead_alloc_func = sess->mm_ops.alloc; + aead_free_func = sess->mm_ops.free; + mempool = sess->mm_ops.usr; + + sess->mac_bak = aead_alloc_func(mempool, WD_AEAD_CCM_GCM_MAX); + if (!sess->mac_bak) { + WD_ERR("aead failed to calloc mac_bak memory!\n"); + return -WD_ENOMEM; + } + memset(sess->mac_bak, 0, WD_AEAD_CCM_GCM_MAX); + + sess->iv = aead_alloc_func(mempool, MAX_IV_SIZE); + if (!sess->iv) { + WD_ERR("failed to alloc iv memory!\n"); + goto iv_err; + } + memset(sess->iv, 0, MAX_IV_SIZE); + + sess->ckey = aead_alloc_func(mempool, MAX_CIPHER_KEY_SIZE); + if (!sess->ckey) { + WD_ERR("failed to alloc ckey memory!\n"); + goto ckey_err; + } + memset(sess->ckey, 0, MAX_CIPHER_KEY_SIZE); + + sess->akey = aead_alloc_func(mempool, MAX_HMAC_KEY_SIZE); + if (!sess->akey) { + WD_ERR("failed to alloc akey memory!\n"); + goto akey_err; + } + memset(sess->akey, 0, MAX_HMAC_KEY_SIZE); + + return 0; + +akey_err: + aead_free_func(mempool, sess->ckey); +ckey_err: + aead_free_func(mempool, sess->iv); +iv_err: + aead_free_func(mempool, sess->mac_bak); + + return -WD_ENOMEM; +} + +static void cleanup_session(struct wd_aead_sess *sess) +{ + sess->mm_ops.free(sess->mm_ops.usr, sess->mac_bak); + sess->mm_ops.free(sess->mm_ops.usr, sess->iv); + sess->mm_ops.free(sess->mm_ops.usr, sess->ckey); + sess->mm_ops.free(sess->mm_ops.usr, sess->akey); + + if (sess) + free(sess); +} + +static int wd_aead_sess_eops_init(struct wd_aead_sess *sess) +{ + int ret; + + if (sess->eops.eops_aiv_init) { + if (!sess->eops.eops_aiv_uninit) { + WD_ERR("failed to get aead extend ops free in session!\n"); + return -WD_EINVAL; + } + ret = sess->eops.eops_aiv_init(wd_aead_setting.driver, &sess->mm_ops, + &sess->eops.params); + if (ret) { + WD_ERR("failed to init aead extend ops params in session!\n"); + return ret; + } + } + + return WD_SUCCESS; +} + +static void wd_aead_sess_eops_uninit(struct wd_aead_sess *sess) +{ + if (sess->eops.eops_aiv_uninit) { + sess->eops.eops_aiv_uninit(wd_aead_setting.driver, &sess->mm_ops, + sess->eops.params); + sess->eops.params = NULL; + } +} + +handle_t wd_aead_alloc_sess(struct wd_aead_sess_setup *setup) +{ + struct wd_aead_sess *sess; + int ret; + + sess = check_and_init_sess(setup); + if (!sess) + return (handle_t)0; + + if (aead_setup_memory_and_buffers(sess, setup)) { + free(sess); + return (handle_t)0; + } + + if (wd_aead_setting.driver->get_extend_ops) { + ret = wd_aead_setting.driver->get_extend_ops(&sess->eops); + if (ret) { + WD_ERR("failed to get aead sess extend ops!\n"); + goto sess_err; + } + } + + ret = wd_aead_sess_eops_init(sess); + if (ret) { + WD_ERR("failed to init aead sess extend eops!\n"); + goto sess_err; } 
diff --git a/wd_cipher.c b/wd_cipher.c
index 92ca07b..58656dc 100644
--- a/wd_cipher.c
+++ b/wd_cipher.c
@@ -63,9 +63,11 @@ struct wd_cipher_sess {
 	enum wd_cipher_mode mode;
 	wd_dev_mask_t *dev_mask;
 	void *priv;
-	unsigned char key[MAX_CIPHER_KEY_SIZE];
+	unsigned char *key;
 	__u32 key_bytes;
 	void *sched_key;
+	struct wd_mm_ops mm_ops;
+	enum wd_mem_type mm_type;
 };
 
 struct wd_env_config wd_cipher_env_config;
@@ -250,6 +252,31 @@ int wd_cipher_set_key(handle_t h_sess, const __u8 *key, __u32 key_len)
 	return 0;
 }
 
+static int cipher_setup_memory_and_buffers(struct wd_cipher_sess *sess,
+					   struct wd_cipher_sess_setup *setup)
+{
+	int ret;
+
+	ret = wd_mem_ops_init(wd_cipher_setting.config.ctxs[0].ctx,
+			      &setup->mm_ops, setup->mm_type);
+	if (ret) {
+		WD_ERR("cipher failed to init memory ops!\n");
+		return ret;
+	}
+
+	memcpy(&sess->mm_ops, &setup->mm_ops, sizeof(struct wd_mm_ops));
+	sess->mm_type = setup->mm_type;
+
+	sess->key = sess->mm_ops.alloc(sess->mm_ops.usr, MAX_CIPHER_KEY_SIZE);
+	if (!sess->key) {
+		WD_ERR("cipher failed to alloc key memory!\n");
+		return -WD_ENOMEM;
+	}
+	memset(sess->key, 0, MAX_CIPHER_KEY_SIZE);
+
+	return 0;
+}
+
 handle_t wd_cipher_alloc_sess(struct wd_cipher_sess_setup *setup)
 {
 	struct wd_cipher_sess *sess = NULL;
@@ -282,16 +309,22 @@ handle_t wd_cipher_alloc_sess(struct wd_cipher_sess_setup *setup)
 	sess->alg = setup->alg;
 	sess->mode = setup->mode;
+	/* Set the memory type and allocate the key buffer */
+	if (cipher_setup_memory_and_buffers(sess, setup))
+		goto free_sess;
+
 	/* Some simple scheduler don't need scheduling parameters */
 	sess->sched_key = (void *)wd_cipher_setting.sched.sched_init(
 			wd_cipher_setting.sched.h_sched_ctx, setup->sched_param);
 	if (WD_IS_ERR(sess->sched_key)) {
 		WD_ERR("failed to init session schedule key!\n");
-		goto free_sess;
+		goto free_key;
 	}
 
 	return (handle_t)sess;
 
+free_key:
+	sess->mm_ops.free(sess->mm_ops.usr, sess->key);
 free_sess:
 	free(sess);
 	return (handle_t)0;
@@ -307,6 +340,7 @@ void wd_cipher_free_sess(handle_t h_sess)
 	}
 
 	wd_memset_zero(sess->key, sess->key_bytes);
+	sess->mm_ops.free(sess->mm_ops.usr, sess->key);
 	if (sess->sched_key)
 		free(sess->sched_key);
@@ -545,6 +579,8 @@ static void fill_request_msg(struct wd_cipher_msg *msg,
 	msg->iv = req->iv;
 	msg->iv_bytes = req->iv_bytes;
 	msg->data_fmt = req->data_fmt;
+	msg->mm_ops = &sess->mm_ops;
+	msg->mm_type = sess->mm_type;
 }
 
 static int cipher_iv_len_check(struct wd_cipher_req *req,
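Note: the wd_digest.c hunks below repeat the same pattern as wd_cipher.c.
The reason the inline key array becomes a pool-backed pointer is that in
no-sva mode the device reaches session buffers through IOVAs, so the key
must live in memory the pool can pass to iova_map(); an array embedded in
a plain malloc()ed session struct cannot be mapped. A sketch of the
resulting session lifetime, assuming a setup already filled in as in the
aead example above (error handling abbreviated):

static int cipher_key_lifetime(struct wd_cipher_sess_setup *setup,
			       const __u8 *key, __u32 key_len)
{
	handle_t h_sess;
	int ret;

	h_sess = wd_cipher_alloc_sess(setup);	/* key buffer now pool-backed */
	if (!h_sess)
		return -WD_EINVAL;

	ret = wd_cipher_set_key(h_sess, key, key_len);

	/* free path: wd_memset_zero() on the key, then mm_ops.free(),
	 * as in the wd_cipher_free_sess() hunk above */
	wd_cipher_free_sess(h_sess);
	return ret;
}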
diff --git a/wd_digest.c b/wd_digest.c
index 2d31176..0b37f8b 100644
--- a/wd_digest.c
+++ b/wd_digest.c
@@ -64,10 +64,12 @@ struct wd_digest_sess {
 	enum wd_digest_type alg;
 	enum wd_digest_mode mode;
 	void *priv;
-	unsigned char key[MAX_HMAC_KEY_SIZE];
+	unsigned char *key;
 	__u32 key_bytes;
 	void *sched_key;
 	struct wd_digest_stream_data stream_data;
+	struct wd_mm_ops mm_ops;
+	enum wd_mem_type mm_type;
 };
 
 struct wd_env_config wd_digest_env_config;
@@ -187,6 +189,31 @@ int wd_digest_set_key(handle_t h_sess, const __u8 *key, __u32 key_len)
 	return 0;
 }
 
+static int digest_setup_memory_and_buffers(struct wd_digest_sess *sess,
+					   struct wd_digest_sess_setup *setup)
+{
+	int ret;
+
+	ret = wd_mem_ops_init(wd_digest_setting.config.ctxs[0].ctx,
+			      &setup->mm_ops, setup->mm_type);
+	if (ret) {
+		WD_ERR("failed to init memory ops!\n");
+		return ret;
+	}
+
+	memcpy(&sess->mm_ops, &setup->mm_ops, sizeof(struct wd_mm_ops));
+	sess->mm_type = setup->mm_type;
+
+	sess->key = sess->mm_ops.alloc(sess->mm_ops.usr, MAX_HMAC_KEY_SIZE);
+	if (!sess->key) {
+		WD_ERR("digest failed to alloc key memory!\n");
+		return -WD_ENOMEM;
+	}
+	memset(sess->key, 0, MAX_HMAC_KEY_SIZE);
+
+	return 0;
+}
+
 handle_t wd_digest_alloc_sess(struct wd_digest_sess_setup *setup)
 {
 	struct wd_digest_sess *sess = NULL;
@@ -215,16 +242,23 @@ handle_t wd_digest_alloc_sess(struct wd_digest_sess_setup *setup)
 		WD_ERR("failed to support this algorithm: %s!\n", sess->alg_name);
 		goto err_sess;
 	}
+
+	/* Set the memory type and allocate the key buffer */
+	if (digest_setup_memory_and_buffers(sess, setup))
+		goto err_sess;
+
 	/* Some simple scheduler don't need scheduling parameters */
 	sess->sched_key = (void *)wd_digest_setting.sched.sched_init(
 			wd_digest_setting.sched.h_sched_ctx, setup->sched_param);
 	if (WD_IS_ERR(sess->sched_key)) {
 		WD_ERR("failed to init session schedule key!\n");
-		goto err_sess;
+		goto err_key;
 	}
 
 	return (handle_t)sess;
 
+err_key:
+	sess->mm_ops.free(sess->mm_ops.usr, sess->key);
 err_sess:
 	free(sess);
 	return (handle_t)0;
@@ -240,6 +274,7 @@ void wd_digest_free_sess(handle_t h_sess)
 	}
 
 	wd_memset_zero(sess->key, sess->key_bytes);
+	sess->mm_ops.free(sess->mm_ops.usr, sess->key);
 	if (sess->sched_key)
 		free(sess->sched_key);
 	free(sess);
@@ -603,6 +638,9 @@ static void fill_request_msg(struct wd_digest_msg *msg,
 	msg->partial_block = sess->stream_data.partial_block;
 	msg->partial_bytes = sess->stream_data.partial_bytes;
 
+	msg->mm_ops = &sess->mm_ops;
+	msg->mm_type = sess->mm_type;
+
 	/* Use iv_bytes to store the stream message state */
 	msg->iv_bytes = sess->stream_data.msg_state;
 }
-- 
2.33.0