From: Weili Qian <qianweili(a)huawei.com>
UADK supports hardware acceleration for data movement (UDMA).
Currently, data copy (memcpy) and data initialization (memset) are supported.
Signed-off-by: Weili Qian <qianweili(a)huawei.com>
---
Makefile.am | 27 +-
drv/hisi_udma.c | 565 ++++++++++++++++++++++++++++++++++++++
include/drv/wd_udma_drv.h | 34 +++
include/wd_alg.h | 2 +
include/wd_udma.h | 124 +++++++++
include/wd_util.h | 1 +
libwd_dae.map | 11 +-
wd_udma.c | 511 ++++++++++++++++++++++++++++++++++
wd_util.c | 2 +
9 files changed, 1273 insertions(+), 4 deletions(-)
create mode 100644 drv/hisi_udma.c
create mode 100644 include/drv/wd_udma_drv.h
create mode 100644 include/wd_udma.h
create mode 100644 wd_udma.c
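
A minimal synchronous usage sketch of the new interface is given below for
reference. It is not part of the patch; it assumes a UDMA-capable device is
present, that the existing SCHED_POLICY_RR/TASK_HW definitions from
wd_alg_common.h/wd_alg.h are used, and that NULL ctx_params is passed to take
the driver defaults.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "wd_alg.h"
#include "wd_udma.h"

int main(void)
{
        struct wd_udma_sess_setup setup = {0};
        struct wd_data_addr src = {0}, dst = {0};
        struct wd_udma_req req = {0};
        handle_t sess;
        int ret;

        /* Initialize the udma algorithm; NULL ctx_params uses the driver defaults. */
        ret = wd_udma_init("udma", SCHED_POLICY_RR, TASK_HW, NULL);
        if (ret)
                return ret;

        sess = wd_udma_alloc_sess(&setup);
        if (!sess) {
                wd_udma_uninit();
                return -1;
        }

        src.addr = malloc(4096);
        dst.addr = malloc(4096);
        if (!src.addr || !dst.addr) {
                ret = -1;
                goto out;
        }
        src.addr_size = dst.addr_size = 4096;
        src.data_size = dst.data_size = 4096;
        memset(src.addr, 0x5a, 4096);

        /* Single-address copy: src and dst each describe one buffer, addr_num is 1. */
        req.src = &src;
        req.dst = &dst;
        req.addr_num = 1;
        req.op_type = WD_UDMA_MEMCPY;
        ret = wd_do_udma_sync(sess, &req);
        printf("udma memcpy ret %d, status %d\n", ret, req.status);

out:
        free(src.addr);
        free(dst.addr);
        wd_udma_free_sess(sess);
        wd_udma_uninit();
        return ret;
}

The async path is the same except that wd_do_udma_async() is called with
req.cb set and completions are reaped through wd_udma_poll().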
diff --git a/Makefile.am b/Makefile.am
index c4b9c526..df756f72 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -36,16 +36,17 @@ pkginclude_HEADERS = include/wd.h include/wd_cipher.h include/wd_aead.h \
include/wd_comp.h include/wd_dh.h include/wd_digest.h \
include/wd_rsa.h include/uacce.h include/wd_alg_common.h \
include/wd_ecc.h include/wd_sched.h include/wd_alg.h \
- include/wd_zlibwrapper.h include/wd_dae.h include/wd_agg.h
+ include/wd_zlibwrapper.h include/wd_dae.h include/wd_agg.h \
+ include/wd_udma.h
nobase_pkginclude_HEADERS = v1/wd.h v1/wd_cipher.h v1/wd_aead.h v1/uacce.h v1/wd_dh.h \
v1/wd_digest.h v1/wd_rsa.h v1/wd_bmm.h
-lib_LTLIBRARIES=libwd.la libwd_comp.la libwd_crypto.la libwd_dae.la
+lib_LTLIBRARIES=libwd.la libwd_comp.la libwd_crypto.la libwd_dae.la libwd_udma.la
uadk_driversdir=$(libdir)/uadk
uadk_drivers_LTLIBRARIES=libhisi_sec.la libhisi_hpre.la libhisi_zip.la \
- libisa_ce.la libisa_sve.la libhisi_dae.la
+ libisa_ce.la libisa_sve.la libhisi_dae.la libhisi_udma.la
libwd_la_SOURCES=wd.c wd_mempool.c wd.h wd_alg.c wd_alg.h \
v1/wd.c v1/wd.h v1/wd_adapter.c v1/wd_adapter.h \
@@ -69,6 +70,9 @@ libwd_la_SOURCES=wd.c wd_mempool.c wd.h wd_alg.c wd_alg.h \
v1/drv/hisi_sec_udrv.c v1/drv/hisi_sec_udrv.h \
v1/drv/hisi_rng_udrv.c v1/drv/hisi_rng_udrv.h
+libwd_udma_la_SOURCES=wd_udma.h wd_udma_drv.h wd_udma.c \
+ wd_util.c wd_util.h wd_sched.c wd_sched.h wd.c wd.h
+
libwd_dae_la_SOURCES=wd_dae.h wd_agg.h wd_agg_drv.h wd_agg.c \
wd_util.c wd_util.h wd_sched.c wd_sched.h wd.c wd.h
@@ -110,6 +114,9 @@ endif
libhisi_dae_la_SOURCES=drv/hisi_dae.c drv/hisi_qm_udrv.c \
hisi_qm_udrv.h
+libhisi_udma_la_SOURCES=drv/hisi_udma.c drv/hisi_qm_udrv.c \
+ hisi_qm_udrv.h
+
if WD_STATIC_DRV
AM_CFLAGS += -DWD_STATIC_DRV -fPIC
AM_CFLAGS += -DWD_NO_LOG
@@ -124,6 +131,9 @@ libhisi_zip_la_LIBADD = -ldl
libwd_crypto_la_LIBADD = $(libwd_la_OBJECTS) -ldl -lnuma
libwd_crypto_la_DEPENDENCIES = libwd.la
+libwd_udma_la_LIBADD = $(libwd_la_OBJECTS) -ldl -lnuma -lm -lpthread
+libwd_udma_la_DEPENDENCIES = libwd.la
+
libwd_dae_la_LIBADD = $(libwd_la_OBJECTS) -ldl -lnuma
libwd_dae_la_DEPENDENCIES = libwd.la
@@ -139,6 +149,9 @@ libisa_ce_la_DEPENDENCIES = libwd.la libwd_crypto.la
libisa_sve_la_LIBADD = $(libwd_la_OBJECTS) $(libwd_crypto_la_OBJECTS)
libisa_sve_la_DEPENDENCIES = libwd.la libwd_crypto.la
+libhisi_udma_la_LIBADD = $(libwd_la_OBJECTS) $(libwd_udma_la_OBJECTS)
+libhisi_udma_la_DEPENDENCIES = libwd.la libwd_udma.la
+
libhisi_dae_la_LIBADD = $(libwd_la_OBJECTS) $(libwd_dae_la_OBJECTS)
libhisi_dae_la_DEPENDENCIES = libwd.la libwd_dae.la
@@ -160,6 +173,10 @@ libwd_crypto_la_LIBADD= -lwd -ldl -lnuma
libwd_crypto_la_LDFLAGS=$(UADK_VERSION) $(UADK_CRYPTO_SYMBOL) -lpthread
libwd_crypto_la_DEPENDENCIES= libwd.la
+libwd_udma_la_LIBADD= -lwd -ldl -lnuma -lm -lpthread
+libwd_udma_la_LDFLAGS=$(UADK_VERSION) $(UADK_DAE_SYMBOL)
+libwd_udma_la_DEPENDENCIES= libwd.la
+
libwd_dae_la_LIBADD= -lwd -ldl -lnuma -lm
libwd_dae_la_LDFLAGS=$(UADK_VERSION) $(UADK_DAE_SYMBOL)
libwd_dae_la_DEPENDENCIES= libwd.la
@@ -184,6 +201,10 @@ libisa_sve_la_LIBADD= -lwd -lwd_crypto
libisa_sve_la_LDFLAGS=$(UADK_VERSION)
libisa_sve_la_DEPENDENCIES= libwd.la libwd_crypto.la
+libhisi_udma_la_LIBADD= -lwd -lwd_udma
+libhisi_udma_la_LDFLAGS=$(UADK_VERSION)
+libhisi_udma_la_DEPENDENCIES= libwd.la libwd_udma.la
+
libhisi_dae_la_LIBADD= -lwd -lwd_dae
libhisi_dae_la_LDFLAGS=$(UADK_VERSION)
libhisi_dae_la_DEPENDENCIES= libwd.la libwd_dae.la
diff --git a/drv/hisi_udma.c b/drv/hisi_udma.c
new file mode 100644
index 00000000..57dae8cb
--- /dev/null
+++ b/drv/hisi_udma.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: Apache-2.0
+/* Copyright 2025 Huawei Technologies Co.,Ltd. All rights reserved. */
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include "hisi_qm_udrv.h"
+#include "../include/drv/wd_udma_drv.h"
+
+#define BIT(nr) (1UL << (nr))
+#define UDMA_CTX_Q_NUM_DEF 1
+#define UDMA_TASK_TYPE 0x3
+#define UDMA_SQE_TYPE 0x1
+#define UDMA_ALG_TYPE 2
+/* Max total data size of a multi-address task is (16M - 1) * 64 bytes */
+#define UDMA_M_MAX_ADDR_SIZE 1073741760
+/* Max data size of a single address is (16M - 1) bytes */
+#define UDMA_S_MAX_ADDR_SIZE 16777215
+#define UDMA_MAX_ADDR_NUM 64
+#define UDMA_ADDR_NUM_SHIFT 6
+#define UDMA_MULTI_ADDR_EN BIT(14)
+#define UDMA_SVA_PREFETCH_EN BIT(15)
+#define UDMA_ADDR_RESV_NUM 16
+#define UDMA_ADDR_ALIGN_SIZE 128
+
+enum {
+ DATA_MEMCPY = 0x0,
+ DATA_MEMSET = 0x7,
+};
+
+enum {
+ UDMA_TASK_DONE = 0x1,
+ UDMA_TASK_ERROR = 0x2,
+};
+
+struct udma_addr {
+ __u64 addr;
+ __u64 data_size;
+};
+
+struct udma_addr_array {
+ __u64 resv_addr[UDMA_ADDR_RESV_NUM];
+ struct udma_addr src_addr[UDMA_MAX_ADDR_NUM];
+ struct udma_addr dst_addr[UDMA_MAX_ADDR_NUM];
+};
+
+struct udma_sqe {
+ __u32 bd_type : 6;
+ __u32 resv1 : 2;
+ __u32 task_type : 6;
+ __u32 resv2 : 2;
+ __u32 task_type_ext : 6;
+ __u32 resv3 : 9;
+ __u32 bd_invalid : 1;
+ __u32 rsv4[2];
+ __u32 low_tag;
+ __u32 hi_tag;
+ /* The number of bytes to be copied or filled for single address. */
+ __u32 data_size;
+ __u32 rsv5;
+ /*
+ * bits 0 ~ 13: reserved,
+ * bit 14: single address or multiple addresses,
+ * bit 15: SVA prefetch enable.
+ */
+ __u16 dw0;
+ /*
+ * bits 0 ~ 5: reserved,
+ * bits 6 ~ 13: number of addresses minus 1,
+ * bits 14 ~ 15: reserved.
+ */
+ __u16 dw1;
+ __u64 init_val;
+ __u32 rsv6[12];
+ /* dst addr for single address task */
+ __u64 dst_addr;
+ __u32 rsv7[2];
+ /* src addr for single address task, addr array for multi addresses. */
+ __u64 addr_array;
+ __u32 done_flag : 3;
+ __u32 rsv8 : 1;
+ __u32 ext_err_type : 12;
+ __u32 err_type : 8;
+ __u32 wtype : 8;
+ __u32 rsv9[3];
+};
+
+struct udma_internal_addr {
+ struct udma_addr_array *addr_array;
+ __u8 *addr_status;
+ __u16 addr_count;
+ __u16 tail;
+};
+
+struct hisi_udma_ctx {
+ struct wd_ctx_config_internal config;
+};
+
+static int get_free_inter_addr(struct udma_internal_addr *inter_addr)
+{
+ __u16 addr_count = inter_addr->addr_count;
+ __u16 idx = inter_addr->tail;
+ __u16 cnt = 0;
+
+ if (unlikely(!addr_count)) {
+ WD_ERR("invalid: internal addr count is 0!\n");
+ return -WD_EINVAL;
+ }
+
+ while (__atomic_test_and_set(&inter_addr->addr_status[idx], __ATOMIC_ACQUIRE)) {
+ idx = (idx + 1) % addr_count;
+ cnt++;
+ if (cnt == addr_count)
+ return -WD_EBUSY;
+ }
+
+ inter_addr->tail = (idx + 1) % addr_count;
+
+ return idx;
+}
+
+static void put_inter_addr(struct udma_internal_addr *inter_addr, int idx)
+{
+ __atomic_clear(&inter_addr->addr_status[idx], __ATOMIC_RELEASE);
+}
+
+static int check_udma_param(struct wd_udma_msg *msg)
+{
+ int i;
+
+ if (unlikely(!msg)) {
+ WD_ERR("invalid: input udma msg is NULL!\n");
+ return -WD_EINVAL;
+ }
+
+ if (unlikely(msg->addr_num > UDMA_MAX_ADDR_NUM)) {
+ WD_ERR("invalid: input addr_num is more than %d!\n", UDMA_MAX_ADDR_NUM);
+ return -WD_EINVAL;
+ }
+
+ /*
+ * When the single address length exceeds UDMA_S_MAX_ADDR_SIZE,
+ * the driver will split the address into multiple addresses and
+ * send them to the hardware.
+ */
+ if (msg->addr_num == 1) {
+ if (unlikely(msg->dst->data_size > UDMA_M_MAX_ADDR_SIZE)) {
+ WD_ERR("invalid: input size %lu is more than %d!\n",
+ msg->dst->data_size, UDMA_M_MAX_ADDR_SIZE);
+ return -WD_EINVAL;
+ }
+
+ return WD_SUCCESS;
+ }
+
+ for (i = 0; i < msg->addr_num; i++) {
+ if (unlikely(msg->dst[i].data_size > UDMA_S_MAX_ADDR_SIZE)) {
+ WD_ERR("invalid: addr %d input size %lu is more than %d!\n",
+ i, msg->dst[i].data_size, UDMA_S_MAX_ADDR_SIZE);
+ return -WD_EINVAL;
+ }
+ }
+
+ return WD_SUCCESS;
+}
+
+static void fill_long_size_memcpy_info(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ __u32 addr_num = 0;
+ __u64 count;
+
+ for (count = 0; count < msg->src->data_size; count += UDMA_S_MAX_ADDR_SIZE) {
+ addr_array->src_addr[addr_num].addr = (__u64)(uintptr_t)msg->src->addr + count;
+ addr_array->dst_addr[addr_num].addr = (__u64)(uintptr_t)msg->dst->addr + count;
+ if (count + UDMA_S_MAX_ADDR_SIZE <= msg->src->data_size) {
+ addr_array->src_addr[addr_num].data_size = UDMA_S_MAX_ADDR_SIZE;
+ addr_array->dst_addr[addr_num].data_size = UDMA_S_MAX_ADDR_SIZE;
+ } else {
+ addr_array->src_addr[addr_num].data_size = msg->src->data_size - count;
+ addr_array->dst_addr[addr_num].data_size = msg->dst->data_size - count;
+ }
+ addr_num++;
+ }
+ sqe->dw1 |= (addr_num - 1) << UDMA_ADDR_NUM_SHIFT;
+}
+
+static void fill_long_size_memset_info(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ __u32 addr_num = 0;
+ __u64 count;
+
+ for (count = 0; count < msg->dst->data_size; count += UDMA_S_MAX_ADDR_SIZE) {
+ addr_array->dst_addr[addr_num].addr = (__u64)(uintptr_t)msg->dst->addr + count;
+ if (count + UDMA_S_MAX_ADDR_SIZE <= msg->dst->data_size)
+ addr_array->dst_addr[addr_num].data_size = UDMA_S_MAX_ADDR_SIZE;
+ else
+ addr_array->dst_addr[addr_num].data_size = msg->dst->data_size - count;
+ addr_num++;
+ }
+
+ sqe->dw1 |= (addr_num - 1) << UDMA_ADDR_NUM_SHIFT;
+}
+
+static void fill_multi_memset_addr_info(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ int i;
+
+ for (i = 0; i < msg->addr_num; i++) {
+ addr_array->dst_addr[i].addr = (__u64)(uintptr_t)msg->dst[i].addr;
+ addr_array->dst_addr[i].data_size = (__u64)msg->dst[i].data_size;
+ }
+
+ sqe->dw1 |= ((__u32)msg->addr_num - 1) << UDMA_ADDR_NUM_SHIFT;
+}
+
+static void fill_multi_memcpy_addr_info(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ int i;
+
+ for (i = 0; i < msg->addr_num; i++) {
+ addr_array->src_addr[i].addr = (__u64)(uintptr_t)msg->src[i].addr;
+ addr_array->src_addr[i].data_size = (__u64)msg->src[i].data_size;
+ addr_array->dst_addr[i].addr = (__u64)(uintptr_t)msg->dst[i].addr;
+ addr_array->dst_addr[i].data_size = (__u64)msg->dst[i].data_size;
+ }
+
+ sqe->dw1 |= ((__u32)msg->addr_num - 1) << UDMA_ADDR_NUM_SHIFT;
+}
+
+static void fill_multi_addr_info(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ if (msg->addr_num == 1) {
+ if (msg->op_type == WD_UDMA_MEMCPY)
+ fill_long_size_memcpy_info(sqe, msg, addr_array);
+ else
+ fill_long_size_memset_info(sqe, msg, addr_array);
+ } else {
+ if (msg->op_type == WD_UDMA_MEMCPY)
+ fill_multi_memcpy_addr_info(sqe, msg, addr_array);
+ else
+ fill_multi_memset_addr_info(sqe, msg, addr_array);
+ }
+
+ sqe->addr_array = (__u64)(uintptr_t)addr_array;
+ sqe->dw0 |= UDMA_MULTI_ADDR_EN;
+}
+
+static void fill_single_addr_info(struct udma_sqe *sqe, struct wd_udma_msg *msg)
+{
+ if (msg->op_type == WD_UDMA_MEMCPY)
+ sqe->addr_array = (__u64)(uintptr_t)msg->src->addr;
+ sqe->dst_addr = (__u64)(uintptr_t)msg->dst->addr;
+ sqe->data_size = msg->dst->data_size;
+}
+
+static void fill_udma_sqe_addr(struct udma_sqe *sqe, struct wd_udma_msg *msg,
+ struct udma_addr_array *addr_array)
+{
+ if (!addr_array)
+ fill_single_addr_info(sqe, msg);
+ else
+ fill_multi_addr_info(sqe, msg, addr_array);
+}
+
+static void fill_sqe_type(struct udma_sqe *sqe, struct wd_udma_msg *msg)
+{
+ sqe->bd_type = UDMA_SQE_TYPE;
+ sqe->task_type = UDMA_TASK_TYPE;
+ if (msg->op_type == WD_UDMA_MEMCPY)
+ sqe->task_type_ext = DATA_MEMCPY;
+ else
+ sqe->task_type_ext = DATA_MEMSET;
+}
+
+static void fill_init_value(struct udma_sqe *sqe, struct wd_udma_msg *msg)
+{
+ if (msg->op_type == WD_UDMA_MEMSET)
+ memset(&sqe->init_val, msg->value, sizeof(__u64));
+}
+
+static int udma_send(struct wd_alg_driver *drv, handle_t ctx, void *udma_msg)
+{
+ handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx);
+ struct hisi_qp *qp = (struct hisi_qp *)h_qp;
+ struct udma_internal_addr *inter_addr = qp->priv;
+ struct udma_addr_array *addr_array = NULL;
+ struct wd_udma_msg *msg = udma_msg;
+ struct udma_sqe sqe = {0};
+ __u16 send_cnt = 0;
+ int idx = 0;
+ int ret;
+
+ ret = check_udma_param(msg);
+ if (unlikely(ret))
+ return ret;
+
+ if (msg->addr_num > 1 || msg->dst->data_size > UDMA_S_MAX_ADDR_SIZE) {
+ idx = get_free_inter_addr(inter_addr);
+ if (idx < 0)
+ return -WD_EBUSY;
+
+ addr_array = &inter_addr->addr_array[idx];
+ memset(addr_array, 0, sizeof(struct udma_addr_array));
+ }
+
+ fill_sqe_type(&sqe, msg);
+ fill_init_value(&sqe, msg);
+ fill_udma_sqe_addr(&sqe, msg, addr_array);
+
+ hisi_set_msg_id(h_qp, &msg->tag);
+ sqe.low_tag = msg->tag;
+ sqe.hi_tag = (__u32)idx;
+ sqe.dw0 |= UDMA_SVA_PREFETCH_EN;
+
+ ret = hisi_qm_send(h_qp, &sqe, 1, &send_cnt);
+ if (unlikely(ret)) {
+ if (ret != -WD_EBUSY)
+ WD_ERR("failed to send to hardware, ret = %d!\n", ret);
+ if (addr_array)
+ put_inter_addr(inter_addr, idx);
+ return ret;
+ }
+
+ return WD_SUCCESS;
+}
+
+static void dump_udma_msg(struct udma_sqe *sqe, struct wd_udma_msg *msg)
+{
+ WD_ERR("dump UDMA message after a task error occurs.\n"
+ "op_type:%u addr_num:%d.\n", msg->op_type, msg->addr_num);
+}
+
+static int udma_recv(struct wd_alg_driver *drv, handle_t ctx, void *udma_msg)
+{
+ handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx);
+ struct hisi_qp *qp = (struct hisi_qp *)h_qp;
+ struct udma_internal_addr *inter_addr = qp->priv;
+ struct wd_udma_msg *msg = udma_msg;
+ struct wd_udma_msg *temp_msg = msg;
+ struct udma_sqe sqe = {0};
+ __u16 recv_cnt = 0;
+ int ret;
+
+ ret = hisi_qm_recv(h_qp, &sqe, 1, &recv_cnt);
+ if (ret)
+ return ret;
+
+ ret = hisi_check_bd_id(h_qp, msg->tag, sqe.low_tag);
+ if (ret)
+ goto out;
+
+ msg->tag = sqe.low_tag;
+ if (qp->q_info.qp_mode == CTX_MODE_ASYNC) {
+ temp_msg = wd_udma_get_msg(qp->q_info.idx, msg->tag);
+ if (!temp_msg) {
+ WD_ERR("failed to get send msg! idx = %u, tag = %u.\n",
+ qp->q_info.idx, msg->tag);
+ ret = -WD_EINVAL;
+ goto out;
+ }
+ }
+
+ msg->result = WD_SUCCESS;
+ if (sqe.done_flag != UDMA_TASK_DONE ||
+ sqe.err_type || sqe.ext_err_type || sqe.wtype) {
+ WD_ERR("failed to do udma task! done=0x%x, err_type=0x%x\n"
+ "ext_err_type=0x%x, wtype=0x%x!\n",
+ (__u32)sqe.done_flag, (__u32)sqe.err_type,
+ (__u32)sqe.ext_err_type, (__u32)sqe.wtype);
+ msg->result = WD_IN_EPARA;
+ }
+
+ if (unlikely(msg->result != WD_SUCCESS))
+ dump_udma_msg(&sqe, temp_msg);
+
+out:
+ if (sqe.dw0 & UDMA_MULTI_ADDR_EN)
+ put_inter_addr(inter_addr, sqe.hi_tag);
+ return ret;
+}
+
+static void udma_uninit_qp_priv(handle_t h_qp)
+{
+ struct hisi_qp *qp = (struct hisi_qp *)h_qp;
+ struct udma_internal_addr *inter_addr;
+
+ if (!qp)
+ return;
+
+ inter_addr = (struct udma_internal_addr *)qp->priv;
+ if (!inter_addr)
+ return;
+
+ free(inter_addr->addr_array);
+ free(inter_addr->addr_status);
+ free(inter_addr);
+ qp->priv = NULL;
+}
+
+static int udma_init_qp_priv(handle_t h_qp)
+{
+ struct hisi_qp *qp = (struct hisi_qp *)h_qp;
+ __u16 sq_depth = qp->q_info.sq_depth;
+ struct udma_internal_addr *inter_addr;
+ int ret = -WD_ENOMEM;
+
+ inter_addr = calloc(1, sizeof(struct udma_internal_addr));
+ if (!inter_addr)
+ return ret;
+
+ inter_addr->addr_status = calloc(1, sizeof(__u8) * sq_depth);
+ if (!inter_addr->addr_status)
+ goto free_inter_addr;
+
+ inter_addr->addr_array = aligned_alloc(UDMA_ADDR_ALIGN_SIZE,
+ sizeof(struct udma_addr_array) * sq_depth);
+ if (!inter_addr->addr_array)
+ goto free_addr_status;
+
+ inter_addr->addr_count = sq_depth;
+ qp->priv = inter_addr;
+
+ return WD_SUCCESS;
+
+free_addr_status:
+ free(inter_addr->addr_status);
+free_inter_addr:
+ free(inter_addr);
+
+ return ret;
+}
+
+static int udma_init(struct wd_alg_driver *drv, void *conf)
+{
+ struct wd_ctx_config_internal *config = conf;
+ struct hisi_qm_priv qm_priv;
+ struct hisi_udma_ctx *priv;
+ handle_t h_qp = 0;
+ handle_t h_ctx;
+ __u32 i, j;
+ int ret;
+
+ if (!config || !config->ctx_num) {
+ WD_ERR("invalid: udma init config is null or ctx num is 0!\n");
+ return -WD_EINVAL;
+ }
+
+ priv = malloc(sizeof(struct hisi_udma_ctx));
+ if (!priv)
+ return -WD_ENOMEM;
+
+ qm_priv.op_type = UDMA_ALG_TYPE;
+ qm_priv.sqe_size = sizeof(struct udma_sqe);
+ /* Allocate qp for each context */
+ for (i = 0; i < config->ctx_num; i++) {
+ h_ctx = config->ctxs[i].ctx;
+ qm_priv.qp_mode = config->ctxs[i].ctx_mode;
+ /* Setting the epoll en to 0 for ASYNC ctx */
+ qm_priv.epoll_en = (qm_priv.qp_mode == CTX_MODE_SYNC) ?
+ config->epoll_en : 0;
+ qm_priv.idx = i;
+ h_qp = hisi_qm_alloc_qp(&qm_priv, h_ctx);
+ if (!h_qp) {
+ ret = -WD_ENOMEM;
+ goto out;
+ }
+ config->ctxs[i].sqn = qm_priv.sqn;
+ ret = udma_init_qp_priv(h_qp);
+ if (ret)
+ goto free_h_qp;
+ }
+ memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal));
+ drv->priv = priv;
+
+ return WD_SUCCESS;
+free_h_qp:
+ hisi_qm_free_qp(h_qp);
+out:
+ for (j = 0; j < i; j++) {
+ h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[j].ctx);
+ udma_uninit_qp_priv(h_qp);
+ hisi_qm_free_qp(h_qp);
+ }
+ free(priv);
+ return ret;
+}
+
+static void udma_exit(struct wd_alg_driver *drv)
+{
+ struct wd_ctx_config_internal *config;
+ struct hisi_udma_ctx *priv;
+ handle_t h_qp;
+ __u32 i;
+
+ if (!drv || !drv->priv)
+ return;
+
+ priv = (struct hisi_udma_ctx *)drv->priv;
+ config = &priv->config;
+ for (i = 0; i < config->ctx_num; i++) {
+ h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx);
+ udma_uninit_qp_priv(h_qp);
+ hisi_qm_free_qp(h_qp);
+ }
+
+ free(priv);
+ drv->priv = NULL;
+}
+
+static int udma_get_usage(void *param)
+{
+ return 0;
+}
+
+static struct wd_alg_driver udma_driver = {
+ .drv_name = "hisi_zip",
+ .alg_name = "udma",
+ .calc_type = UADK_ALG_HW,
+ .priority = 100,
+ .queue_num = UDMA_CTX_Q_NUM_DEF,
+ .op_type_num = 1,
+ .fallback = 0,
+ .init = udma_init,
+ .exit = udma_exit,
+ .send = udma_send,
+ .recv = udma_recv,
+ .get_usage = udma_get_usage,
+};
+
+#ifdef WD_STATIC_DRV
+void hisi_udma_probe(void)
+#else
+static void __attribute__((constructor)) hisi_udma_probe(void)
+#endif
+{
+ int ret;
+
+ WD_INFO("Info: register UDMA alg drivers!\n");
+
+ ret = wd_alg_driver_register(&udma_driver);
+ if (ret && ret != -WD_ENODEV)
+ WD_ERR("failed to register UDMA driver, ret = %d!\n", ret);
+}
+
+#ifdef WD_STATIC_DRV
+void hisi_udma_remove(void)
+#else
+static void __attribute__((destructor)) hisi_udma_remove(void)
+#endif
+{
+ WD_INFO("Info: unregister UDMA alg drivers!\n");
+
+ wd_alg_driver_unregister(&udma_driver);
+}
diff --git a/include/drv/wd_udma_drv.h b/include/drv/wd_udma_drv.h
new file mode 100644
index 00000000..c8028f79
--- /dev/null
+++ b/include/drv/wd_udma_drv.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/* Copyright 2025 Huawei Technologies Co.,Ltd. All rights reserved. */
+
+#ifndef __WD_UDMA_DRV_H
+#define __WD_UDMA_DRV_H
+
+#include <asm/types.h>
+
+#include "../wd_udma.h"
+#include "../wd_util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* udma message format */
+struct wd_udma_msg {
+ struct wd_udma_req req;
+ struct wd_data_addr *src;
+ struct wd_data_addr *dst;
+ int addr_num;
+ int value;
+ enum wd_udma_op_type op_type;
+ __u32 tag; /* User-defined request identifier */
+ __u8 result; /* alg op error code */
+};
+
+struct wd_udma_msg *wd_udma_get_msg(__u32 idx, __u32 tag);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __WD_UDMA_DRV_H */
diff --git a/include/wd_alg.h b/include/wd_alg.h
index aba855d6..441b3bef 100644
--- a/include/wd_alg.h
+++ b/include/wd_alg.h
@@ -204,11 +204,13 @@ void hisi_sec2_probe(void);
void hisi_hpre_probe(void);
void hisi_zip_probe(void);
void hisi_dae_probe(void);
+void hisi_udma_probe(void);
void hisi_sec2_remove(void);
void hisi_hpre_remove(void);
void hisi_zip_remove(void);
void hisi_dae_remove(void);
+void hisi_udma_remove(void);
#endif
diff --git a/include/wd_udma.h b/include/wd_udma.h
new file mode 100644
index 00000000..d8a7964e
--- /dev/null
+++ b/include/wd_udma.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright 2025 Huawei Technologies Co.,Ltd. All rights reserved.
+ */
+
+#ifndef __WD_UDMA_H
+#define __WD_UDMA_H
+
+#include <stdbool.h>
+
+#include "wd_alg_common.h"
+
+typedef void (*wd_udma_cb_t)(void *cb_param);
+
+/**
+ * wd_udma_op_type - UDMA operation type.
+ */
+enum wd_udma_op_type {
+ WD_UDMA_MEMCPY,
+ WD_UDMA_MEMSET,
+ WD_UDMA_OP_MAX
+};
+
+/**
+ * wd_udma_sess_setup - udma session setup information.
+ * @sched_param: Parameters of the scheduling policy,
+ * usually allocated according to struct sched_params.
+ */
+struct wd_udma_sess_setup {
+ void *sched_param;
+};
+
+/**
+ * wd_data_addr - addr information of UDMA.
+ * @addr: Indicates the start address of the operation.
+ * @addr_size: Maximum size of the addr, in bytes.
+ * @count: Number of bytes to be set.
+ */
+struct wd_data_addr {
+ void *addr;
+ size_t addr_size;
+ size_t data_size;
+};
+
+/**
+ * wd_udma_req - udma operation request.
+ * @src: Pointer to the source address array.
+ * @dst: Pointer to the destination address array; for WD_UDMA_MEMSET, exactly one of src and dst must be set.
+ * @addr_num: Number of addresses.
+ * @value: Byte value to be written for WD_UDMA_MEMSET.
+ * @op_type: udma operation type.
+ * @cb: Callback function for the async mode.
+ * @cb_param: Parameters of the callback function.
+ * @status: Operation result written back by the driver.
+ */
+struct wd_udma_req {
+ struct wd_data_addr *src;
+ struct wd_data_addr *dst;
+ int addr_num;
+ int value;
+ enum wd_udma_op_type op_type;
+ wd_udma_cb_t cb;
+ void *cb_param;
+ int status;
+};
+
+/**
+ * wd_udma_init() - A simplified interface to initialize udma.
+ * To make the initialization simpler, ctx_params can be set to NULL,
+ * in which case the function uses the driver's default parameters.
+ *
+ * @alg: The algorithm users want to use.
+ * @sched_type: The scheduling type users want to use.
+ * @task_type: Task types, including soft computing, hardware and hybrid computing.
+ * @ctx_params: The ctx resources users want to use, including the ctx number
+ * for each operation type and the numa node of the business process.
+ *
+ * Return 0 if succeed and others if fail.
+ */
+int wd_udma_init(const char *alg, __u32 sched_type,
+ int task_type, struct wd_ctx_params *ctx_params);
+
+/**
+ * wd_udma_uninit() - Uninitialise ctx configuration and scheduler.
+ */
+void wd_udma_uninit(void);
+
+/**
+ * wd_udma_alloc_sess() - Allocate a wd udma session.
+ * @setup: Parameters to setup this session.
+ *
+ * Return the session handle, or 0 if it fails.
+ */
+handle_t wd_udma_alloc_sess(struct wd_udma_sess_setup *setup);
+
+/**
+ * wd_udma_free_sess() - Free a wd udma session.
+ * @sess: The session to be freed.
+ */
+void wd_udma_free_sess(handle_t sess);
+
+/**
+ * wd_do_udma_sync() - Send a sync udma request.
+ * @h_sess: The session to which the request will be sent.
+ * @req: Request.
+ */
+int wd_do_udma_sync(handle_t h_sess, struct wd_udma_req *req);
+
+/**
+ * wd_do_udma_async() - Send an async udma request.
+ * @h_sess: The session to which the request will be sent.
+ * @req: Request.
+ */
+int wd_do_udma_async(handle_t h_sess, struct wd_udma_req *req);
+
+/**
+ * wd_udma_poll() - Poll finished request.
+ *
+ * This function will call poll_policy function which is registered to wd udma
+ * by user.
+ */
+int wd_udma_poll(__u32 expt, __u32 *count);
+
+#endif /* __WD_UDMA_H */
diff --git a/include/wd_util.h b/include/wd_util.h
index 9e9d4e35..bbb18a7c 100644
--- a/include/wd_util.h
+++ b/include/wd_util.h
@@ -42,6 +42,7 @@ enum wd_type {
WD_DH_TYPE,
WD_ECC_TYPE,
WD_AGG_TYPE,
+ WD_UDMA_TYPE,
WD_TYPE_MAX,
};
diff --git a/libwd_dae.map b/libwd_dae.map
index 4c51b856..6597ff98 100644
--- a/libwd_dae.map
+++ b/libwd_dae.map
@@ -1,4 +1,4 @@
-UADK_CRYPTO_2.0 {
+UADK_DAE_2.0 {
global:
wd_agg_alloc_sess;
wd_agg_free_sess;
@@ -17,5 +17,14 @@
wd_sched_rr_instance;
wd_sched_rr_alloc;
wd_sched_rr_release;
+
+ wd_udma_alloc_sess;
+ wd_udma_free_sess;
+ wd_udma_init;
+ wd_udma_uninit;
+ wd_do_udma_sync;
+ wd_do_udma_async;
+ wd_udma_poll;
+ wd_udma_get_msg;
local: *;
};
diff --git a/wd_udma.c b/wd_udma.c
new file mode 100644
index 00000000..7675df30
--- /dev/null
+++ b/wd_udma.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright 2025 Huawei Technologies Co.,Ltd. All rights reserved.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <limits.h>
+#include "include/drv/wd_udma_drv.h"
+#include "wd_udma.h"
+
+struct wd_udma_sess {
+ const char *alg_name;
+ wd_dev_mask_t *dev_mask;
+ void *priv;
+ void *sched_key;
+};
+
+static struct wd_udma_setting {
+ enum wd_status status;
+ struct wd_ctx_config_internal config;
+ struct wd_sched sched;
+ struct wd_async_msg_pool pool;
+ struct wd_alg_driver *driver;
+ void *dlhandle;
+ void *dlh_list;
+} wd_udma_setting;
+
+static struct wd_init_attrs wd_udma_init_attrs;
+
+static void wd_udma_close_driver(void)
+{
+#ifndef WD_STATIC_DRV
+ wd_dlclose_drv(wd_udma_setting.dlh_list);
+ wd_udma_setting.dlh_list = NULL;
+#else
+ wd_release_drv(wd_udma_setting.driver);
+ hisi_udma_remove();
+#endif
+}
+
+static int wd_udma_open_driver(void)
+{
+#ifndef WD_STATIC_DRV
+ /*
+ * The driver lib file path can be set by an env param, then the libs
+ * are opened by wd_dlopen_drv().
+ * Passing NULL means the path is queried dynamically.
+ */
+ wd_udma_setting.dlh_list = wd_dlopen_drv(NULL);
+ if (!wd_udma_setting.dlh_list) {
+ WD_ERR("fail to open driver lib files.\n");
+ return -WD_EINVAL;
+ }
+#else
+ hisi_udma_probe();
+#endif
+ return WD_SUCCESS;
+}
+
+void wd_udma_free_sess(handle_t sess)
+{
+ struct wd_udma_sess *sess_t = (struct wd_udma_sess *)sess;
+
+ if (!sess_t) {
+ WD_ERR("invalid: free udma sess param NULL!\n");
+ return;
+ }
+
+ if (sess_t->sched_key)
+ free(sess_t->sched_key);
+ free(sess_t);
+}
+
+handle_t wd_udma_alloc_sess(struct wd_udma_sess_setup *setup)
+{
+ struct wd_udma_sess *sess;
+
+ if (!setup) {
+ WD_ERR("invalid: alloc udma sess setup NULL!\n");
+ return (handle_t)0;
+ }
+
+ sess = calloc(1, sizeof(struct wd_udma_sess));
+ if (!sess)
+ return (handle_t)0;
+
+ sess->alg_name = "udma";
+ /* Some simple schedulers don't need scheduling parameters */
+ sess->sched_key = (void *)wd_udma_setting.sched.sched_init(
+ wd_udma_setting.sched.h_sched_ctx, setup->sched_param);
+ if (WD_IS_ERR(sess->sched_key)) {
+ WD_ERR("failed to init session schedule key!\n");
+ goto free_sess;
+ }
+
+ return (handle_t)sess;
+
+free_sess:
+ free(sess);
+ return (handle_t)0;
+}
+
+static int wd_udma_addr_check(struct wd_data_addr *data_addr)
+{
+ if (unlikely(!data_addr->addr)) {
+ WD_ERR("invalid: udma addr is NULL!\n");
+ return -WD_EINVAL;
+ }
+
+ if (unlikely(!data_addr->data_size ||
+ data_addr->data_size > data_addr->addr_size)) {
+ WD_ERR("invalid: udma size is error, data_size %lu, addr_size is %lu!\n",
+ data_addr->data_size, data_addr->addr_size);
+ return -WD_EINVAL;
+ }
+
+ return WD_SUCCESS;
+}
+
+static int wd_udma_param_check(struct wd_udma_sess *sess,
+ struct wd_udma_req *req)
+{
+ struct wd_data_addr *src, *dst;
+ int i, ret;
+
+ if (unlikely(!sess || !req)) {
+ WD_ERR("invalid: input param NULL!\n");
+ return -WD_EINVAL;
+ }
+
+ if (unlikely(req->addr_num <= 0)) {
+ WD_ERR("invalid: addr num is error %d!\n", req->addr_num);
+ return -WD_EINVAL;
+ }
+
+ src = req->src;
+ dst = req->dst;
+ if (unlikely(req->op_type >= WD_UDMA_OP_MAX)) {
+ WD_ERR("invalid: op_type is error %u!\n", req->op_type);
+ return -WD_EINVAL;
+ } else if (unlikely(req->op_type == WD_UDMA_MEMCPY && (!src || !dst))) {
+ WD_ERR("invalid: memcpy src or dst is NULL!\n");
+ return -WD_EINVAL;
+ } else if (unlikely(req->op_type == WD_UDMA_MEMSET &&
+ ((!src && !dst) || (src && dst)))) {
+ WD_ERR("invalid: memset src and dst is error!\n");
+ return -WD_EINVAL;
+ }
+
+ if (req->op_type == WD_UDMA_MEMSET)
+ dst = !req->src ? req->dst : req->src;
+
+ for (i = 0; i < req->addr_num; i++) {
+ if (req->op_type == WD_UDMA_MEMCPY) {
+ ret = wd_udma_addr_check(&src[i]);
+ if (unlikely(ret)) {
+ WD_ERR("invalid: udma memcpy src addr is error!\n");
+ return -WD_EINVAL;
+ }
+
+ ret = wd_udma_addr_check(&dst[i]);
+ if (unlikely(ret)) {
+ WD_ERR("invalid: udma memcpy dst addr is error!\n");
+ return -WD_EINVAL;
+ }
+
+ if (unlikely(dst[i].data_size != src[i].data_size)) {
+ WD_ERR("invalid: udma memcpy data_size is error!\n"
+ "src %lu, dst %lu!\n",
+ dst[i].data_size, src[i].data_size);
+ return -WD_EINVAL;
+ }
+ } else {
+ ret = wd_udma_addr_check(&dst[i]);
+ if (unlikely(ret)) {
+ WD_ERR("invalid: udma memset addr is error!\n");
+ return -WD_EINVAL;
+ }
+ }
+ }
+
+ return WD_SUCCESS;
+}
+
+static void fill_udma_msg(struct wd_udma_msg *msg, struct wd_udma_req *req)
+{
+ msg->result = WD_EINVAL;
+
+ memcpy(&msg->req, req, sizeof(*req));
+ msg->op_type = req->op_type;
+ msg->addr_num = req->addr_num;
+ msg->value = req->value;
+ if (req->op_type == WD_UDMA_MEMSET) {
+ msg->dst = !req->src ? req->dst : req->src;
+ } else {
+ msg->src = req->src;
+ msg->dst = req->dst;
+ }
+}
+
+int wd_do_udma_sync(handle_t h_sess, struct wd_udma_req *req)
+{
+ struct wd_ctx_config_internal *config = &wd_udma_setting.config;
+ handle_t h_sched_ctx = wd_udma_setting.sched.h_sched_ctx;
+ struct wd_udma_sess *sess_t = (struct wd_udma_sess *)h_sess;
+ struct wd_msg_handle msg_handle;
+ struct wd_ctx_internal *ctx;
+ struct wd_udma_msg msg = {0};
+ __u32 idx;
+ int ret;
+
+ ret = wd_udma_param_check(sess_t, req);
+ if (unlikely(ret))
+ return ret;
+
+ idx = wd_udma_setting.sched.pick_next_ctx(h_sched_ctx,
+ sess_t->sched_key,
+ CTX_MODE_SYNC);
+ ret = wd_check_ctx(config, CTX_MODE_SYNC, idx);
+ if (unlikely(ret))
+ return ret;
+
+ wd_dfx_msg_cnt(config, WD_CTX_CNT_NUM, idx);
+ ctx = config->ctxs + idx;
+
+ fill_udma_msg(&msg, req);
+
+ msg_handle.send = wd_udma_setting.driver->send;
+ msg_handle.recv = wd_udma_setting.driver->recv;
+ pthread_spin_lock(&ctx->lock);
+ ret = wd_handle_msg_sync(wd_udma_setting.driver, &msg_handle, ctx->ctx,
+ &msg, NULL, wd_udma_setting.config.epoll_en);
+ pthread_spin_unlock(&ctx->lock);
+ if (unlikely(ret))
+ return ret;
+
+ req->status = msg.result;
+
+ return GET_NEGATIVE(msg.result);
+}
+
+int wd_do_udma_async(handle_t sess, struct wd_udma_req *req)
+{
+ struct wd_ctx_config_internal *config = &wd_udma_setting.config;
+ handle_t h_sched_ctx = wd_udma_setting.sched.h_sched_ctx;
+ struct wd_udma_sess *sess_t = (struct wd_udma_sess *)sess;
+ struct wd_udma_msg *msg = NULL;
+ struct wd_ctx_internal *ctx;
+ int ret, mid;
+ __u32 idx;
+
+ ret = wd_udma_param_check(sess_t, req);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!req->cb)) {
+ WD_ERR("invalid: udma input req cb is NULL!\n");
+ return -WD_EINVAL;
+ }
+
+ idx = wd_udma_setting.sched.pick_next_ctx(h_sched_ctx,
+ sess_t->sched_key,
+ CTX_MODE_ASYNC);
+ ret = wd_check_ctx(config, CTX_MODE_ASYNC, idx);
+ if (unlikely(ret))
+ return ret;
+ ctx = config->ctxs + idx;
+
+ mid = wd_get_msg_from_pool(&wd_udma_setting.pool, idx, (void **)&msg);
+ if (unlikely(mid < 0)) {
+ WD_ERR("failed to get msg from pool!\n");
+ return mid;
+ }
+
+ fill_udma_msg(msg, req);
+ msg->tag = mid;
+
+ ret = wd_alg_driver_send(wd_udma_setting.driver, ctx->ctx, msg);
+ if (unlikely(ret)) {
+ if (ret != -WD_EBUSY)
+ WD_ERR("failed to send udma BD, hw is err!\n");
+
+ goto fail_with_msg;
+ }
+
+ wd_dfx_msg_cnt(config, WD_CTX_CNT_NUM, idx);
+
+ return WD_SUCCESS;
+
+fail_with_msg:
+ wd_put_msg_to_pool(&wd_udma_setting.pool, idx, mid);
+
+ return ret;
+}
+
+static int wd_udma_poll_ctx(__u32 idx, __u32 expt, __u32 *count)
+{
+ struct wd_ctx_config_internal *config = &wd_udma_setting.config;
+ struct wd_udma_msg rcv_msg = {0};
+ struct wd_ctx_internal *ctx;
+ struct wd_udma_req *req;
+ struct wd_udma_msg *msg;
+ __u32 rcv_cnt = 0;
+ __u32 tmp = expt;
+ int ret;
+
+ *count = 0;
+
+ ret = wd_check_ctx(config, CTX_MODE_ASYNC, idx);
+ if (ret)
+ return ret;
+
+ ctx = config->ctxs + idx;
+
+ do {
+ ret = wd_alg_driver_recv(wd_udma_setting.driver, ctx->ctx, &rcv_msg);
+ if (ret == -WD_EAGAIN) {
+ return ret;
+ } else if (unlikely(ret)) {
+ WD_ERR("failed to async recv, ret = %d!\n", ret);
+ *count = rcv_cnt;
+ wd_put_msg_to_pool(&wd_udma_setting.pool, idx,
+ rcv_msg.tag);
+ return ret;
+ }
+ rcv_cnt++;
+ msg = wd_find_msg_in_pool(&wd_udma_setting.pool, idx, rcv_msg.tag);
+ if (!msg) {
+ WD_ERR("failed to find udma msg!\n");
+ return -WD_EINVAL;
+ }
+
+ msg->req.status = rcv_msg.result;
+ req = &msg->req;
+ req->cb(req);
+ wd_put_msg_to_pool(&wd_udma_setting.pool, idx, rcv_msg.tag);
+ *count = rcv_cnt;
+ } while (--tmp);
+
+ return ret;
+}
+
+int wd_udma_poll(__u32 expt, __u32 *count)
+{
+ handle_t h_sched_ctx = wd_udma_setting.sched.h_sched_ctx;
+
+ if (unlikely(!count || !expt)) {
+ WD_ERR("invalid: udma poll count is NULL or expt is 0!\n");
+ return -WD_EINVAL;
+ }
+
+ return wd_udma_setting.sched.poll_policy(h_sched_ctx, expt, count);
+}
+
+static void wd_udma_clear_status(void)
+{
+ wd_alg_clear_init(&wd_udma_setting.status);
+}
+
+static void wd_udma_alg_uninit(void)
+{
+ /* Uninit async request pool */
+ wd_uninit_async_request_pool(&wd_udma_setting.pool);
+ /* Unset config, sched, driver */
+ wd_clear_sched(&wd_udma_setting.sched);
+ wd_alg_uninit_driver(&wd_udma_setting.config, wd_udma_setting.driver);
+}
+
+void wd_udma_uninit(void)
+{
+ enum wd_status status;
+
+ wd_alg_get_init(&wd_udma_setting.status, &status);
+ if (status == WD_UNINIT)
+ return;
+
+ wd_udma_alg_uninit();
+ wd_alg_attrs_uninit(&wd_udma_init_attrs);
+ wd_alg_drv_unbind(wd_udma_setting.driver);
+ wd_udma_close_driver();
+ wd_alg_clear_init(&wd_udma_setting.status);
+}
+
+static int wd_udma_alg_init(struct wd_ctx_config *config, struct wd_sched *sched)
+{
+ int ret;
+
+ ret = wd_set_epoll_en("WD_UDMA_EPOLL_EN", &wd_udma_setting.config.epoll_en);
+ if (ret < 0)
+ return ret;
+
+ ret = wd_init_ctx_config(&wd_udma_setting.config, config);
+ if (ret < 0)
+ return ret;
+
+ ret = wd_init_sched(&wd_udma_setting.sched, sched);
+ if (ret < 0)
+ goto out_clear_ctx_config;
+
+ /* Allocate async pool for every ctx */
+ ret = wd_init_async_request_pool(&wd_udma_setting.pool, config, WD_POOL_MAX_ENTRIES,
+ sizeof(struct wd_udma_msg));
+ if (ret < 0)
+ goto out_clear_sched;
+
+ ret = wd_alg_init_driver(&wd_udma_setting.config, wd_udma_setting.driver);
+ if (ret)
+ goto out_clear_pool;
+
+ return WD_SUCCESS;
+
+out_clear_pool:
+ wd_uninit_async_request_pool(&wd_udma_setting.pool);
+out_clear_sched:
+ wd_clear_sched(&wd_udma_setting.sched);
+out_clear_ctx_config:
+ wd_clear_ctx_config(&wd_udma_setting.config);
+ return ret;
+}
+
+int wd_udma_init(const char *alg, __u32 sched_type, int task_type,
+ struct wd_ctx_params *ctx_params)
+{
+ struct wd_ctx_nums udma_ctx_num[WD_UDMA_OP_MAX] = {0};
+ struct wd_ctx_params udma_ctx_params = {0};
+ int state, ret = -WD_EINVAL;
+
+ pthread_atfork(NULL, NULL, wd_udma_clear_status);
+
+ state = wd_alg_try_init(&wd_udma_setting.status);
+ if (state)
+ return state;
+
+ if (!alg || sched_type >= SCHED_POLICY_BUTT ||
+ task_type < 0 || task_type >= TASK_MAX_TYPE) {
+ WD_ERR("invalid: input param is wrong!\n");
+ goto out_clear_init;
+ }
+
+ if (strcmp(alg, "udma")) {
+ WD_ERR("invalid: the alg %s not support!\n", alg);
+ goto out_clear_init;
+ }
+
+ state = wd_udma_open_driver();
+ if (state)
+ goto out_clear_init;
+
+ while (ret) {
+ memset(&wd_udma_setting.config, 0, sizeof(struct wd_ctx_config_internal));
+
+ /* Get alg driver and dev name */
+ wd_udma_setting.driver = wd_alg_drv_bind(task_type, alg);
+ if (!wd_udma_setting.driver) {
+ WD_ERR("fail to bind a valid driver.\n");
+ ret = -WD_EINVAL;
+ goto out_dlopen;
+ }
+
+ udma_ctx_params.ctx_set_num = udma_ctx_num;
+ ret = wd_ctx_param_init(&udma_ctx_params, ctx_params,
+ wd_udma_setting.driver, WD_UDMA_TYPE, WD_UDMA_OP_MAX);
+ if (ret) {
+ if (ret == -WD_EAGAIN) {
+ wd_disable_drv(wd_udma_setting.driver);
+ wd_alg_drv_unbind(wd_udma_setting.driver);
+ continue;
+ }
+ goto out_driver;
+ }
+
+ wd_udma_init_attrs.alg = alg;
+ wd_udma_init_attrs.sched_type = sched_type;
+ wd_udma_init_attrs.driver = wd_udma_setting.driver;
+ wd_udma_init_attrs.ctx_params = &udma_ctx_params;
+ wd_udma_init_attrs.alg_init = wd_udma_alg_init;
+ wd_udma_init_attrs.alg_poll_ctx = wd_udma_poll_ctx;
+ ret = wd_alg_attrs_init(&wd_udma_init_attrs);
+ if (ret) {
+ if (ret == -WD_ENODEV) {
+ wd_disable_drv(wd_udma_setting.driver);
+ wd_alg_drv_unbind(wd_udma_setting.driver);
+ wd_ctx_param_uninit(&udma_ctx_params);
+ continue;
+ }
+ WD_ERR("failed to init alg attrs!\n");
+ goto out_params_uninit;
+ }
+ }
+
+ wd_alg_set_init(&wd_udma_setting.status);
+ wd_ctx_param_uninit(&udma_ctx_params);
+
+ return WD_SUCCESS;
+
+out_params_uninit:
+ wd_ctx_param_uninit(&udma_ctx_params);
+out_driver:
+ wd_alg_drv_unbind(wd_udma_setting.driver);
+out_dlopen:
+ wd_udma_close_driver();
+out_clear_init:
+ wd_alg_clear_init(&wd_udma_setting.status);
+ return ret;
+}
+
+struct wd_udma_msg *wd_udma_get_msg(__u32 idx, __u32 tag)
+{
+ return wd_find_msg_in_pool(&wd_udma_setting.pool, idx, tag);
+}
diff --git a/wd_util.c b/wd_util.c
index f1b27bf8..38d2d375 100644
--- a/wd_util.c
+++ b/wd_util.c
@@ -63,6 +63,7 @@ static const char *wd_env_name[WD_TYPE_MAX] = {
"WD_DH_CTX_NUM",
"WD_ECC_CTX_NUM",
"WD_AGG_CTX_NUM",
+ "WD_UDMA_CTX_NUM",
};
struct async_task {
@@ -107,6 +108,7 @@ static struct acc_alg_item alg_options[] = {
{"deflate", "deflate"},
{"lz77_zstd", "lz77_zstd"},
{"hashagg", "hashagg"},
+ {"udma", "udma"},
{"rsa", "rsa"},
{"dh", "dh"},
--
2.33.0