13 Jun '25
From: Longfang Liu <liulongfang(a)huawei.com>
Now that the shared queue memory allocation interface has been removed,
the UADK test tools must drop their shared-memory functionality as well.
For individual memory reservations, the wd_reserve_memory interface
should be used. When allocating memory for multiple queues, each queue
should independently request its own reserved memory.
Signed-off-by: Longfang Liu <liulongfang(a)huawei.com>
Signed-off-by: Qi Tao <taoqi10(a)huawei.com>
---
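Note: each queue now makes its own reservation instead of calling
wd_share_reserved_memory(). A minimal sketch of that pattern follows
(illustrative only: the header name, algorithm string and error handling
are assumptions, not part of this patch):

    #include <string.h>
    #include <stdio.h>
    #include "wd.h"    /* assumed v1 header providing struct wd_queue */

    /* Request q_num queues and give each one its own reserved memory. */
    static int reserve_per_queue(struct wd_queue *qs, int q_num,
                                 char *alg, unsigned long m_size)
    {
            int i, ret;

            for (i = 0; i < q_num; i++) {
                    memset(&qs[i], 0, sizeof(qs[i]));
                    qs[i].capa.alg = alg;

                    ret = wd_request_queue(&qs[i]);
                    if (ret) {
                            printf("wd request queue %d fail!\n", i);
                            goto err_release;
                    }

                    /* independent reservation replaces the old sharing call */
                    if (!wd_reserve_memory(&qs[i], m_size)) {
                            printf("wd reserve memory on queue %d fail!\n", i);
                            wd_release_queue(&qs[i]);
                            goto err_release;
                    }
            }
            return 0;

    err_release:
            while (--i >= 0)
                    wd_release_queue(&qs[i]);
            return 1;
    }
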
v1/test/hisi_hpre_test/hpre_test_tools.c | 392 -----------------------
v1/test/hisi_zip_test_sgl/wd_sched_sgl.c | 310 +++++++++---------
v1/test/test_mm/test_wd_mem.c | 8 +-
v1/test/wd_sched.c | 247 +++++++-------
4 files changed, 300 insertions(+), 657 deletions(-)
diff --git a/v1/test/hisi_hpre_test/hpre_test_tools.c b/v1/test/hisi_hpre_test/hpre_test_tools.c
index 7f562f34..10a4ade9 100755
--- a/v1/test/hisi_hpre_test/hpre_test_tools.c
+++ b/v1/test/hisi_hpre_test/hpre_test_tools.c
@@ -644,317 +644,6 @@ int application_release_multiple_queue(char *dev, char *alg_type, unsigned int q
printf("application_release_multiple_queue test end!\n");
return 0;
}
-
-/***
-
-***/
-int hpre_dev_queue_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //the target_q queue shares the reserved memory of queue q;
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_node_queue_share(char *dev, unsigned int node, unsigned int share_node, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
- q.node_mask = node;
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- target_q.node_mask = node;
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //the target_q queue shares the reserved memory of queue q;
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on target q fail!\n");
- return 1;
- }
-
- ret = wd_share_reserved_memory(&q, &target_q);
-
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on share q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on share target q fail!\n");
- return 1;
- }
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_dev_queue_interact_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return ret;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- addr = wd_reserve_memory(&target_q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- //target_q
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
-/***
-
-***/
-int hpre_dev_queue_cross_proc_share(char *dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- pid_t pid;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size=0;
-
- pid = fork();
- if(pid < 0)
- {
- printf("Creation process failed, pid:%d\n",pid);
- return 1;
- }
- else if(pid == 0)
- {
- printf("child process:%d\n", pid);
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("request queue fail!\n");
- exit(1);
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("queue reserve memory fail!\n");
- exit(2);
- }
- printf("queue reserve memory success!\n");
- memset(addr, 0, memory_size);
- exit(0);
- }
- printf("parent process:%d\n", pid);
- pid_t wpid;
- int status = -1;
- wpid = waitpid(pid, &status, WUNTRACED | WCONTINUED);
- if( wpid < 0)
- {
- printf("exited, status=%d\n", WEXITSTATUS(status));
- return(status);
- }
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&target_q);
- wd_release_queue(&q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
/***
***/
@@ -1696,87 +1385,6 @@ int main(int arc, char *argv[])
return 1;
}
}
- else if(!strcmp(argv[1], "queue-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device on which to request the queue
- argv[4] - device that shares the reserved memory
- argv[5] - reserved memory size for the requested queue
- ***/
- //request a single queue, reserve memory, and share it with another queue
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "node-queue-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device on which to request the queue
- argv[4] - device node
- argv[5] - shared-memory device node
- argv[6] - reserved memory size for the requested queue
- ***/
- //request a single queue, reserve memory, and share it with another queue
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- unsigned int node=0;
- node = strtoul(argv[4], NULL, 16);
- unsigned int share_node=0;
- share_node = strtoul(argv[5], NULL, 16);
- memory_size = strtoul(argv[6], NULL, 10);
-
- ret = hpre_node_queue_share(dev, node, share_node, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-interact-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device on which to request the queue
- argv[4] - device that shares the reserved memory
- argv[5] - reserved memory size for the requested queue
- ***/
- //the queue reserves memory and then serves as the target queue for sharing
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_interact_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-cross-proc-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device on which to request the queue
- argv[4] - reserved memory size for the requested queue
- ***/
- //share the queue's reserved memory across processes
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- memory_size = strtoul(argv[4], NULL, 10);
- ret = hpre_dev_queue_cross_proc_share(dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
else if(!strcmp(argv[1], "mult-thread-queue"))
{
/***
diff --git a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
index 31637565..7a3be22c 100644
--- a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
+++ b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
@@ -23,96 +23,33 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
- } else { /* use sgl */
- sched->msgs[i].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
- sched->msgs[i].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i, data_fmt);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- } else { /* use sgl */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_sgl(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_sgl(pool, sched->msgs[i].data_out);
- }
- wd_sglpool_destroy(pool);
- }
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
@@ -124,14 +61,6 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
struct wd_sglpool_setup sp;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -145,12 +74,22 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
+ return 0;
+ }
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
+ goto out_with_queues;
+ }
+
+ qinfo = sched->qs[i].qinfo;
pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -162,15 +101,18 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
qinfo->br.iova_map = (void *)wd_blk_iova_map;
qinfo->br.iova_unmap = (void *)wd_blk_iova_unmap;
qinfo->br.usr = pool;
- } else { /* use sgl*/
- memset(&sp, 0, sizeof(sp));
- sp.buf_size = sched->msg_data_size / 10;
- sp.align_size = 64;
- sp.sge_num_in_sgl = 60;
- sp.buf_num_in_sgl = sp.sge_num_in_sgl;
- sp.sgl_num = 3 * sched->msg_cache_num;
- sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
-
+ }
+ } else { /* use sgl*/
+ memset(&sp, 0, sizeof(sp));
+ sp.buf_size = sched->msg_data_size / 10;
+ sp.align_size = 64;
+ sp.sge_num_in_sgl = 60;
+ sp.buf_num_in_sgl = sp.sge_num_in_sgl;
+ sp.sgl_num = 3 * sched->msg_cache_num;
+ sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
pool = wd_sglpool_create(&sched->qs[0], &sp);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -189,17 +131,132 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
+
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ } else { /* use sgl */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_sgl(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_sgl(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
+
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
+ qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
+ flags = qinfo->dev_flags;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ } else { /* use sgl */
+ sched->msgs[j].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
+ sched->msgs[j].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ }
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j, data_fmt);
+ }
+ }
+
+ return 0;
+
+err_with_stat:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched, data_fmt);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
{
@@ -211,57 +268,22 @@ int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
if (ret < 0)
return -EINVAL;
- qinfo = sched->qs[0].qinfo;
- flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
- }
- }
- }
-
sched->cl = sched->msg_cache_num;
ret = __init_cache(sched, data_fmt);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched, data_fmt);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched, data_fmt);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched, data_fmt);
}
static int __sync_send(struct wd_scheduler *sched)
@@ -350,4 +372,4 @@ int wd_sched_work(struct wd_scheduler *sched, int remained)
}
return sched->cl;
-}
\ No newline at end of file
+}
diff --git a/v1/test/test_mm/test_wd_mem.c b/v1/test/test_mm/test_wd_mem.c
index 09824b99..e2eec60e 100644
--- a/v1/test/test_mm/test_wd_mem.c
+++ b/v1/test/test_mm/test_wd_mem.c
@@ -208,10 +208,10 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo1.q, &rsa_q);
+ ret = wd_request_queue(&pdata->qinfo1.q);
if (ret) {
wd_release_queue(&rsa_q);
- MMT_PRT("Proc-%d, thrd-%d:share mem on rsa queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:rsa queue fail!\n",
pid, thread_id);
return NULL;
}
@@ -226,9 +226,9 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo2.q, &zlib_q);
+ ret = wd_request_queue(&pdata->qinfo2.q);
if (ret) {
- MMT_PRT("Proc-%d, thrd-%d:share mem on zlib queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:zlib queue fail!\n",
pid, thread_id);
goto fail_release;
diff --git a/v1/test/wd_sched.c b/v1/test/wd_sched.c
index f5e46699..ce1d2604 100644
--- a/v1/test/wd_sched.c
+++ b/v1/test/wd_sched.c
@@ -22,94 +22,40 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched)
{
- int ret, i, j;
+ struct wd_blkpool_setup mm_setup;
unsigned int flags = 0;
struct q_info *qinfo;
- struct wd_blkpool_setup mm_setup;
+ int ret, i, j;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -120,18 +66,29 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
sched->ss_region = malloc(sched->ss_region_size);
if (!sched->ss_region) {
WD_ERR("fail to alloc sched ss region mem!\n");
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
- pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
+
+ qinfo = sched->qs[i].qinfo;
+ pool = wd_blkpool_create(&sched->qs[i], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
ret = -ENOMEM;
+ wd_release_queue(&sched->qs[i]);
goto out_with_queues;
}
qinfo->br.alloc = (void *)wd_alloc_blk;
@@ -144,79 +101,135 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
-int wd_sched_init(struct wd_scheduler *sched)
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched)
{
- int ret, j, k;
- unsigned int flags;
struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
- ret = wd_sched_preinit(sched);
- if (ret < 0)
- return -EINVAL;
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_alloc;
}
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j);
}
}
- sched->cl = sched->msg_cache_num;
+ return 0;
+
+err_with_alloc:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
+
+int wd_sched_init(struct wd_scheduler *sched)
+{
+ int ret;
+ ret = wd_sched_preinit(sched);
+ if (ret < 0)
+ return -EINVAL;
+
+ sched->cl = sched->msg_cache_num;
ret = __init_cache(sched);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched);
}
static int __sync_send(struct wd_scheduler *sched)
--
2.33.0
1
0
13 Jun '25
From: Longfang Liu <liulongfang(a)huawei.com>
After removing the shared queue memory allocation interface, the UADK
test tools must also eliminate the shared memory functionality.
For individual memory reservations, the wd_reserve_memory interface
should be used. When allocating memory for multiple queues, each queue
should independently request its own reserved memory allocation.
Signed-off-by: Longfang Liu <liulongfang(a)huawei.com>
Signed-off-by: Qi Tao <taoqi10(a)huawei.com>
---
v1/test/hisi_hpre_test/hpre_test_tools.c | 392 -----------------------
v1/test/hisi_zip_test_sgl/wd_sched_sgl.c | 310 +++++++++---------
v1/test/test_mm/test_wd_mem.c | 8 +-
v1/test/wd_sched.c | 247 +++++++-------
4 files changed, 300 insertions(+), 657 deletions(-)
diff --git a/v1/test/hisi_hpre_test/hpre_test_tools.c b/v1/test/hisi_hpre_test/hpre_test_tools.c
index 7f562f34..10a4ade9 100755
--- a/v1/test/hisi_hpre_test/hpre_test_tools.c
+++ b/v1/test/hisi_hpre_test/hpre_test_tools.c
@@ -644,317 +644,6 @@ int application_release_multiple_queue(char *dev, char *alg_type, unsigned int q
printf("application_release_multiple_queue test end!\n");
return 0;
}
-
-/***
-
-***/
-int hpre_dev_queue_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //target_q队列共享q队列预留内存;
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_node_queue_share(char *dev, unsigned int node, unsigned int share_node, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
- q.node_mask = node;
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- target_q.node_mask = node;
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //target_q队列共享q队列预留内存;
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on target q fail!\n");
- return 1;
- }
-
- ret = wd_share_reserved_memory(&q, &target_q);
-
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on share q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on share target q fail!\n");
- return 1;
- }
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_dev_queue_interact_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return ret;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- addr = wd_reserve_memory(&target_q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- //target_q
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
-/***
-
-***/
-int hpre_dev_queue_cross_proc_share(char *dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- pid_t pid;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size=0;
-
- pid = fork();
- if(pid < 0)
- {
- printf("Creation process failed, pid:%d\n",pid);
- return 1;
- }
- else if(pid == 0)
- {
- printf("child process:%d\n", pid);
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("request queue fail!\n");
- exit(1);
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("queue reserve memory fail!\n");
- exit(2);
- }
- printf("queue reserve memory success!\n");
- memset(addr, 0, memory_size);
- exit(0);
- }
- printf("parent process:%d\n", pid);
- pid_t wpid;
- int status = -1;
- wpid = waitpid(pid, &status, WUNTRACED | WCONTINUED);
- if( wpid < 0)
- {
- printf("exited, status=%d\n", WEXITSTATUS(status));
- return(status);
- }
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&target_q);
- wd_release_queue(&q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
/***
***/
@@ -1696,87 +1385,6 @@ int main(int arc, char *argv[])
return 1;
}
}
- else if(!strcmp(argv[1], "queue-share"))
- {
- /***
- argv[2] - 表示算法类型
- argv[3] - 表示申请队列设备
- argv[4] - 表示共享预留内存的设备
- argv[5] - 表示申请队列的预留内存大小
- ***/
- //申请单个队列,预留内存,与其它队列共享预留内存
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "node-queue-share"))
- {
- /***
- argv[2] - 表示算法类型
- argv[3] - 表示申请队列设备
- argv[4] - 表示设备node
- argv[5] - 表示共享内存设备node
- argv[6] - 表示申请队列的预留内存大小
- ***/
- //申请单个队列,预留内存,与其它队列共享预留内存
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- unsigned int node=0;
- node = strtoul(argv[4], NULL, 16);
- unsigned int share_node=0;
- share_node = strtoul(argv[5], NULL, 16);
- memory_size = strtoul(argv[6], NULL, 10);
-
- ret = hpre_node_queue_share(dev, node, share_node, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-interact-share"))
- {
- /***
- argv[2] - 表示算法类型
- argv[3] - 表示申请队列设备
- argv[4] - 表示共享预留内存的设备
- argv[5] - 表示申请队列的预留内存大小
- ***/
- //队列预留内存后作为共享的目标队列
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_interact_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-cross-proc-share"))
- {
- /***
- argv[2] - 表示算法类型
- argv[3] - 表示申请队列设备
- argv[4] - 表示申请队列的预留内存大小
- ***/
- //跨进程进行队列共享
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- memory_size = strtoul(argv[4], NULL, 10);
- ret = hpre_dev_queue_cross_proc_share(dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
else if(!strcmp(argv[1], "mult-thread-queue"))
{
/***
diff --git a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
index 31637565..7a3be22c 100644
--- a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
+++ b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
@@ -23,96 +23,33 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
- } else { /* use sgl */
- sched->msgs[i].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
- sched->msgs[i].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i, data_fmt);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- } else { /* use sgl */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_sgl(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_sgl(pool, sched->msgs[i].data_out);
- }
- wd_sglpool_destroy(pool);
- }
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
@@ -124,14 +61,6 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
struct wd_sglpool_setup sp;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -145,12 +74,22 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
+ return 0;
+ }
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
+ goto out_with_queues;
+ }
+
+ qinfo = sched->qs[i].qinfo;
pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -162,15 +101,18 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
qinfo->br.iova_map = (void *)wd_blk_iova_map;
qinfo->br.iova_unmap = (void *)wd_blk_iova_unmap;
qinfo->br.usr = pool;
- } else { /* use sgl*/
- memset(&sp, 0, sizeof(sp));
- sp.buf_size = sched->msg_data_size / 10;
- sp.align_size = 64;
- sp.sge_num_in_sgl = 60;
- sp.buf_num_in_sgl = sp.sge_num_in_sgl;
- sp.sgl_num = 3 * sched->msg_cache_num;
- sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
-
+ }
+ } else { /* use sgl*/
+ memset(&sp, 0, sizeof(sp));
+ sp.buf_size = sched->msg_data_size / 10;
+ sp.align_size = 64;
+ sp.sge_num_in_sgl = 60;
+ sp.buf_num_in_sgl = sp.sge_num_in_sgl;
+ sp.sgl_num = 3 * sched->msg_cache_num;
+ sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
pool = wd_sglpool_create(&sched->qs[0], &sp);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -189,17 +131,132 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
+
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ } else { /* use sgl */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_sgl(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_sgl(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
+
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
+ qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
+ flags = qinfo->dev_flags;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ } else { /* use sgl */
+ sched->msgs[j].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
+ sched->msgs[j].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ }
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j, data_fmt);
+ }
+ }
+
+ return 0;
+
+err_with_stat:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched, data_fmt);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
{
@@ -211,57 +268,22 @@ int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
if (ret < 0)
return -EINVAL;
- qinfo = sched->qs[0].qinfo;
- flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
- }
- }
- }
-
sched->cl = sched->msg_cache_num;
ret = __init_cache(sched, data_fmt);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched, data_fmt);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched, data_fmt);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched, data_fmt);
}
static int __sync_send(struct wd_scheduler *sched)
@@ -350,4 +372,4 @@ int wd_sched_work(struct wd_scheduler *sched, int remained)
}
return sched->cl;
-}
\ No newline at end of file
+}
diff --git a/v1/test/test_mm/test_wd_mem.c b/v1/test/test_mm/test_wd_mem.c
index 09824b99..e2eec60e 100644
--- a/v1/test/test_mm/test_wd_mem.c
+++ b/v1/test/test_mm/test_wd_mem.c
@@ -208,10 +208,10 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo1.q, &rsa_q);
+ ret = wd_request_queue(&pdata->qinfo1.q);
if (ret) {
wd_release_queue(&rsa_q);
- MMT_PRT("Proc-%d, thrd-%d:share mem on rsa queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:rsa queue fail!\n",
pid, thread_id);
return NULL;
}
@@ -226,9 +226,9 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo2.q, &zlib_q);
+ ret = wd_request_queue(&pdata->qinfo2.q);
if (ret) {
- MMT_PRT("Proc-%d, thrd-%d:share mem on zlib queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:zlib queue fail!\n",
pid, thread_id);
goto fail_release;
diff --git a/v1/test/wd_sched.c b/v1/test/wd_sched.c
index f5e46699..ce1d2604 100644
--- a/v1/test/wd_sched.c
+++ b/v1/test/wd_sched.c
@@ -22,94 +22,40 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched)
{
- int ret, i, j;
+ struct wd_blkpool_setup mm_setup;
unsigned int flags = 0;
struct q_info *qinfo;
- struct wd_blkpool_setup mm_setup;
+ int ret, i, j;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -120,18 +66,29 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
sched->ss_region = malloc(sched->ss_region_size);
if (!sched->ss_region) {
WD_ERR("fail to alloc sched ss region mem!\n");
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
- pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
+
+ qinfo = sched->qs[i].qinfo;
+ pool = wd_blkpool_create(&sched->qs[i], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
ret = -ENOMEM;
+ wd_release_queue(&sched->qs[i]);
goto out_with_queues;
}
qinfo->br.alloc = (void *)wd_alloc_blk;
@@ -144,79 +101,135 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
-int wd_sched_init(struct wd_scheduler *sched)
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched)
{
- int ret, j, k;
- unsigned int flags;
struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
- ret = wd_sched_preinit(sched);
- if (ret < 0)
- return -EINVAL;
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_alloc;
}
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j);
}
}
- sched->cl = sched->msg_cache_num;
+ return 0;
+
+err_with_alloc:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
+
+int wd_sched_init(struct wd_scheduler *sched)
+{
+ int ret;
+ ret = wd_sched_preinit(sched);
+ if (ret < 0)
+ return -EINVAL;
+
+ sched->cl = sched->msg_cache_num;
ret = __init_cache(sched);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched);
}
static int __sync_send(struct wd_scheduler *sched)
--
2.33.0
1
0
13 Jun '25
From: Longfang Liu <liulongfang(a)huawei.com>
After removing the shared queue memory allocation interface, the UADK
test tools must also eliminate the shared memory functionality.
For individual memory reservations, the wd_reserve_memory interface
should be used. When allocating memory for multiple queues, each queue
should independently request its own reserved memory allocation.
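As an illustration only (not part of the patch): a minimal per-queue sketch of the pattern described above, built from the wd_request_queue()/wd_reserve_memory()/wd_release_queue() calls used elsewhere in the v1 tests. The header name, algorithm string, device path and size are placeholders.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "wd.h"	/* assumed v1 header declaring struct wd_queue and the wd_* calls below */

/* Request one queue and give it its own private reserved region, with no sharing. */
static int reserve_one_queue(const char *dev, char *alg, size_t size)
{
	struct wd_queue q;
	void *addr;
	int ret;

	memset(&q, 0, sizeof(q));
	q.capa.alg = alg;
	snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);

	ret = wd_request_queue(&q);		/* each queue is requested separately */
	if (ret)
		return ret;

	addr = wd_reserve_memory(&q, size);	/* ...and reserves its own memory */
	if (!addr) {
		wd_release_queue(&q);
		return -ENOMEM;
	}
	memset(addr, 0, size);

	/* use the queue here; no wd_share_reserved_memory() call is needed */
	wd_release_queue(&q);
	return 0;
}

For several queues, the same request-then-reserve pair is simply repeated for each queue, which mirrors the per-queue pool creation done in the wd_sched changes below.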
Signed-off-by: Longfang Liu <liulongfang(a)huawei.com>
Signed-off-by: Qi Tao <taoqi10(a)huawei.com>
---
v1/test/hisi_hpre_test/hpre_test_tools.c | 392 -----------------------
v1/test/hisi_zip_test_sgl/wd_sched_sgl.c | 310 +++++++++---------
v1/test/test_mm/test_wd_mem.c | 8 +-
v1/test/wd_sched.c | 247 +++++++-------
4 files changed, 300 insertions(+), 657 deletions(-)
diff --git a/v1/test/hisi_hpre_test/hpre_test_tools.c b/v1/test/hisi_hpre_test/hpre_test_tools.c
index 7f562f34..10a4ade9 100755
--- a/v1/test/hisi_hpre_test/hpre_test_tools.c
+++ b/v1/test/hisi_hpre_test/hpre_test_tools.c
@@ -644,317 +644,6 @@ int application_release_multiple_queue(char *dev, char *alg_type, unsigned int q
printf("application_release_multiple_queue test end!\n");
return 0;
}
-
-/***
-
-***/
-int hpre_dev_queue_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //target_q shares the reserved memory of queue q
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_node_queue_share(char *dev, unsigned int node, unsigned int share_node, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
- q.node_mask = node;
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return 1;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- target_q.node_mask = node;
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- //target_q shares the reserved memory of queue q
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on target q fail!\n");
- return 1;
- }
-
- ret = wd_share_reserved_memory(&q, &target_q);
-
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
- ret = do_dh(&q);
- if(ret)
- {
- printf("do dh on share q fail!\n");
- return 1;
- }
- ret = do_dh(&target_q);
- if(ret)
- {
- printf("do dh on share target q fail!\n");
- return 1;
- }
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-/***
-
-***/
-int hpre_dev_queue_interact_share(char *dev, char * share_dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size;
-
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("wd request queue fail!\n");
- return ret;
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", share_dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- addr = wd_reserve_memory(&target_q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd reserve memory fail!\n");
- return 1;
- }
- printf("wd reserve memory success!\n");
- memset(addr, 0, memory_size);
-
- //target_q
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&q);
- wd_release_queue(&target_q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
-/***
-
-***/
-int hpre_dev_queue_cross_proc_share(char *dev, char *alg_type, unsigned long m_size)
-{
- void *addr=NULL;
- int ret = 0;
- pid_t pid;
- struct wd_queue q;
- struct wd_queue target_q;
- unsigned long memory_size=0;
-
- pid = fork();
- if(pid < 0)
- {
- printf("Creation process failed, pid:%d\n",pid);
- return 1;
- }
- else if(pid == 0)
- {
- printf("child process:%d\n", pid);
- memset((void *)&q, 0, sizeof(q));
- q.capa.alg = alg_type;
- snprintf(q.dev_path, sizeof(q.dev_path), "%s", dev);
- printf("queue path:%s\n", q.dev_path);
-
- ret = wd_request_queue(&q);
- if(ret)
- {
- printf("request queue fail!\n");
- exit(1);
- }
- printf("wd request queue success!\n");
- memory_size = m_size;
- addr = wd_reserve_memory(&q, memory_size);
- if(!addr)
- {
- wd_release_queue(&q);
- printf("queue reserve memory fail!\n");
- exit(2);
- }
- printf("queue reserve memory success!\n");
- memset(addr, 0, memory_size);
- exit(0);
- }
- printf("parent process:%d\n", pid);
- pid_t wpid;
- int status = -1;
- wpid = waitpid(pid, &status, WUNTRACED | WCONTINUED);
- if( wpid < 0)
- {
- printf("exited, status=%d\n", WEXITSTATUS(status));
- return(status);
- }
-
- memset((void *)&target_q, 0, sizeof(target_q));
- target_q.capa.alg = alg_type;
- snprintf(target_q.dev_path, sizeof(target_q.dev_path), "%s", dev);
- printf("target queue path:%s\n", target_q.dev_path);
-
- ret = wd_request_queue(&target_q);
- if(ret)
- {
- wd_release_queue(&q);
- printf("wd request target_q queue fail!\n");
- return 1;
- }
- printf("wd request target_q queue success!\n");
- ret = wd_share_reserved_memory(&q, &target_q);
- if(ret)
- {
- wd_release_queue(&target_q);
- wd_release_queue(&q);
- printf("wd target_q queue share reserved memory fail!\n");
- return 1;
- }
- printf("wd target_q queue share reserved memory success!\n");
-
- wd_release_queue(&target_q);
- wd_release_queue(&q);
-
- return 0;
-}
-
/***
***/
@@ -1696,87 +1385,6 @@ int main(int arc, char *argv[])
return 1;
}
}
- else if(!strcmp(argv[1], "queue-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device used to request the queue
- argv[4] - device that shares the reserved memory
- argv[5] - reserved memory size of the requested queue
- ***/
- //request a single queue, reserve memory, and share the reserved memory with other queues
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "node-queue-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device used to request the queue
- argv[4] - NUMA node of the device
- argv[5] - NUMA node of the shared-memory device
- argv[6] - reserved memory size of the requested queue
- ***/
- //request a single queue, reserve memory, and share the reserved memory with other queues
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- unsigned int node=0;
- node = strtoul(argv[4], NULL, 16);
- unsigned int share_node=0;
- share_node = strtoul(argv[5], NULL, 16);
- memory_size = strtoul(argv[6], NULL, 10);
-
- ret = hpre_node_queue_share(dev, node, share_node, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-interact-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device used to request the queue
- argv[4] - device that shares the reserved memory
- argv[5] - reserved memory size of the requested queue
- ***/
- //after reserving memory, the queue is used as the target of the sharing
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- snprintf(share_dev, sizeof(share_dev), "%s", argv[4]);
- memory_size = strtoul(argv[5], NULL, 10);
-
- ret = hpre_dev_queue_interact_share(dev, share_dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
- else if(!strcmp(argv[1], "queue-cross-proc-share"))
- {
- /***
- argv[2] - algorithm type
- argv[3] - device used to request the queue
- argv[4] - reserved memory size of the requested queue
- ***/
- //share the queue's reserved memory across processes
- snprintf(algorithm_type, sizeof(algorithm_type), "%s", argv[2]);
- snprintf(dev, sizeof(dev), "%s", argv[3]);
- memory_size = strtoul(argv[4], NULL, 10);
- ret = hpre_dev_queue_cross_proc_share(dev, algorithm_type, memory_size);
- if(0 != ret)
- {
- return 1;
- }
- }
else if(!strcmp(argv[1], "mult-thread-queue"))
{
/***
diff --git a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
index 31637565..7a3be22c 100644
--- a/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
+++ b/v1/test/hisi_zip_test_sgl/wd_sched_sgl.c
@@ -23,96 +23,33 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
- } else { /* use sgl */
- sched->msgs[i].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
- sched->msgs[i].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
- }
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i, data_fmt);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- } else { /* use sgl */
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_sgl(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_sgl(pool, sched->msgs[i].data_out);
- }
- wd_sglpool_destroy(pool);
- }
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
@@ -124,14 +61,6 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
struct wd_sglpool_setup sp;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -145,12 +74,22 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
+ return 0;
+ }
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer*/
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
+ goto out_with_queues;
+ }
+
+ qinfo = sched->qs[i].qinfo;
pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -162,15 +101,18 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
qinfo->br.iova_map = (void *)wd_blk_iova_map;
qinfo->br.iova_unmap = (void *)wd_blk_iova_unmap;
qinfo->br.usr = pool;
- } else { /* use sgl*/
- memset(&sp, 0, sizeof(sp));
- sp.buf_size = sched->msg_data_size / 10;
- sp.align_size = 64;
- sp.sge_num_in_sgl = 60;
- sp.buf_num_in_sgl = sp.sge_num_in_sgl;
- sp.sgl_num = 3 * sched->msg_cache_num;
- sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
-
+ }
+ } else { /* use sgl*/
+ memset(&sp, 0, sizeof(sp));
+ sp.buf_size = sched->msg_data_size / 10;
+ sp.align_size = 64;
+ sp.sge_num_in_sgl = 60;
+ sp.buf_num_in_sgl = sp.sge_num_in_sgl;
+ sp.sgl_num = 3 * sched->msg_cache_num;
+ sp.buf_num = sp.buf_num_in_sgl * sp.sgl_num + sp.sgl_num * 2;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
pool = wd_sglpool_create(&sched->qs[0], &sp);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
@@ -189,17 +131,132 @@ static int wd_sched_preinit(struct wd_scheduler *sched, int data_fmt)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ if (data_fmt == WD_FLAT_BUF)
+ wd_blkpool_destroy(qinfo->br.usr);
+ else
+ wd_sglpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
+
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ } else { /* use sgl */
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_sgl(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_sgl(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched, int data_fmt)
+{
+ struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
+
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
+ qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
+ flags = qinfo->dev_flags;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ if (data_fmt == WD_FLAT_BUF) { /* use pbuffer */
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ } else { /* use sgl */
+ sched->msgs[j].data_in = wd_alloc_sgl(pool, sched->msg_data_size);
+ sched->msgs[j].data_out = wd_alloc_sgl(pool, sched->msg_data_size);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_stat;
+ }
+ }
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j, data_fmt);
+ }
+ }
+
+ return 0;
+
+err_with_stat:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched, data_fmt);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
{
@@ -211,57 +268,22 @@ int wd_sched_init(struct wd_scheduler *sched, int data_fmt)
if (ret < 0)
return -EINVAL;
- qinfo = sched->qs[0].qinfo;
- flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
- }
- }
- }
-
sched->cl = sched->msg_cache_num;
ret = __init_cache(sched, data_fmt);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched, data_fmt);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched, int data_fmt)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched, data_fmt);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched, data_fmt);
}
static int __sync_send(struct wd_scheduler *sched)
@@ -350,4 +372,4 @@ int wd_sched_work(struct wd_scheduler *sched, int remained)
}
return sched->cl;
-}
\ No newline at end of file
+}
diff --git a/v1/test/test_mm/test_wd_mem.c b/v1/test/test_mm/test_wd_mem.c
index 09824b99..e2eec60e 100644
--- a/v1/test/test_mm/test_wd_mem.c
+++ b/v1/test/test_mm/test_wd_mem.c
@@ -208,10 +208,10 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo1.q, &rsa_q);
+ ret = wd_request_queue(&pdata->qinfo1.q);
if (ret) {
wd_release_queue(&rsa_q);
- MMT_PRT("Proc-%d, thrd-%d:share mem on rsa queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:rsa queue fail!\n",
pid, thread_id);
return NULL;
}
@@ -226,9 +226,9 @@ void *mmt_sys_test_thread(void *data)
return NULL;
}
- ret = wd_share_reserved_memory(pdata->qinfo2.q, &zlib_q);
+ ret = wd_request_queue(&pdata->qinfo2.q);
if (ret) {
- MMT_PRT("Proc-%d, thrd-%d:share mem on zlib queue fail!\n",
+ MMT_PRT("Proc-%d, thrd-%d:zlib queue fail!\n",
pid, thread_id);
goto fail_release;
diff --git a/v1/test/wd_sched.c b/v1/test/wd_sched.c
index f5e46699..ce1d2604 100644
--- a/v1/test/wd_sched.c
+++ b/v1/test/wd_sched.c
@@ -22,94 +22,40 @@
#define EXTRA_SIZE 4096
#define WD_WAIT_MS 1000
-static int __init_cache(struct wd_scheduler *sched)
+static int wd_sched_pre_uninit(struct wd_scheduler *sched)
{
- int i;
- int ret = -ENOMEM;
+ unsigned int flags = 0;
struct q_info *qinfo;
void *pool;
+ int i;
- sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
- if (!sched->msgs) {
- WD_ERR("calloc for sched->msgs fail!\n");
- return ret;
- }
- sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
- if (!sched->stat) {
- WD_ERR("calloc for sched->stat fail!\n");
- goto err_with_msgs;
- }
qinfo = sched->qs[0].qinfo;
- pool = qinfo->br.usr;
- for (i = 0; i < sched->msg_cache_num; i++) {
- sched->msgs[i].data_in = wd_alloc_blk(pool);
- sched->msgs[i].data_out = wd_alloc_blk(pool);
- if (!sched->msgs[i].data_in || !sched->msgs[i].data_out) {
- dbg("not enough data ss_region memory "
- "for cache %d (bs=%d)\n", i, sched->msg_data_size);
- goto err_with_stat;
+ flags = qinfo->dev_flags;
+ if (flags & WD_UACCE_DEV_PASID) {
+ if (sched->ss_region) {
+ free(sched->ss_region);
+ sched->ss_region = NULL;
}
+ return 0;
+ }
- if (sched->init_cache)
- sched->init_cache(sched, i);
+ for (i = 0; i < sched->q_num; i++) {
+ wd_release_queue(&sched->qs[i]);
+ qinfo = sched->qs[i].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
}
return 0;
-
-err_with_stat:
- free(sched->stat);
- sched->stat = NULL;
-err_with_msgs:
- free(sched->msgs);
- sched->msgs = NULL;
- return ret;
-}
-
-static void __fini_cache(struct wd_scheduler *sched)
-{
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
- void *pool;
- int i;
-
- if (sched->stat) {
- free(sched->stat);
- sched->stat = NULL;
- }
- if (!(flags & WD_UACCE_DEV_PASID)) {
- pool = qinfo->br.usr;
- if (pool) {
- for (i = 0; i < sched->msg_cache_num; i++) {
- if (sched->msgs[i].data_in)
- wd_free_blk(pool, sched->msgs[i].data_in);
- if (sched->msgs[i].data_out)
- wd_free_blk(pool, sched->msgs[i].data_out);
- }
- wd_blkpool_destroy(pool);
- }
- }
- if (sched->msgs) {
- free(sched->msgs);
- sched->msgs = NULL;
- }
}
static int wd_sched_preinit(struct wd_scheduler *sched)
{
- int ret, i, j;
+ struct wd_blkpool_setup mm_setup;
unsigned int flags = 0;
struct q_info *qinfo;
- struct wd_blkpool_setup mm_setup;
+ int ret, i, j;
void *pool;
- for (i = 0; i < sched->q_num; i++) {
- ret = wd_request_queue(&sched->qs[i]);
- if (ret) {
- WD_ERR("fail to request queue!\n");
- goto out_with_queues;
- }
- }
-
if (!sched->ss_region_size)
sched->ss_region_size = EXTRA_SIZE + /* add 1 page extra */
sched->msg_cache_num * (sched->msg_data_size << 0x1);
@@ -120,18 +66,29 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
sched->ss_region = malloc(sched->ss_region_size);
if (!sched->ss_region) {
WD_ERR("fail to alloc sched ss region mem!\n");
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ memset(&mm_setup, 0, sizeof(mm_setup));
+ mm_setup.block_size = sched->msg_data_size;
+ mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
+ mm_setup.align_size = 128;
+ for (i = 0; i < sched->q_num; i++) {
+ ret = wd_request_queue(&sched->qs[i]);
+ if (ret) {
+ WD_ERR("fail to request queue!\n");
ret = -ENOMEM;
goto out_with_queues;
}
- } else {
- memset(&mm_setup, 0, sizeof(mm_setup));
- mm_setup.block_size = sched->msg_data_size;
- mm_setup.block_num = sched->msg_cache_num << 0x1; /* in and out */
- mm_setup.align_size = 128;
- pool = wd_blkpool_create(&sched->qs[0], &mm_setup);
+
+ qinfo = sched->qs[i].qinfo;
+ pool = wd_blkpool_create(&sched->qs[i], &mm_setup);
if (!pool) {
WD_ERR("%s(): create pool fail!\n", __func__);
ret = -ENOMEM;
+ wd_release_queue(&sched->qs[i]);
goto out_with_queues;
}
qinfo->br.alloc = (void *)wd_alloc_blk;
@@ -144,79 +101,135 @@ static int wd_sched_preinit(struct wd_scheduler *sched)
return 0;
out_with_queues:
+ for (j = i-1; j >= 0; j--) {
+ wd_release_queue(&sched->qs[j]);
+ qinfo = sched->qs[j].qinfo;
+ wd_blkpool_destroy(qinfo->br.usr);
+ }
+
if (flags & WD_UACCE_DEV_PASID) {
if (sched->ss_region) {
free(sched->ss_region);
sched->ss_region = NULL;
}
}
- for (j = i-1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
+
return ret;
}
+static void __fini_cache(struct wd_scheduler *sched)
+{
+ struct q_info *qinfo = sched->qs[0].qinfo;
+ unsigned int flags = qinfo->dev_flags;
+ void *pool;
+ int i, j;
-int wd_sched_init(struct wd_scheduler *sched)
+ if (sched->stat) {
+ free(sched->stat);
+ sched->stat = NULL;
+ }
+
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+
+ if (!(flags & WD_UACCE_DEV_PASID)) {
+ for (j = 0; j < sched->q_num; j++) {
+ qinfo = sched->qs[j].qinfo;
+ pool = qinfo->br.usr;
+ if (!pool)
+ continue;
+
+ for (i = 0; i < sched->msg_cache_num; i++) {
+ if (sched->msgs[i].data_in)
+ wd_free_blk(pool, sched->msgs[i].data_in);
+ if (sched->msgs[i].data_out)
+ wd_free_blk(pool, sched->msgs[i].data_out);
+ }
+ }
+ }
+}
+
+static int __init_cache(struct wd_scheduler *sched)
{
- int ret, j, k;
- unsigned int flags;
struct q_info *qinfo;
+ unsigned int flags;
+ int ret = -ENOMEM;
+ int i, j;
+ void *pool;
- ret = wd_sched_preinit(sched);
- if (ret < 0)
- return -EINVAL;
+ sched->msgs = calloc(sched->msg_cache_num, sizeof(*sched->msgs));
+ if (!sched->msgs) {
+ WD_ERR("calloc for sched->msgs fail!\n");
+ return ret;
+ }
+ sched->stat = calloc(sched->q_num, sizeof(*sched->stat));
+ if (!sched->stat) {
+ WD_ERR("calloc for sched->stat fail!\n");
+ goto err_with_msgs;
+ }
qinfo = sched->qs[0].qinfo;
+ pool = qinfo->br.usr;
flags = qinfo->dev_flags;
- if (!(flags & WD_UACCE_DEV_PASID)) {
- for (k = 1; k < sched->q_num; k++) {
- ret = wd_share_reserved_memory(&sched->qs[0],
- &sched->qs[k]);
- if (ret) {
- WD_ERR("fail to share queue reserved mem!\n");
- goto out_with_queues;
+ if ((flags & WD_UACCE_DEV_PASID))
+ return 0;
+
+ for (i = 0; i < sched->q_num; i++) {
+ qinfo = sched->qs[i].qinfo;
+ pool = qinfo->br.usr;
+ for (j = 0; j < sched->msg_cache_num; j++) {
+ sched->msgs[j].data_in = wd_alloc_blk(pool);
+ sched->msgs[j].data_out = wd_alloc_blk(pool);
+ if (!sched->msgs[j].data_in || !sched->msgs[j].data_out) {
+ dbg("not enough data ss_region memory "
+ "for cache %d (bs=%d)\n", j, sched->msg_data_size);
+ goto err_with_alloc;
}
+
+ if (sched->init_cache)
+ sched->init_cache(sched, j);
}
}
- sched->cl = sched->msg_cache_num;
+ return 0;
+
+err_with_alloc:
+ free(sched->stat);
+ sched->stat = NULL;
+ __fini_cache(sched);
+err_with_msgs:
+ if (sched->msgs) {
+ free(sched->msgs);
+ sched->msgs = NULL;
+ }
+ return ret;
+}
+
+int wd_sched_init(struct wd_scheduler *sched)
+{
+ int ret;
+ ret = wd_sched_preinit(sched);
+ if (ret < 0)
+ return -EINVAL;
+
+ sched->cl = sched->msg_cache_num;
ret = __init_cache(sched);
if (ret) {
WD_ERR("fail to init caches!\n");
- goto out_with_queues;
+ wd_sched_pre_uninit(sched);
+ return -EINVAL;
}
return 0;
-
-out_with_queues:
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
- for (j = sched->q_num - 1; j >= 0; j--)
- wd_release_queue(&sched->qs[j]);
- return ret;
}
void wd_sched_fini(struct wd_scheduler *sched)
{
- int i;
- struct q_info *qinfo = sched->qs[0].qinfo;
- unsigned int flags = qinfo->dev_flags;
-
__fini_cache(sched);
- if (flags & WD_UACCE_DEV_PASID) {
- if (sched->ss_region) {
- free(sched->ss_region);
- sched->ss_region = NULL;
- }
- }
-
- for (i = sched->q_num - 1; i >= 0; i--)
- wd_release_queue(&sched->qs[i]);
+ wd_sched_pre_uninit(sched);
}
static int __sync_send(struct wd_scheduler *sched)
--
2.33.0
*** BLURB HERE ***
Chenghai Huang (6):
uadk:remove the data consumption modifying when copying cache output
uadk: add the function of parsing the cached tail packets
uadk: add the function of parsing the cached tail packets in v1
uadk: fix the windowbits setting issue in zlibwrapper
uadk: fix gzip windowbits and level setting issue in zlibwrapper
uadk: fix repeated initialization issue
Hao Fang (1):
uadk_tool: hpre: fix for x448/x25519 perf test failed
Longfang Liu (2):
uadk: removal of the Shared Memory Interface for Queues
uadk_tools: Removal of the Shared Memory Interface for Queues
Qi Tao (1):
uadk: fix code cleanup issues
Super User (1):
uadk: the uadk_tool is compiled using the private OpenSSL header file.
Zhiqi Song (1):
uadk/hpre: remove redundant null operation
Zhushuai Yin (3):
Uadk: Fix the DAE memory leak issue.
uadk: The MAX MIN and rehash functions are added to the hash agg.
Uadk: fix hash agg review comments
lizhi (8):
uadk: adapt ECDH for high-performance cores
uadk_tool: adapt ecdh benchmark with secp256r1 curve
uadk/hpre: fix up severel parameter type mismatch issues
uadk: check hardware type to config hpcore field
uadk_tool: update ECC curve in uadk_tool benchmark
uadk:fix some cleanup issues
uadk: fix potential null pointer dereference
uadk_tool: Fix x448 bug caused by wrong nid.
Makefile.am | 2 +
drv/hisi_comp.c | 54 +--
drv/hisi_comp_huf.c | 248 ++++++++++++++
drv/hisi_comp_huf.h | 19 ++
drv/hisi_dae.c | 244 ++++++++++++--
drv/hisi_hpre.c | 108 +++++-
drv/hisi_qm_udrv.h | 3 +-
include/drv/wd_agg_drv.h | 10 +-
include/drv/wd_ecc_drv.h | 10 +
include/wd_agg.h | 9 +-
include/wd_ecc_curve.h | 27 ++
include/wd_util.h | 2 +-
include/wd_zlibwrapper.h | 2 +-
uadk_tool/benchmark/hpre_uadk_benchmark.c | 58 ++--
uadk_tool/benchmark/hpre_wd_benchmark.c | 32 +-
uadk_tool/benchmark/sec_soft_benchmark.c | 20 +-
uadk_tool/benchmark/uadk_benchmark.h | 2 +-
uadk_tool/test/comp_lib.h | 2 +-
v1/drv/hisi_zip_huf.c | 248 ++++++++++++++
v1/drv/hisi_zip_huf.h | 19 ++
v1/drv/hisi_zip_udrv.c | 52 ++-
v1/test/hisi_hpre_test/hpre_test_tools.c | 392 ----------------------
v1/test/hisi_zip_test_sgl/wd_sched_sgl.c | 310 +++++++++--------
v1/test/test_mm/test_wd_mem.c | 8 +-
v1/test/wd_sched.c | 247 +++++++-------
v1/uacce.h | 1 -
v1/wd.c | 57 +---
v1/wd.h | 2 -
wd.c | 4 +-
wd_aead.c | 8 +-
wd_agg.c | 47 +--
wd_cipher.c | 16 +-
wd_comp.c | 6 +-
wd_dh.c | 4 +-
wd_digest.c | 7 +-
wd_ecc.c | 70 +++-
wd_rsa.c | 5 +-
wd_util.c | 6 +-
wd_zlibwrapper.c | 48 +--
39 files changed, 1473 insertions(+), 936 deletions(-)
create mode 100644 drv/hisi_comp_huf.c
create mode 100644 drv/hisi_comp_huf.h
create mode 100644 v1/drv/hisi_zip_huf.c
create mode 100644 v1/drv/hisi_zip_huf.h
--
2.33.0
Hello!
sig-AccLib invites you to a Zoom meeting to be held at 2025-06-11 11:00.
Meeting subject: 【2025/6/11 AccLib SIG biweekly meeting, 11:00 - 12:00】
Agenda:
Meeting link: linaro-org.zoom.us/j/91879279131
Meeting minutes: etherpad.openeuler.org/p/sig-AccLib-meet
You can join the meeting at https://us06web.zoom.us/j/86361583097?pwd=PIujIKJR3JaIEl4pg3LAdclauyjQyy.1
Add topics at https://etherpad.openeuler.org/p/sig-AccLib-meetings
More information: https://www.openeuler.org/en/ (Chinese: https://www.openeuler.org/zh/)
[PATCH 00/10] uadk_engine/provider: fixed static code alarms and code format issues.
by Qi Tao 05 Jun '25
05 Jun '25
From: JiangShui Yang <yangjiangshui(a)h-partners.com>
Chenghai Huang (4):
uadk_provider: fix some possible issues where empty memory is accessed
uadk_provider: adjust the code style
uadk_provider: fix the static variable problem in the case of multiple
concurrency
uadk_provider: add parameter validation to avoid memory errors
Hao Fang (6):
uadk_engine/provider: tinyfixes for code format alignment
uadk_engine/provider: add blank line before return
uadk_engine/provider: remove the variable redundant initialization
uadk_provider: add const keyword
uadk_provider: remove unused parameters
uadk_engine:sm2: use UADK_E_INVAL replace -1
src/uadk_aead.c | 2 +-
src/uadk_cipher.c | 13 +--
src/uadk_dh.c | 11 +--
src/uadk_digest.c | 24 +++---
src/uadk_ec.c | 6 ++
src/uadk_ecx.c | 4 +-
src/uadk_engine_init.c | 1 +
src/uadk_pkey.c | 9 ++-
src/uadk_pkey.h | 3 +-
src/uadk_prov_aead.c | 3 +-
src/uadk_prov_bio.c | 18 ++---
src/uadk_prov_cipher.c | 46 ++++++-----
src/uadk_prov_der_writer.c | 96 +++++++++++-----------
src/uadk_prov_der_writer.h | 8 +-
src/uadk_prov_dh.c | 89 +++++++++++----------
src/uadk_prov_digest.c | 23 +++---
src/uadk_prov_ec_kmgmt.c | 3 +-
src/uadk_prov_ecdh_exch.c | 21 +++--
src/uadk_prov_ecdsa.c | 8 +-
src/uadk_prov_ecx.c | 53 ++++++------
src/uadk_prov_ffc.c | 160 ++++++++++++++++++++-----------------
src/uadk_prov_ffc.h | 4 +-
src/uadk_prov_hmac.c | 20 ++---
src/uadk_prov_init.c | 10 +--
src/uadk_prov_packet.c | 48 ++++++-----
src/uadk_prov_packet.h | 52 ++++++------
src/uadk_prov_pkey.c | 63 ++++++++-------
src/uadk_prov_rsa.c | 82 +++++++++----------
src/uadk_prov_sm2.c | 135 ++++++++++++++++---------------
src/uadk_rsa.c | 31 +++----
src/uadk_sm2.c | 59 ++++++++------
31 files changed, 594 insertions(+), 511 deletions(-)
--
2.33.0
From: lizhi <lizhi206(a)huawei.com>
Fix a memory leak and avoid a possible double-free risk in sm2.
Signed-off-by: lizhi <lizhi206(a)huawei.com>
---
src/uadk_sm2.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
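For context, a minimal standalone sketch of the ownership rule the fix relies on (demo_sign_to_der and its arguments are illustrative, not code from uadk_sm2.c): once ECDSA_SIG_set0() succeeds, the two BIGNUMs belong to the ECDSA_SIG, so a single ECDSA_SIG_free() releases everything on both the i2d_ECDSA_SIG() error path and the success path.

    #include <errno.h>
    #include <openssl/bn.h>
    #include <openssl/ecdsa.h>

    /* Illustrative only: br/bs ownership moves into e_sig when set0() succeeds. */
    static int demo_sign_to_der(BIGNUM *br, BIGNUM *bs,
                                unsigned char **der, int *derlen)
    {
            ECDSA_SIG *e_sig = ECDSA_SIG_new();

            if (!e_sig)
                    return -ENOMEM;

            if (!ECDSA_SIG_set0(e_sig, br, bs)) {
                    /* ownership not transferred: caller still owns br and bs */
                    ECDSA_SIG_free(e_sig);
                    return -EINVAL;
            }

            *derlen = i2d_ECDSA_SIG(e_sig, der);
            /* br and bs now belong to e_sig: one free releases all of it */
            ECDSA_SIG_free(e_sig);

            return *derlen < 0 ? -EINVAL : 0;
    }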
diff --git a/src/uadk_sm2.c b/src/uadk_sm2.c
index 170d320..7737292 100644
--- a/src/uadk_sm2.c
+++ b/src/uadk_sm2.c
@@ -407,9 +407,12 @@ static int sign_bin_to_ber(EC_KEY *ec, struct wd_dtb *r, struct wd_dtb *s,
if (sltmp < 0) {
fprintf(stderr, "failed to i2d_ECDSA_SIG\n");
ret = -EINVAL;
- goto free_s;
+ /* bs and br set to e_sig, use unified interface to prevent double release. */
+ goto free_sig;
}
+
*siglen = (size_t)sltmp;
+ ECDSA_SIG_free(e_sig);
return 0;
free_s:
@@ -417,7 +420,6 @@ free_s:
free_r:
BN_clear_free(br);
free_sig:
- ECDSA_SIG_set0(e_sig, NULL, NULL);
ECDSA_SIG_free(e_sig);
return ret;
--
2.33.0
Fixed the following issues:
1. Remove a redundant function return value check.
2. Fix inappropriate error messages.
3. The return value of async_wake_job() does not need to be
checked; checking req->state is enough to determine whether
an exception occurred during packet reception. Therefore,
add a "(void)" cast before the call (see the sketch after
this list).
4. Free the allocated memory before returning an error.
5. The callback function's parameter list does not match
the function pointer definition.
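A minimal sketch of the convention behind point 3 (illustrative only: demo_async_cb, the header names, and the way the op is passed through the callback data are assumptions, not code from this patch; the real callbacks are in the diff below):

    /* assumed headers: wd_aead.h for struct wd_aead_req, uadk_async.h for
     * struct async_op and the async helpers */
    #include <uadk/wd_aead.h>
    #include "uadk_async.h"

    static void *demo_async_cb(struct wd_aead_req *req, void *data)
    {
            struct async_op *op = data;  /* assumption: op handed in as cb data */

            if (op && op->job && !op->done) {
                    op->done = 1;
                    async_free_poll_task(op->idx, 1);
                    /* wake return value deliberately ignored: the submitting
                     * path checks req->state to detect reception errors */
                    (void)async_wake_job(op->job);
            }

            return NULL;
    }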
Signed-off-by: Qi Tao <taoqi10(a)huawei.com>
---
src/uadk_aead.c | 4 ++--
src/uadk_async.c | 2 +-
src/uadk_cipher.c | 6 +-----
src/uadk_dh.c | 2 +-
src/uadk_digest.c | 2 +-
src/uadk_pkey.c | 2 +-
src/uadk_prov_aead.c | 4 ++--
src/uadk_prov_cipher.c | 4 ++--
src/uadk_prov_dh.c | 2 +-
src/uadk_prov_digest.c | 6 +++---
src/uadk_prov_hmac.c | 6 +++---
src/uadk_prov_init.c | 4 +++-
src/uadk_prov_pkey.c | 2 +-
src/uadk_prov_rsa.c | 2 +-
src/uadk_rsa.c | 2 +-
15 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/src/uadk_aead.c b/src/uadk_aead.c
index 1da7753..69223f1 100644
--- a/src/uadk_aead.c
+++ b/src/uadk_aead.c
@@ -469,7 +469,7 @@ static int do_aead_sync_inner(struct aead_priv_ctx *priv, unsigned char *out,
priv->req.state = 0;
ret = wd_do_aead_sync(priv->sess, &priv->req);
if (unlikely(ret < 0 || priv->req.state)) {
- fprintf(stderr, "do aead task failed, msg state: %d, ret: %d, state: %u!\n",
+ fprintf(stderr, "do aead task failed, msg state: %u, ret: %d, state: %u!\n",
state, ret, priv->req.state);
return RET_FAIL;
}
@@ -547,7 +547,7 @@ static void *uadk_e_aead_cb(struct wd_aead_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
return NULL;
diff --git a/src/uadk_async.c b/src/uadk_async.c
index 7536bd5..0f92dde 100644
--- a/src/uadk_async.c
+++ b/src/uadk_async.c
@@ -329,7 +329,7 @@ static void *async_poll_process_func(void *args)
if (!poll_queue.is_recv && op->job) {
op->done = 1;
op->ret = ret;
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
async_free_poll_task(idx, 0);
}
}
diff --git a/src/uadk_cipher.c b/src/uadk_cipher.c
index 457e90d..95af193 100644
--- a/src/uadk_cipher.c
+++ b/src/uadk_cipher.c
@@ -242,10 +242,6 @@ static int uadk_e_cipher_soft_work(EVP_CIPHER_CTX *ctx, unsigned char *out,
*/
if (!priv->update_iv) {
iv = EVP_CIPHER_CTX_iv_noconst(ctx);
- if (unlikely(iv == NULL)) {
- fprintf(stderr, "get openssl software iv failed.\n");
- return 0;
- }
memcpy(iv, priv->iv, EVP_CIPHER_CTX_iv_length(ctx));
priv->update_iv = true;
}
@@ -567,7 +563,7 @@ static void *uadk_e_cipher_cb(struct wd_cipher_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
return NULL;
diff --git a/src/uadk_dh.c b/src/uadk_dh.c
index 2f2c1cf..011bf56 100644
--- a/src/uadk_dh.c
+++ b/src/uadk_dh.c
@@ -259,7 +259,7 @@ static void uadk_e_dh_cb(void *req_t)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_digest.c b/src/uadk_digest.c
index 6827f98..0b4e8c5 100644
--- a/src/uadk_digest.c
+++ b/src/uadk_digest.c
@@ -802,7 +802,7 @@ static void *uadk_e_digest_cb(void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
return NULL;
diff --git a/src/uadk_pkey.c b/src/uadk_pkey.c
index 1f8234b..e05c7d0 100644
--- a/src/uadk_pkey.c
+++ b/src/uadk_pkey.c
@@ -101,7 +101,7 @@ void uadk_e_ecc_cb(void *req_t)
op->done = 1;
op->ret = 0;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_prov_aead.c b/src/uadk_prov_aead.c
index 54e0115..dbbd844 100644
--- a/src/uadk_prov_aead.c
+++ b/src/uadk_prov_aead.c
@@ -390,7 +390,7 @@ static void *uadk_prov_aead_cb(struct wd_aead_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
return NULL;
@@ -800,7 +800,7 @@ static int uadk_get_aead_info(struct aead_priv_ctx *priv)
}
if (unlikely(i == aead_counts)) {
- fprintf(stderr, "failed to setup the private ctx.\n");
+ fprintf(stderr, "failed to get aead info.\n");
return UADK_AEAD_FAIL;
}
diff --git a/src/uadk_prov_cipher.c b/src/uadk_prov_cipher.c
index db4a957..dfe6666 100644
--- a/src/uadk_prov_cipher.c
+++ b/src/uadk_prov_cipher.c
@@ -395,7 +395,7 @@ static int uadk_get_cipher_info(struct cipher_priv_ctx *priv)
}
}
- fprintf(stderr, "failed to setup the private ctx.\n");
+ fprintf(stderr, "failed to get cipher info.\n");
return UADK_P_FAIL;
}
@@ -455,7 +455,7 @@ static void async_cb(struct wd_cipher_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_prov_dh.c b/src/uadk_prov_dh.c
index c75c386..1cb4b45 100644
--- a/src/uadk_prov_dh.c
+++ b/src/uadk_prov_dh.c
@@ -581,7 +581,7 @@ static void uadk_prov_dh_cb(void *req_t)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_prov_digest.c b/src/uadk_prov_digest.c
index 7a5bfff..a13d075 100644
--- a/src/uadk_prov_digest.c
+++ b/src/uadk_prov_digest.c
@@ -303,7 +303,7 @@ static int uadk_get_digest_info(struct digest_priv_ctx *priv)
}
if (unlikely(i == digest_counts)) {
- fprintf(stderr, "failed to setup the private ctx.\n");
+ fprintf(stderr, "failed to digest info.\n");
return UADK_DIGEST_FAIL;
}
@@ -534,7 +534,7 @@ soft_update:
return uadk_digest_soft_update(priv, data, data_len);
}
-static void uadk_async_cb(struct wd_digest_req *req, void *data)
+static void uadk_async_cb(struct wd_digest_req *req)
{
struct uadk_e_cb_info *digest_cb_param;
struct wd_digest_req *req_origin;
@@ -550,7 +550,7 @@ static void uadk_async_cb(struct wd_digest_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_prov_hmac.c b/src/uadk_prov_hmac.c
index db49612..6bf7947 100644
--- a/src/uadk_prov_hmac.c
+++ b/src/uadk_prov_hmac.c
@@ -361,7 +361,7 @@ static int uadk_get_hmac_info(struct hmac_priv_ctx *priv)
}
}
- fprintf(stderr, "failed to setup the private ctx, algname = %s.\n", priv->alg_name);
+ fprintf(stderr, "failed to get hmac info, algname = %s.\n", priv->alg_name);
return UADK_HMAC_FAIL;
}
@@ -529,7 +529,7 @@ soft_init:
return uadk_hmac_soft_init(priv);
}
-static void uadk_hmac_async_cb(struct wd_digest_req *req, void *data)
+static void uadk_hmac_async_cb(struct wd_digest_req *req)
{
struct uadk_e_cb_info *hmac_cb_param;
struct wd_digest_req *req_origin;
@@ -545,7 +545,7 @@ static void uadk_hmac_async_cb(struct wd_digest_req *req, void *data)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_prov_init.c b/src/uadk_prov_init.c
index c29500b..20f7068 100644
--- a/src/uadk_prov_init.c
+++ b/src/uadk_prov_init.c
@@ -470,8 +470,10 @@ int OSSL_provider_init(const OSSL_CORE_HANDLE *handle,
ctx->libctx = (OSSL_LIB_CTX *)c_get_libctx(handle);
ret = uadk_prov_ctx_set_core_bio_method(ctx);
- if (!ret)
+ if (!ret) {
+ OPENSSL_free(ctx);
return UADK_P_FAIL;
+ }
ret = async_module_init();
if (!ret)
diff --git a/src/uadk_prov_pkey.c b/src/uadk_prov_pkey.c
index ca853ae..ac4541c 100644
--- a/src/uadk_prov_pkey.c
+++ b/src/uadk_prov_pkey.c
@@ -362,7 +362,7 @@ void uadk_prov_ecc_cb(void *req_t)
ecc_async_op->done = 1;
ecc_async_op->ret = 0;
async_free_poll_task(ecc_async_op->idx, 1);
- async_wake_job(ecc_async_op->job);
+ (void)async_wake_job(ecc_async_op->job);
}
}
diff --git a/src/uadk_prov_rsa.c b/src/uadk_prov_rsa.c
index b9713cc..4351514 100644
--- a/src/uadk_prov_rsa.c
+++ b/src/uadk_prov_rsa.c
@@ -1275,7 +1275,7 @@ static void uadk_e_rsa_cb(void *req_t)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
diff --git a/src/uadk_rsa.c b/src/uadk_rsa.c
index 76678a4..1755374 100644
--- a/src/uadk_rsa.c
+++ b/src/uadk_rsa.c
@@ -1117,7 +1117,7 @@ static void uadk_e_rsa_cb(void *req_t)
if (op && op->job && !op->done) {
op->done = 1;
async_free_poll_task(op->idx, 1);
- async_wake_job(op->job);
+ (void)async_wake_job(op->job);
}
}
--
2.33.0
Hello!
sig-AccLib invites you to a Zoom meeting to be held at 2025-05-28 11:00.
Meeting subject: 【2025/5/28 AccLib SIG biweekly meeting, 11:00 - 12:00】
Agenda:
Meeting link: linaro-org.zoom.us/j/91879279131
Meeting minutes: etherpad.openeuler.org/p/sig-AccLib-meet
You can join the meeting at https://us06web.zoom.us/j/82863167501?pwd=tvnic0aWCxFSVFObmeHHXbN3gh2a8m.1
Add topics at https://etherpad.openeuler.org/p/sig-AccLib-meetings
More information: https://www.openeuler.org/en/ (Chinese: https://www.openeuler.org/zh/)