The dynamic loading feature of uadk requires two new scheduler policies: one for instruction-based acceleration that needs no hardware ctx resources (SCHED_POLICY_NONE), and one for SVE acceleration that must be bound to CPU cores and therefore uses a fixed ctx (SCHED_POLICY_SINGLE).
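As a rough illustration of when each policy applies (sketch only; the helper below is hypothetical and not part of this patch):

    #include <stdbool.h>
    #include "wd_sched.h"

    /* Hypothetical selection helper, for illustration only */
    static enum sched_policy_type pick_policy(bool has_hw_ctx, bool bind_cpu)
    {
            if (!has_hw_ctx)
                    return SCHED_POLICY_NONE;   /* instruction path, no ctx needed */
            if (bind_cpu)
                    return SCHED_POLICY_SINGLE; /* SVE path, one fixed ctx */
            return SCHED_POLICY_RR;             /* existing hardware RR path */
    }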
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
---
 include/wd_sched.h |   6 +-
 wd_sched.c         | 181 +++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 181 insertions(+), 6 deletions(-)
diff --git a/include/wd_sched.h b/include/wd_sched.h
index 2ae6103..a492d70 100644
--- a/include/wd_sched.h
+++ b/include/wd_sched.h
@@ -18,7 +18,11 @@ extern "C" {
 enum sched_policy_type {
 	/* requests will be sent to ctxs one by one */
 	SCHED_POLICY_RR = 0,
-	SCHED_POLICY_BUTT
+	/* requests do not need a ctx */
+	SCHED_POLICY_NONE,
+	/* requests need one fixed ctx */
+	SCHED_POLICY_SINGLE,
+	SCHED_POLICY_BUTT,
 };
 
 struct sched_params {

diff --git a/wd_sched.c b/wd_sched.c
index 98f4cfd..66712c8 100644
--- a/wd_sched.c
+++ b/wd_sched.c
@@ -347,6 +347,158 @@ static int session_sched_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count)
 	return 0;
 }
 
+static handle_t sched_none_init(handle_t h_sched_ctx, void *sched_param)
+{
+	return (handle_t)0;
+}
+
+static __u32 sched_none_pick_next_ctx(handle_t sched_ctx,
+				      void *sched_key, const int sched_mode)
+{
+	return 0;
+}
+
+static int sched_none_poll_policy(handle_t h_sched_ctx,
+				  __u32 expect, __u32 *count)
+{
+	struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx;
+	__u32 loop_times = MAX_POLL_TIMES + expect;
+	__u32 poll_num = 0;
+	int ret;
+
+	while (loop_times > 0) {
+		loop_times--;
+		/* Default use ctx 0 */
+		ret = sched_ctx->poll_func(0, 1, &poll_num);
+		if ((ret < 0) && (ret != -EAGAIN))
+			return ret;
+		else if (ret == -EAGAIN)
+			continue;
+
+		*count += poll_num;
+		if (*count == expect)
+			break;
+	}
+
+	return 0;
+}
+
+static handle_t sched_single_init(handle_t h_sched_ctx, void *sched_param)
+{
+	return (handle_t)0;
+}
+
+static __u32 sched_single_pick_next_ctx(handle_t sched_ctx,
+					void *sched_key, const int sched_mode)
+{
+#define CTX_ASYNC	1
+#define CTX_SYNC	0
+
+	if (sched_mode)
+		return CTX_ASYNC;
+	else
+		return CTX_SYNC;
+}
+
+static int sched_single_poll_policy(handle_t h_sched_ctx,
+				    __u32 expect, __u32 *count)
+{
+	struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx;
+	__u32 loop_times = MAX_POLL_TIMES + expect;
+	__u32 poll_num = 0;
+	int ret;
+
+	while (loop_times > 0) {
+		loop_times--;
+		/* Default async mode uses ctx 0 */
+		ret = sched_ctx->poll_func(0, 1, &poll_num);
+		if ((ret < 0) && (ret != -EAGAIN))
+			return ret;
+		else if (ret == -EAGAIN)
+			continue;
+
+		*count += poll_num;
+		if (*count == expect)
+			break;
+	}
+
+	return 0;
+}
+
 static struct wd_sched sched_table[SCHED_POLICY_BUTT] = {
 	{
 		.name = "RR scheduler",
@@ -354,7 +506,19 @@ static struct wd_sched sched_table[SCHED_POLICY_BUTT] = {
 		.sched_init = session_sched_init,
 		.pick_next_ctx = session_sched_pick_next_ctx,
 		.poll_policy = session_sched_poll_policy,
-	},
+	}, {
+		.name = "None scheduler",
+		.sched_policy = SCHED_POLICY_NONE,
+		.sched_init = sched_none_init,
+		.pick_next_ctx = sched_none_pick_next_ctx,
+		.poll_policy = sched_none_poll_policy,
+	}, {
+		.name = "Single scheduler",
+		.sched_policy = SCHED_POLICY_SINGLE,
+		.sched_init = sched_single_init,
+		.pick_next_ctx = sched_single_pick_next_ctx,
+		.poll_policy = sched_single_poll_policy,
+	}
 };
 
 static int wd_sched_get_nearby_numa_id(struct wd_sched_info *sched_info, int node, int numa_num)
@@ -463,9 +627,12 @@ void wd_sched_rr_release(struct wd_sched *sched)
 
 	sched_ctx = (struct wd_sched_ctx *)sched->h_sched_ctx;
 	if (!sched_ctx)
-		goto out;
+		goto ctx_out;
 
 	sched_info = sched_ctx->sched_info;
+	if (!sched_info)
+		goto info_out;
+
 	for (i = 0; i < sched_ctx->numa_num; i++) {
 		for (j = 0; j < SCHED_MODE_BUTT; j++) {
 			if (sched_info[i].ctx_region[j]) {
@@ -475,9 +642,9 @@ void wd_sched_rr_release(struct wd_sched *sched)
 		}
 	}
 
+info_out:
 	free(sched_ctx);
-
-out:
+ctx_out:
 	free(sched);
 
 	return;
@@ -531,8 +698,11 @@ struct wd_sched *wd_sched_rr_alloc(__u8 sched_type, __u8 type_num,
 	}
 
 	sched->h_sched_ctx = (handle_t)sched_ctx;
-	sched_info = sched_ctx->sched_info;
+	if (sched_type == SCHED_POLICY_NONE ||
+	    sched_type == SCHED_POLICY_SINGLE)
+		goto simple_ok;
 
+	sched_info = sched_ctx->sched_info;
 	for (i = 0; i < numa_num; i++) {
 		for (j = 0; j < SCHED_MODE_BUTT; j++) {
 			sched_info[i].ctx_region[j] =
@@ -542,6 +712,7 @@ struct wd_sched *wd_sched_rr_alloc(__u8 sched_type, __u8 type_num,
 		}
 	}
 
+simple_ok:
 	sched_ctx->poll_func = func;
 	sched_ctx->policy = sched_type;
 	sched_ctx->type_num = type_num;
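For reviewers, a minimal usage sketch of the new policies (assumes the existing wd_sched_rr_alloc()/wd_sched_rr_release() API; my_poll and the parameter values are placeholders):

    #include "wd_sched.h"

    /* Placeholder poll callback; a real one polls the algorithm ctx at @pos */
    static int my_poll(__u32 pos, __u32 expect, __u32 *count)
    {
            *count = expect;
            return 0;
    }

    void example(void)
    {
            /* type_num = 1 and numa_num = 1 are arbitrary for this sketch */
            struct wd_sched *sched = wd_sched_rr_alloc(SCHED_POLICY_SINGLE,
                                                       1, 1, my_poll);
            if (!sched)
                    return;

            /*
             * With SCHED_POLICY_SINGLE, pick_next_ctx() returns ctx 0 for
             * sync requests and ctx 1 for async ones; no per-NUMA ctx
             * regions are allocated, so setup stays lightweight.
             */
            wd_sched_rr_release(sched);
    }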