
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add RT class callbacks implementation:
- dequeue_ctx
- enqueue_ctx
- pick_next_ctx
- put_prev_ctx
- submit_prepare_ctx
- select_work
- check_preempt

Add rt.c to the kernel/xsched Makefile.
Add RT class callbacks support in core.c.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Liu Kai <liukai284@huawei.com>
Signed-off-by: Xia Fukun <xiafukun@huawei.com>
---
 include/linux/xsched.h | 228 ++++++++++++++++++++++++++++++++++++++++-
 kernel/xsched/Makefile |   2 +-
 kernel/xsched/core.c   | 162 ++++++++++++++++++++++++++++-
 kernel/xsched/rt.c     | 225 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 611 insertions(+), 6 deletions(-)
 create mode 100644 kernel/xsched/rt.c

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index efe5d92a5acd..ba2c2e903f59 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -37,8 +37,33 @@
 
 #define __GET_VS_TASK_TYPE(t) ((t)&0xFF)
 
+#define __GET_VS_TASK_PRIO_RT(t) (((t) >> 8) & 0xFF)
+
 #define GET_VS_TASK_TYPE(vs_ptr) __GET_VS_TASK_TYPE((vs_ptr)->task_type)
 
+#define GET_VS_TASK_PRIO_RT(vs_ptr) __GET_VS_TASK_PRIO_RT((vs_ptr)->task_type)
+
+/*
+ * A default kick slice for RT class XSEs.
+ */
+#define XSCHED_RT_KICK_SLICE 20
+/*
+ * A default kick slice for CFS class XSEs.
+ */
+#define XSCHED_CFS_KICK_SLICE 10
+
+enum xcu_sched_type {
+	XSCHED_TYPE_RT,
+	XSCHED_TYPE_DFLT = XSCHED_TYPE_RT,
+	XSCHED_TYPE_NUM,
+};
+
+enum xse_prio {
+	XSE_PRIO_LOW,
+	XSE_PRIO_HIGH,
+	NR_XSE_PRIO,
+};
+
 enum xsched_rq_state {
 	XRQ_STATE_INACTIVE = 0x00,
 	XRQ_STATE_IDLE = 0x01,
@@ -47,18 +72,61 @@ enum xsched_rq_state {
 	XRQ_STATE_WAIT_RUNNING = 0x08,
 };
 
+enum xse_state {
+	XSE_PREPARE,
+	XSE_READY,
+	XSE_RUNNING,
+	XSE_BLOCK,
+	XSE_DEAD,
+};
+
+enum xse_flag {
+	XSE_TIF_NONE,
+	XSE_TIF_PREEMPT,
+	XSE_TIF_BALANCE, /* Unused so far */
+};
+
+
+extern const struct xsched_class rt_xsched_class;
+
+#define xsched_first_class (&rt_xsched_class)
+
+#define for_each_xsched_class(class) \
+	for (class = xsched_first_class; class; class = class->next)
+
+#define for_each_xse_prio(prio) \
+	for (prio = XSE_PRIO_LOW; prio < NR_XSE_PRIO; prio++)
+
 #define for_each_vstream_in_ctx(vs, ctx) \
 	list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node)
 
+/* Manages the xsched RT-like class linked-list based runqueue.
+ *
+ * For now the RT-like class runqueue structs are identical,
+ * but they will most likely diverge in the future as Xsched
+ * evolves.
+ */
+struct xsched_rq_rt {
+	struct list_head rq[NR_XSE_PRIO];
+	unsigned int nr_running;
+	int prio_nr_running[NR_XSE_PRIO];
+	atomic_t prio_nr_kicks[NR_XSE_PRIO];
+	DECLARE_BITMAP(curr_prios, NR_XSE_PRIO);
+};
+
 /* Base XSched runqueue object structure that contains both mutual and
  * individual parameters for different scheduling classes.
  */
 struct xsched_rq {
 	struct xsched_entity *curr_xse;
+	const struct xsched_class *class;
 
 	int state;
 	int nr_running;
+
+	/* RT class run queue. */
+	struct xsched_rq_rt rt;
 };
 
 enum xcu_state {
@@ -89,6 +157,9 @@ struct xsched_cu {
 	uint32_t id;
 	uint32_t state;
 
+	/* RT class kick counter. */
+	atomic_t pending_kicks_rt;
+
 	struct task_struct *worker;
 
 	struct xsched_rq xrq;
@@ -104,6 +175,16 @@ struct xsched_cu {
 	wait_queue_head_t wq_xcu_running;
 };
 
+struct xsched_entity_rt {
+	struct list_head list_node;
+	enum xse_state state;
+	enum xse_flag flag;
+	enum xse_prio prio;
+
+	ktime_t timeslice;
+	s64 kick_slice;
+};
+
 struct xsched_entity {
 	uint32_t task_type;
@@ -118,6 +199,9 @@ struct xsched_entity {
 	/* Amount of submitted kicks context, used for resched decision. */
 	atomic_t submitted_one_kick;
 
+	size_t total_scheduled;
+	size_t total_submitted;
+
 	/* File descriptor coming from an associated context
 	 * used for identifying a given xsched entity in
 	 * info and error prints.
@@ -127,9 +211,15 @@ struct xsched_entity {
 	/* Xsched class for this xse. */
 	const struct xsched_class *class;
 
+	/* RT class entity. */
+	struct xsched_entity_rt rt;
+
 	/* Pointer to context object. */
 	struct xsched_context *ctx;
 
+	/* Xsched entity execution statistics. */
+	u64 last_exec_runtime;
+
 	/* Pointer to an XCU object that represents an XCU
 	 * on which this xse is to be processed or is being
 	 * processed currently.
@@ -140,15 +230,85 @@ struct xsched_entity {
 	spinlock_t xse_lock;
 };
 
+static inline bool xse_is_rt(const struct xsched_entity *xse)
+{
+	return xse && xse->class == &rt_xsched_class;
+}
+
+/* Returns a pointer to an atomic_t variable representing a counter
+ * of currently pending vstream kicks on a given XCU and for a
+ * given xsched class.
+ */
+static inline atomic_t *
+xsched_get_pending_kicks_class(const struct xsched_class *class,
+			       struct xsched_cu *xcu)
+{
+	/* Right now for testing purposes we have only XCU running streams. */
+	if (!xcu) {
+		XSCHED_ERR("Try to get pending kicks with xcu=NULL.\n");
+		return NULL;
+	}
+
+	if (!class) {
+		XSCHED_ERR("Try to get pending kicks with class=NULL.\n");
+		return NULL;
+	}
+
+	if (class == &rt_xsched_class)
+		return &xcu->pending_kicks_rt;
+
+	XSCHED_ERR("Xsched entity has an invalid class @ %s\n", __func__);
+	return NULL;
+}
+
+/* Returns a pointer to an atomic_t variable representing a counter of
+ * currently pending vstream kicks for the XCU on which a given xsched
+ * entity is enqueued and for the xsched class that is assigned to that
+ * xsched entity.
+ */
+static inline atomic_t *
+xsched_get_pending_kicks_xse(const struct xsched_entity *xse)
+{
+	if (!xse) {
+		XSCHED_ERR("Try to get pending kicks with xse=NULL\n");
+		return NULL;
+	}
+
+	if (!xse->xcu) {
+		XSCHED_ERR("Try to get pending kicks with xse->xcu=NULL\n");
+		return NULL;
+	}
+
+	return xsched_get_pending_kicks_class(xse->class, xse->xcu);
+}
+
 /* Increments pending kicks counter for an XCU that the given
  * xsched entity is attached to and for xsched entity's xsched
  * class.
  */
 static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
 {
+	atomic_t *kicks_class = NULL;
+
+	kicks_class = xsched_get_pending_kicks_xse(xse);
+	if (!kicks_class)
+		return -EINVAL;
+
+	/* Incrementing pending kicks for XSE's sched class. */
+	atomic_inc(kicks_class);
+	/* Increment pending kicks for current XSE. */
 	atomic_inc(&xse->kicks_pending_ctx_cnt);
+	/* Incrementing prio based pending kicks counter for RT class. */
+	if (xse_is_rt(xse)) {
+		atomic_inc(&xse->xcu->xrq.rt.prio_nr_kicks[xse->rt.prio]);
+		XSCHED_DEBUG("xcu increased pending kicks @ %s\n", __func__);
+	} else {
+		XSCHED_DEBUG("xse %u isn't rt class @ %s\n", xse->tgid,
+			     __func__);
+	}
+
 	return 0;
 }
 
@@ -158,9 +318,41 @@ static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
  */
 static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
 {
+	atomic_t *kicks_class = NULL;
+	atomic_t *kicks_prio_rt = NULL;
+
+	kicks_class = xsched_get_pending_kicks_xse(xse);
+	if (!kicks_class)
+		return -EINVAL;
+
+	if (!atomic_read(kicks_class)) {
+		XSCHED_ERR("Try to decrement pending kicks beyond 0!\n");
+		return -EINVAL;
+	}
+
+	/* Decrementing pending kicks for XSE's sched class. */
+	atomic_dec(kicks_class);
+	/* Decrementing pending kicks for current XSE. */
 	atomic_dec(&xse->kicks_pending_ctx_cnt);
+	/* Decrementing prio based pending kicks counter for RT class. */
+	if (xse_is_rt(xse)) {
+		kicks_prio_rt = &xse->xcu->xrq.rt.prio_nr_kicks[xse->rt.prio];
+
+		if (!atomic_read(kicks_prio_rt)) {
+			XSCHED_ERR(
+				"Tried to decrement prio pending kicks beyond 0!\n");
+			return -EINVAL;
+		}
+
+		atomic_dec(kicks_prio_rt);
+		XSCHED_DEBUG("xcu decreased pending kicks @ %s\n", __func__);
+	} else {
+		XSCHED_DEBUG("xse %u isn't rt class @ %s\n", xse->tgid,
+			     __func__);
+	}
+
 	return 0;
 }
 
@@ -169,7 +361,14 @@ static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
  */
 static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
 {
-	return 0;
+	atomic_t *kicks_rt;
+
+	kicks_rt = xsched_get_pending_kicks_class(&rt_xsched_class, xcu);
+
+	if (!kicks_rt)
+		return false;
+
+	return !!atomic_read(kicks_rt);
 }
 
 static inline int xse_integrity_check(const struct xsched_entity *xse)
@@ -223,6 +422,33 @@ static inline struct xsched_context *ctx_find_by_tgid(pid_t tgid)
 	return ret;
 }
 
+/* Xsched class. */
+struct xsched_class {
+	const struct xsched_class *next;
+
+	/* Removes a given XSE from its runqueue. */
+	void (*dequeue_ctx)(struct xsched_entity *xse);
+
+	/* Places a given XSE on a runqueue on a given XCU. */
+	void (*enqueue_ctx)(struct xsched_entity *xse, struct xsched_cu *xcu);
+
+	/* Returns the next XSE to be submitted on a given XCU. */
+	struct xsched_entity *(*pick_next_ctx)(struct xsched_cu *xcu);
+
+	/* Puts an XSE back into the rq during preemption. */
+	void (*put_prev_ctx)(struct xsched_entity *xse);
+
+	/* Prepares a given XSE for submission on a given XCU. */
+	int (*submit_prepare_ctx)(struct xsched_entity *xse,
+				  struct xsched_cu *xcu);
+
+	/* Checks context preemption. */
+	bool (*check_preempt)(struct xsched_entity *xse);
+
+	/* Selects jobs from an XSE to submit on an XCU. */
+	size_t (*select_work)(struct xsched_cu *xcu, struct xsched_entity *xse);
+};
+
 static inline void xsched_init_vsm(struct vstream_metadata *vsm,
 				   struct vstream_info *vs, vstream_args_t *arg)
 {
diff --git a/kernel/xsched/Makefile b/kernel/xsched/Makefile
index 62e58e4151b0..f882518d54ab 100644
--- a/kernel/xsched/Makefile
+++ b/kernel/xsched/Makefile
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += vstream.o
-obj-$(CONFIG_XCU_SCHEDULER) += core.o
+obj-$(CONFIG_XCU_SCHEDULER) += core.o rt.o
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 09b2d2c47652..9bee0f5ed33b 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -40,19 +40,116 @@ static DEFINE_HASHTABLE(ctx_revmap, XCU_HASH_ORDER);
 
 static void put_prev_ctx(struct xsched_entity *xse)
 {
+	struct xsched_cu *xcu = xse->xcu;
+
+	lockdep_assert_held(&xcu->xcu_lock);
+
+	xse->class->put_prev_ctx(xse);
+	xse->last_exec_runtime = 0;
+	atomic_set(&xse->submitted_one_kick, 0);
+}
+
+static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse)
+{
+	int kick_count;
+	struct vstream_info *vs;
+	unsigned int sum_exec_time = 0;
+	size_t kicks_submitted = 0;
+	struct vstream_metadata *vsm;
+	int not_empty;
+
+	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
+	XSCHED_DEBUG("Before decrement XSE kick_count=%d @ %s\n",
+		     kick_count, __func__);
+
+	if (kick_count == 0) {
+		XSCHED_WARN("Try to select xse that has 0 kicks @ %s\n",
+			    __func__);
+		return 0;
+	}
+
+	do {
+		not_empty = 0;
+		for_each_vstream_in_ctx(vs, xse->ctx) {
+			spin_lock(&vs->stream_lock);
+			vsm = xsched_vsm_fetch_first(vs);
+			spin_unlock(&vs->stream_lock);
+			if (vsm) {
+				list_add_tail(&vsm->node, &xcu->vsm_list);
+
+				sum_exec_time += vsm->exec_time;
+				kicks_submitted++;
+				xsched_dec_pending_kicks_xse(xse);
+				XSCHED_DEBUG(
+					"vs id = %d Kick submit exec_time %u sq_tail %u sqe_num %u sq_id %u @ %s\n",
+					vs->id, vsm->exec_time, vsm->sq_tail,
+					vsm->sqe_num, vsm->sq_id, __func__);
+				not_empty++;
+			}
+		}
+	} while (not_empty);
+
+	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
+	XSCHED_DEBUG("After decrement XSE kick_count=%d @ %s\n",
+		     kick_count, __func__);
+
+	xse->total_scheduled += kicks_submitted;
+
+	return kicks_submitted;
 }
 
 static struct xsched_entity *__raw_pick_next_ctx(struct xsched_cu *xcu)
 {
-	return NULL;
+	const struct xsched_class *class;
+	struct xsched_entity *next = NULL;
+	size_t scheduled;
+
+	lockdep_assert_held(&xcu->xcu_lock);
+	for_each_xsched_class(class) {
+		next = class->pick_next_ctx(xcu);
+		if (next) {
+			scheduled = class->select_work ?
+				class->select_work(xcu, next) : select_work_def(xcu, next);
+
+			XSCHED_DEBUG("xse %d scheduled=%zu total=%zu @ %s\n",
+				     next->tgid, scheduled, next->total_scheduled, __func__);
+			break;
+		}
+	}
+
+	return next;
 }
 
 void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
 {
+	lockdep_assert_held(&xcu->xcu_lock);
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return;
+	}
+
+	if (!xse->on_rq) {
+		xse->on_rq = true;
+		xse->class->enqueue_ctx(xse, xcu);
+		XSCHED_DEBUG("Enqueue xse %d @ %s\n", xse->tgid, __func__);
+	}
 }
 
 void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
 {
+	lockdep_assert_held(&xcu->xcu_lock);
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return;
+	}
+
+	if (xse->on_rq) {
+		xse->class->dequeue_ctx(xse);
+		xse->on_rq = false;
+		XSCHED_DEBUG("Dequeue xse %d @ %s\n", xse->tgid, __func__);
+	}
 }
 
 static int delete_ctx(struct xsched_context *ctx)
@@ -218,6 +315,15 @@ struct xsched_cu *xcu_find(uint32_t *type,
 
 int xsched_xse_set_class(struct xsched_entity *xse)
 {
+	switch (xse->task_type) {
+	case XSCHED_TYPE_RT:
+		xse->class = &rt_xsched_class;
+		XSCHED_DEBUG("Context is in RT class @ %s\n", __func__);
+		break;
+	default:
+		XSCHED_ERR("Xse has incorrect class @ %s\n", __func__);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -229,6 +335,10 @@ int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
 	atomic_set(&xse->kicks_pending_ctx_cnt, 0);
 	atomic_set(&xse->submitted_one_kick, 0);
 
+	xse->total_scheduled = 0;
+	xse->total_submitted = 0;
+	xse->last_exec_runtime = 0;
+
 	xse->task_type = XSCHED_TYPE_RT;
 	xse->fd = ctx->fd;
 	xse->tgid = ctx->tgid;
@@ -250,6 +360,25 @@ int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
 		return err;
 	}
 
+	if (xse_is_rt(xse)) {
+		xse->rt.state = XSE_PREPARE;
+		xse->rt.flag = XSE_TIF_NONE;
+		xse->rt.prio = GET_VS_TASK_PRIO_RT(vs);
+		xse->rt.kick_slice = XSCHED_RT_KICK_SLICE;
+
+		/* XSE priority is decreased by 1 here because in
+		 * libucc the priority counter starts from 1 while in
+		 * the kernel it starts from 0.
+		 *
+		 * This inconsistency has to be solved in libucc in the
+		 * future rather than having this confusing decrement to
+		 * the priority inside the kernel.
+		 */
+		if (xse->rt.prio > 0)
+			xse->rt.prio -= 1;
+
+		INIT_LIST_HEAD(&xse->rt.list_node);
+	}
 	WRITE_ONCE(xse->on_rq, false);
 
 	spin_lock_init(&xse->xse_lock);
@@ -261,6 +390,11 @@ static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
 	return 0;
 }
 
+static inline bool should_preempt(struct xsched_entity *xse)
+{
+	return xse->class->check_preempt(xse);
+}
+
 static int xsched_schedule(void *input_xcu)
 {
 	struct xsched_cu *xcu = input_xcu;
@@ -309,19 +443,37 @@ static int xsched_schedule(void *input_xcu)
 	return err;
 }
 
+/* Initializes the xsched RT runqueue during kernel init.
+ * Should only be called from the xsched_init function.
+ */
+static inline void xsched_rt_rq_init(struct xsched_cu *xcu)
+{
+	int prio = 0;
+
+	xcu->xrq.rt.nr_running = 0;
+
+	for_each_xse_prio(prio) {
+		INIT_LIST_HEAD(&xcu->xrq.rt.rq[prio]);
+		xcu->xrq.rt.prio_nr_running[prio] = 0;
+		atomic_set(&xcu->xrq.rt.prio_nr_kicks[prio], 0);
+	}
+}
+
 /* Initialize xsched classes' runqueues. */
 static inline void xsched_rq_init(struct xsched_cu *xcu)
 {
 	xcu->xrq.nr_running = 0;
 	xcu->xrq.curr_xse = NULL;
+	xcu->xrq.class = &rt_xsched_class;
 	xcu->xrq.state = XRQ_STATE_IDLE;
+	xsched_rt_rq_init(xcu);
 }
 
 /* Initializes all xsched XCU objects.
  * Should only be called from xsched_xcu_register function.
  */
 static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
-			   int xcu_id)
+			    int xcu_id)
 {
 	bitmap_clear(xcu_group_root->xcu_mask, 0, XSCHED_NR_CUS);
 
@@ -329,17 +481,19 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 	xcu->state = XSCHED_XCU_NONE;
 	xcu->group = group;
 
+	atomic_set(&xcu->pending_kicks_rt, 0);
 	atomic_set(&xcu->has_active, 0);
 
 	INIT_LIST_HEAD(&xcu->vsm_list);
-	init_waitqueue_head(&xcu->wq_xcu_idle);
-
 	mutex_init(&xcu->xcu_lock);
 
 	/* Mark current XCU in a mask inside XCU root group. */
 	set_bit(xcu->id, xcu_group_root->xcu_mask);
 
+	/* Initialize current XCU's runqueue. */
+	xsched_rq_init(xcu);
+
 	/* This worker should set XCU to XSCHED_XCU_WAIT_IDLE.
 	 * If after initialization XCU still has XSCHED_XCU_NONE
 	 * status then we can assume that there was a problem
diff --git a/kernel/xsched/rt.c b/kernel/xsched/rt.c
new file mode 100644
index 000000000000..c6129b86f105
--- /dev/null
+++ b/kernel/xsched/rt.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Real-Time Scheduling Class for XPU device
+ *
+ * Copyright (C) 2025-2026 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <uapi/linux/sched/types.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/xsched.h>
+#include <linux/vstream.h>
+
+/* Add an xsched entity to a run list based on its priority and
+ * set the corresponding curr_prios bit.
+ */
+static inline void
+xse_rt_add(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+	list_add_tail(&xse->rt.list_node, &xcu->xrq.rt.rq[xse->rt.prio]);
+	__set_bit(xse->rt.prio, xcu->xrq.rt.curr_prios);
+}
+
+/* Delete an xsched entity from a run list and clear the
+ * corresponding curr_prios bit if the list becomes empty.
+ */
+static inline void xse_rt_del(struct xsched_entity *xse)
+{
+	struct xsched_cu *xcu = xse->xcu;
+
+	list_del_init(&xse->rt.list_node);
+	if (list_empty(&xcu->xrq.rt.rq[xse->rt.prio]))
+		__clear_bit(xse->rt.prio, xcu->xrq.rt.curr_prios);
+}
+
+static inline void xse_rt_move_tail(struct xsched_entity *xse)
+{
+	struct xsched_cu *xcu = xse->xcu;
+
+	list_move_tail(&xse->rt.list_node, &xcu->xrq.rt.rq[xse->rt.prio]);
+}
+
+/* Increase RT runqueue total and per prio nr_running stats. */
+static inline void xrq_inc_nr_running(struct xsched_entity *xse,
+				      struct xsched_cu *xcu)
+{
+	xcu->xrq.rt.nr_running++;
+	xcu->xrq.rt.prio_nr_running[xse->rt.prio]++;
+	set_bit(xse->rt.prio, xcu->xrq.rt.curr_prios);
+}
+
+/* Decrease RT runqueue total and per prio nr_running stats
+ * and clear the priority bit when no entity of that priority
+ * is left running.
+ */ +static inline void xrq_dec_nr_running(struct xsched_entity *xse) +{ + struct xsched_cu *xcu = xse->xcu; + + xcu->xrq.rt.nr_running--; + xcu->xrq.rt.prio_nr_running[xse->rt.prio]--; + + if (!xcu->xrq.rt.prio_nr_running[xse->rt.prio]) + clear_bit(xse->rt.prio, xcu->xrq.rt.curr_prios); +} + +static void dequeue_ctx_rt(struct xsched_entity *xse) +{ + xse_rt_del(xse); + xrq_dec_nr_running(xse); +} + +static void enqueue_ctx_rt(struct xsched_entity *xse, struct xsched_cu *xcu) +{ + xse_rt_add(xse, xcu); + xrq_inc_nr_running(xse, xcu); +} + +static inline struct xsched_entity *xrq_next_xse(struct xsched_cu *xcu, + int prio) +{ + return list_first_entry(&xcu->xrq.rt.rq[prio], struct xsched_entity, + rt.list_node); +} + +/* Return the next priority for pick_next_ctx taking into + * account if there are pending kicks on certain priority. + */ +static inline uint32_t get_next_prio_rt(struct xsched_rq *xrq) +{ + int32_t curr_prio; + bool bit_val; + unsigned long *prios = xrq->rt.curr_prios; + atomic_t *prio_nr_kicks = xrq->rt.prio_nr_kicks; + + /* Using generic for loop instead of for_each_set_bit + * because it will be faster than for_each_set_bit. + */ + for (curr_prio = NR_XSE_PRIO - 1; curr_prio >= 0; curr_prio--) { + bit_val = test_bit(curr_prio, prios); + if (!bit_val && atomic_read(&prio_nr_kicks[curr_prio])) { + XSCHED_ERR( + "kicks > 0 on RT priority with the priority bit unset\n"); + BUG(); + return NR_XSE_PRIO; + } + + if (bit_val && atomic_read(&prio_nr_kicks[curr_prio])) + return curr_prio; + } + return NR_XSE_PRIO; +} + +static struct xsched_entity *pick_next_ctx_rt(struct xsched_cu *xcu) +{ + struct xsched_entity *result; + int next_prio; + + next_prio = get_next_prio_rt(&xcu->xrq); + if (next_prio >= NR_XSE_PRIO) { + XSCHED_DEBUG("No pending kicks in RT class @ %s\n", __func__); + return NULL; + } + + if (!xcu->xrq.rt.prio_nr_running[next_prio]) { + XSCHED_ERR( + "The nr_running of RT is 0 while there are pending kicks for %u prio\n", + next_prio); + return NULL; + } + + result = xrq_next_xse(xcu, next_prio); + if (!result) + XSCHED_ERR("Next XSE not found @ %s\n", __func__); + + return result; +} + +static void put_prev_ctx_rt(struct xsched_entity *xse) +{ + xse->rt.kick_slice -= atomic_read(&xse->submitted_one_kick); + XSCHED_DEBUG( + "Update XSE=%d kick_slice=%lld, XSE submitted=%d in RT class @ %s\n", + xse->tgid, xse->rt.kick_slice, + atomic_read(&xse->submitted_one_kick), __func__); + + if (xse->rt.kick_slice <= 0) { + xse->rt.kick_slice = XSCHED_RT_KICK_SLICE; + XSCHED_DEBUG("Refill XSE=%d kick_slice=%lld in RT class @ %s\n", + xse->tgid, xse->rt.kick_slice, __func__); + xse_rt_move_tail(xse); + } +} + +static int submit_prepare_ctx_rt(struct xsched_entity *xse, + struct xsched_cu *xcu) +{ + if (!atomic_read(&xse->kicks_pending_ctx_cnt)) { + xse->rt.state = XSE_READY; + xse->rt.kick_slice = 0; + return -EAGAIN; + } + xse->rt.state = XSE_RUNNING; + + return 0; +} + +static bool check_preempt_ctx_rt(struct xsched_entity *xse) +{ + return true; +} + +static size_t select_work_rt(struct xsched_cu *xcu, struct xsched_entity *xse) +{ + int kick_count, scheduled = 0; + struct vstream_info *vs; + struct vstream_metadata *vsm; + + kick_count = atomic_read(&xse->kicks_pending_ctx_cnt); + XSCHED_DEBUG("Before decrement XSE kick_count=%d @ %s\n", + kick_count, __func__); + + if (kick_count == 0) { + XSCHED_WARN("Try to select xse that has 0 kicks @ %s\n", + __func__); + return 0; + } + + for_each_vstream_in_ctx(vs, xse->ctx) { + spin_lock(&vs->stream_lock); + while ((vsm = 
+			list_add_tail(&vsm->node, &xcu->vsm_list);
+			scheduled++;
+			xsched_dec_pending_kicks_xse(xse);
+		}
+		spin_unlock(&vs->stream_lock);
+	}
+
+	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
+	XSCHED_DEBUG("After decrement XSE kick_count=%d @ %s\n",
+		     kick_count, __func__);
+
+	xse->total_scheduled += scheduled;
+	return scheduled;
+}
+
+const struct xsched_class rt_xsched_class = {
+	.next = NULL,
+	.dequeue_ctx = dequeue_ctx_rt,
+	.enqueue_ctx = enqueue_ctx_rt,
+	.pick_next_ctx = pick_next_ctx_rt,
+	.put_prev_ctx = put_prev_ctx_rt,
+	.submit_prepare_ctx = submit_prepare_ctx_rt,
+	.select_work = select_work_rt,
+	.check_preempt = check_preempt_ctx_rt,
+};
-- 
2.34.1
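
[Reviewer note, not part of the patch]

For readers unfamiliar with the dispatch flow this patch wires up, below is a
minimal userspace sketch (illustrative only, not kernel code) of how
__raw_pick_next_ctx() in core.c walks the singly linked class list, asks the
class to pick an entity, drains its pending kicks via select_work, and then
charges the submitted kicks against the entity's kick_slice the way
put_prev_ctx_rt() does. The stand-in types, the demo entity, and the main()
driver are assumptions made purely so the example compiles standalone.

#include <stdio.h>
#include <stddef.h>

#define XSCHED_RT_KICK_SLICE 20 /* same default budget as the patch */

struct xsched_entity {
	int tgid;
	int pending_kicks;  /* stand-in for kicks_pending_ctx_cnt */
	long kick_slice;    /* remaining budget before requeue */
};

struct xsched_class {
	const struct xsched_class *next;
	struct xsched_entity *(*pick_next_ctx)(void);
	size_t (*select_work)(struct xsched_entity *xse);
	void (*put_prev_ctx)(struct xsched_entity *xse, size_t submitted);
};

static struct xsched_entity demo_xse = {
	.tgid = 42, .pending_kicks = 25, .kick_slice = XSCHED_RT_KICK_SLICE,
};

static struct xsched_entity *rt_pick_next_ctx(void)
{
	/* Only runnable while it still has pending kicks. */
	return demo_xse.pending_kicks ? &demo_xse : NULL;
}

/* Drain all pending kicks, like select_work_rt drains every vstream. */
static size_t rt_select_work(struct xsched_entity *xse)
{
	size_t n = (size_t)xse->pending_kicks;

	xse->pending_kicks = 0;
	return n;
}

/* Charge the slice and refill it once exhausted (cf. put_prev_ctx_rt). */
static void rt_put_prev_ctx(struct xsched_entity *xse, size_t submitted)
{
	xse->kick_slice -= (long)submitted;
	if (xse->kick_slice <= 0) {
		xse->kick_slice = XSCHED_RT_KICK_SLICE;
		printf("xse %d: slice exhausted, refilled and requeued\n",
		       xse->tgid);
	}
}

static const struct xsched_class rt_class = {
	.next = NULL,
	.pick_next_ctx = rt_pick_next_ctx,
	.select_work = rt_select_work,
	.put_prev_ctx = rt_put_prev_ctx,
};

#define xsched_first_class (&rt_class)
#define for_each_xsched_class(c) \
	for ((c) = xsched_first_class; (c); (c) = (c)->next)

int main(void)
{
	const struct xsched_class *class;
	struct xsched_entity *next;

	/* One scheduling pass, mirroring __raw_pick_next_ctx(). */
	for_each_xsched_class(class) {
		next = class->pick_next_ctx();
		if (next) {
			size_t done = class->select_work(next);

			printf("xse %d: submitted %zu kicks\n",
			       next->tgid, done);
			class->put_prev_ctx(next, done);
			break;
		}
	}
	return 0;
}

With the demo values (25 pending kicks against a slice of 20) the entity
overruns its budget in one pass, so the sketch prints the refill-and-requeue
message, which is exactly the round-robin behavior xse_rt_move_tail() gives
entities of equal priority in the patch.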
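A second sketch, for the priority scan: get_next_prio_rt() walks priorities
from high to low and picks the first one that both has queued entities (its
curr_prios bit is set) and has pending kicks. The plain-array stand-ins for
the bitmap and the atomic counters below are simplifying assumptions; the
kernel code uses test_bit() and atomic_read() instead.

#include <stdio.h>

enum xse_prio { XSE_PRIO_LOW, XSE_PRIO_HIGH, NR_XSE_PRIO };

/* Stand-ins for the curr_prios bitmap and prio_nr_kicks counters. */
static int curr_prios[NR_XSE_PRIO];
static int prio_nr_kicks[NR_XSE_PRIO];

/* Returns the highest priority with queued entities and pending kicks,
 * or NR_XSE_PRIO when there is nothing to run.
 */
static int get_next_prio(void)
{
	int prio;

	for (prio = NR_XSE_PRIO - 1; prio >= 0; prio--) {
		if (curr_prios[prio] && prio_nr_kicks[prio])
			return prio;
	}
	return NR_XSE_PRIO;
}

int main(void)
{
	/* A low-prio entity with kicks wins while high prio is idle... */
	curr_prios[XSE_PRIO_LOW] = 1;
	prio_nr_kicks[XSE_PRIO_LOW] = 3;
	printf("next prio: %d\n", get_next_prio()); /* 0 (XSE_PRIO_LOW) */

	/* ...until a high-prio entity gets a kick and wins the scan. */
	curr_prios[XSE_PRIO_HIGH] = 1;
	prio_nr_kicks[XSE_PRIO_HIGH] = 1;
	printf("next prio: %d\n", get_next_prio()); /* 1 (XSE_PRIO_HIGH) */
	return 0;
}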