
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add XCU xsched_schedule() implementation.
Add xsched_rq data structures and related processing logic.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
---
 include/linux/xsched.h  |  79 ++++++++++++++++++
 kernel/xsched/core.c    | 180 +++++++++++++++++++++++++++++++++++++++-
 kernel/xsched/vstream.c |   8 ++
 3 files changed, 266 insertions(+), 1 deletion(-)

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 465b61b07fd6..1652541e39e3 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -59,6 +59,28 @@
 #define GET_VS_TASK_TYPE(vs_ptr) __GET_VS_TASK_TYPE((vs_ptr)->task_type)
 
+enum xsched_rq_state {
+	XRQ_STATE_INACTIVE = 0x00,
+	XRQ_STATE_IDLE = 0x01,
+	XRQ_STATE_BUSY = 0x02,
+	XRQ_STATE_SUBMIT = 0x04,
+	XRQ_STATE_WAIT_RUNNING = 0x08
+};
+
+#define for_each_vstream_in_ctx(vs, ctx) \
+	list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node)
+
+/* Base xsched runqueue object structure that contains both common
+ * and per-class parameters for the different scheduling classes.
+ */
+struct xsched_rq {
+	struct xsched_entity *curr_xse;
+
+	int state;
+	int nr_running;
+};
+
 enum xcu_state {
 	XCU_INACTIVE,
 	XCU_IDLE,
@@ -89,10 +111,15 @@ struct xsched_cu {
 	struct task_struct *worker;
 
+	struct xsched_rq xrq;
+	struct list_head vsm_list;
+
 	struct xcu_group *group;
 	struct mutex xcu_lock;
 
+	atomic_t has_active;
+	wait_queue_head_t wq_xcu_idle;
 	wait_queue_head_t wq_xcu_running;
 	wait_queue_head_t wq_xcore_running;
@@ -134,6 +161,56 @@ struct xsched_entity {
 	spinlock_t xse_lock;
 };
 
+/* Increments the pending kicks counter for the XCU that the given
+ * xsched entity is attached to and for the xsched entity's scheduling
+ * class.
+ */
+static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
+{
+	/* Increment pending kicks for the current XSE. */
+	atomic_inc(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Decrements the pending kicks counter for the XCU that the given
+ * xsched entity is attached to and for the xsched entity's scheduling
+ * class.
+ */
+static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
+{
+	/* Decrement pending kicks for the current XSE. */
+	atomic_dec(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Checks if there are pending kicks left on a given XCU for all
+ * xsched classes.
+ */
+static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
+{
+	return false;
+}
+
+/* Returns 1 if the given xsched entity is valid, 0 otherwise. */
+static inline int xse_integrity_check(const struct xsched_entity *xse)
+{
+	if (!xse) {
+		XSCHED_ERR("xse is null @ %s\n", __func__);
+		return 0;
+	}
+
+	if (!xse->class) {
+		XSCHED_ERR("xse->class is null @ %s\n", __func__);
+		return 0;
+	}
+
+	return 1;
+}
+
 struct xsched_context {
 	uint32_t fd;
 	uint32_t devId;
@@ -191,4 +268,6 @@ struct xsched_cu *xcu_find(__u32 *type, __u32 devId, __u32 channel_id);
 
 /* Vstream metadata processing functions. */
 int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg);
+struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs);
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu);
 #endif /* !__LINUX_XSCHED_H__ */
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 3ae3c78d8222..8a85fc0db45d 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -16,6 +16,7 @@
  * more details.
  *
  */
+#include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
@@ -39,6 +40,70 @@ DEFINE_MUTEX(xsched_ctx_list_mutex);
 static DEFINE_MUTEX(revmap_mutex);
 static DEFINE_HASHTABLE(ctx_revmap, XCU_HASH_ORDER);
 
+static void put_prev_ctx(struct xsched_entity *xse)
+{
+}
+
+static struct xsched_entity *__raw_pick_next_ctx(struct xsched_cu *xcu)
+{
+	return NULL;
+}
+
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+}
+
+void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+}
+
+static int delete_ctx(struct xsched_context *ctx)
+{
+	struct xsched_entity *xse = &ctx->xse;
+	struct xsched_cu *xcu;
+
+	XSCHED_CALL_STUB();
+
+	if (!xse_integrity_check(xse)) {
+		XSCHED_ERR("Failed xse integrity check @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!xse->xcu) {
+		XSCHED_ERR("Trying to delete ctx that is not attached to an XCU @ %s\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	xcu = xse->xcu;
+
+	/* Wait until all of the context's pending kicks have been submitted. */
+	while (atomic_read(&xse->kicks_pending_ctx_cnt)) {
+		XSCHED_INFO(
+			"Deleting ctx %d, xse->kicks_pending_ctx_cnt=%d @ %s\n",
+			xse->tgid, atomic_read(&xse->kicks_pending_ctx_cnt),
+			__func__);
+		usleep_range(100, 200);
+	}
+
+	mutex_lock(&xcu->xcu_lock);
+	if (xcu->xrq.curr_xse == xse)
+		xcu->xrq.curr_xse = NULL;
+
+	dequeue_ctx(xse, xcu);
+	mutex_unlock(&xcu->xcu_lock);
+
+	XSCHED_INFO("Deleting ctx %d, pending kicks left=%d @ %s\n", xse->tgid,
+		    atomic_read(&xse->kicks_pending_ctx_cnt), __func__);
+
+	XSCHED_EXIT_STUB();
+
+	return 0;
+}
+
 /* Frees a given vstream and also frees and dequeues its context
  * if a given vstream is the last and only vstream attached to its
  * corresponding context object.
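[Editor's note for reviewers: enqueue_ctx() and dequeue_ctx() are left as
empty stubs in this patch, presumably to be filled in by the scheduling-class
patches later in the series. As a minimal FIFO-flavored sketch of the intended
semantics only — the 'rq_node' member of struct xsched_entity and the
'rq_head' list in struct xsched_rq are hypothetical names used purely for
illustration, not introduced by this patch, and callers are assumed to hold
xcu->xcu_lock:

/* Sketch only: 'rq_node' and 'rq_head' are hypothetical members.
 * Callers hold xcu->xcu_lock; on_rq is also read locklessly in
 * xsched_free_task(), hence the READ_ONCE()/WRITE_ONCE() pairs.
 */
void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
{
	if (READ_ONCE(xse->on_rq))
		return;		/* already queued */

	list_add_tail(&xse->rq_node, &xcu->xrq.rq_head);
	WRITE_ONCE(xse->on_rq, true);
	xcu->xrq.nr_running++;
	atomic_set(&xcu->has_active, 1);
}

void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
{
	if (!READ_ONCE(xse->on_rq))
		return;

	list_del_init(&xse->rq_node);
	WRITE_ONCE(xse->on_rq, false);
	if (--xcu->xrq.nr_running == 0)
		atomic_set(&xcu->has_active, 0);
}

With this shape, the enqueue_ctx() + wake_up_interruptible() pairing in
vstream_kick() below is what satisfies the wait_event_interruptible()
condition (has_active || nr_running) in xsched_schedule().]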
@@ -52,6 +117,9 @@ void xsched_free_task(struct kref *kref)
 
 	ctx = container_of(kref, struct xsched_context, kref);
 
+	while (READ_ONCE(ctx->xse.on_rq))
+		usleep_range(100, 200);
+
 	mutex_lock(&xsched_ctx_list_mutex);
 	list_for_each_entry_safe(vs, tmp, &ctx->vstream_list, ctx_node) {
 		list_del(&vs->ctx_node);
@@ -59,6 +127,7 @@ void xsched_free_task(struct kref *kref)
 		kfree(vs);
 	}
 
+	delete_ctx(ctx);
 	list_del(&ctx->ctx_node);
 	mutex_unlock(&xsched_ctx_list_mutex);
 
@@ -264,11 +333,82 @@ int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
 	return err;
 }
 
-static int xsched_schedule(void *input_xcu)
+static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
 {
 	return 0;
 }
 
+static int xsched_schedule(void *input_xcu)
+{
+	struct xsched_cu *xcu = input_xcu;
+	struct xsched_entity *curr_xse = NULL;
+	struct xsched_entity *next_xse = NULL;
+	int err = 0;
+
+	XSCHED_CALL_STUB();
+
+	while (!kthread_should_stop()) {
+		mutex_unlock(&xcu->xcu_lock);
+		XSCHED_INFO("%s: XCU lock released\n", __func__);
+
+		wait_event_interruptible(xcu->wq_xcu_idle,
+					 atomic_read(&xcu->has_active) ||
+					 xcu->xrq.nr_running);
+
+		XSCHED_INFO("%s: nr_running = %d, has_active = %d\n",
+			    __func__, xcu->xrq.nr_running,
+			    atomic_read(&xcu->has_active));
+
+		mutex_lock(&xcu->xcu_lock);
+		XSCHED_INFO("%s: XCU lock taken\n", __func__);
+
+		if (!xsched_check_pending_kicks_xcu(xcu)) {
+			XSCHED_ERR("%s: No pending kicks for xcu %u\n",
+				   __func__, xcu->id);
+			continue;
+		}
+
+		next_xse = __raw_pick_next_ctx(xcu);
+		if (!next_xse) {
+			XSCHED_ERR("%s: Couldn't find next xse for xcu %u\n",
+				   __func__, xcu->id);
+			continue;
+		}
+
+		xcu->xrq.curr_xse = next_xse;
+		__XSCHED_TRACE("%s: Pick next ctx returned xse %d\n",
+			       __func__, next_xse->tgid);
+
+		if (__xsched_submit(xcu, next_xse)) {
+			XSCHED_ERR("%s: Xse %d on XCU %u tried to submit with zero kicks\n",
+				   __func__, next_xse->tgid, xcu->id);
+			continue;
+		}
+
+		curr_xse = xcu->xrq.curr_xse;
+		if (curr_xse) { /* if not deleted yet */
+			put_prev_ctx(curr_xse);
+			if (!atomic_read(&curr_xse->kicks_pending_ctx_cnt)) {
+				dequeue_ctx(curr_xse, xcu);
+				XSCHED_INFO(
+					"%s: Dequeue xse %d due to zero kicks on xcu %u\n",
+					__func__, curr_xse->tgid, xcu->id);
+				curr_xse = xcu->xrq.curr_xse = NULL;
+			}
+		}
+	}
+
+	XSCHED_INFO("Xsched_schedule finished for xcu %u\n", xcu->id);
+
+	XSCHED_EXIT_STUB();
+
+	return err;
+}
+
+/* Initialize the xsched runqueue of a given XCU. */
+static inline void xsched_rq_init(struct xsched_cu *xcu)
+{
+	xcu->xrq.nr_running = 0;
+	xcu->xrq.curr_xse = NULL;
+	xcu->xrq.state = XRQ_STATE_IDLE;
+}
+
 /* Initializes all xsched XCU objects.
  * Should only be called from xsched_register_xcu function.
  */
@@ -281,6 +421,12 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 	xcu->state = XSCHED_XCU_NONE;
 	xcu->group = group;
 
+	atomic_set(&xcu->has_active, 0);
+
+	INIT_LIST_HEAD(&xcu->vsm_list);
+
+	init_waitqueue_head(&xcu->wq_xcu_idle);
+
 	mutex_init(&xcu->xcu_lock);
 
 	/* Mark current XCU in a mask inside XCU root group. */
@@ -344,6 +490,38 @@ int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg)
 	return err;
 }
 
+/* Fetches the first vstream metadata entry from the vstream's metadata
+ * list and removes it from that list. The caller is responsible for
+ * freeing the returned pointer.
+ */
+struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs)
+{
+	struct vstream_metadata *vsm;
+
+	if (list_empty(&vs->metadata_list)) {
+		XSCHED_INFO("No metadata to fetch from vs %u @ %s\n", vs->id,
+			    __func__);
+		return NULL;
+	}
+
+	vsm = list_first_entry(&vs->metadata_list, struct vstream_metadata,
+			       node);
+	list_del(&vsm->node);
+	vs->kicks_count -= 1;
+
+	return vsm;
+}
+
 /*
  * Initialize and register xcu in xcu_manager array.
  */
diff --git a/kernel/xsched/vstream.c b/kernel/xsched/vstream.c
index 1d1b4e436f69..c7e3772009bf 100644
--- a/kernel/xsched/vstream.c
+++ b/kernel/xsched/vstream.c
@@ -515,9 +515,17 @@ int vstream_kick(struct vstream_args *arg)
 		goto out_err;
 	}
 
+	enqueue_ctx(xse, xcu);
+
+	/* Increase the total number of kicks on the XCU to which this
+	 * context is attached, based on its sched_class.
+	 */
+	xsched_inc_pending_kicks_xse(&vstream->ctx->xse);
+
 	spin_unlock(&vstream->stream_lock);
 	XSCHED_INFO("vstream lock released @ %s\n", __func__);
 	mutex_unlock(&xcu->xcu_lock);
+	wake_up_interruptible(&xcu->wq_xcu_idle);
 
 out_err:
 	XSCHED_EXIT_STUB();
-- 
2.34.1
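[Editor's note for reviewers: __xsched_submit() is left returning 0 in this
patch, so the schedule loop compiles but submits nothing yet. A hedged sketch
of the drain loop it could grow into, tying together for_each_vstream_in_ctx(),
xsched_vsm_fetch_first() and the pending-kick counters, follows. xcu_run() is
a hypothetical per-XCU submission hook, not an API introduced by this series;
the caller is assumed to hold xcu->xcu_lock, as xsched_schedule() does:

static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
{
	struct xsched_context *ctx =
		container_of(xse, struct xsched_context, xse);
	struct vstream_info *vs;
	struct vstream_metadata *vsm;
	int submitted = 0;

	/* Drain every pending kick queued on the context's vstreams. */
	for_each_vstream_in_ctx(vs, ctx) {
		while ((vsm = xsched_vsm_fetch_first(vs)) != NULL) {
			xcu_run(xcu, vs, vsm);	/* hypothetical submit hook */
			xsched_dec_pending_kicks_xse(xse);
			kfree(vsm);	/* fetch_first hands off ownership */
			submitted++;
		}
	}

	/* A nonzero return here is what trips the "tried to submit with
	 * zero kicks" error path in xsched_schedule().
	 */
	return submitted ? 0 : -ENOENT;
}

Note the ownership convention: xsched_vsm_fetch_first() unlinks the metadata
entry and the submit path frees it, which matches the "to be freed by the
caller" contract on that function.]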