
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add the XCU xsched_schedule() implementation. Add the xsched_rq data
structures and related processing.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Liu Kai <liukai284@huawei.com>
Signed-off-by: Xia Fukun <xiafukun@huawei.com>
---
 include/linux/xsched.h  |  79 +++++++++++++++++++-
 kernel/xsched/core.c    | 161 +++++++++++++++++++++++++++++++++++++++-
 kernel/xsched/vstream.c |   8 ++
 3 files changed, 246 insertions(+), 2 deletions(-)

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 47f0a43a72dc..efe5d92a5acd 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -39,6 +39,28 @@
 
 #define GET_VS_TASK_TYPE(vs_ptr) __GET_VS_TASK_TYPE((vs_ptr)->task_type)
 
+enum xsched_rq_state {
+	XRQ_STATE_INACTIVE = 0x00,
+	XRQ_STATE_IDLE = 0x01,
+	XRQ_STATE_BUSY = 0x02,
+	XRQ_STATE_SUBMIT = 0x04,
+	XRQ_STATE_WAIT_RUNNING = 0x08,
+};
+
+#define for_each_vstream_in_ctx(vs, ctx) \
+	list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node)
+
+/* Base XSched runqueue object structure that contains both common and
+ * per-class parameters for the different scheduling classes.
+ */
+struct xsched_rq {
+	struct xsched_entity *curr_xse;
+
+	int state;
+	int nr_running;
+};
+
 enum xcu_state {
 	XCU_INACTIVE,
 	XCU_IDLE,
@@ -69,10 +91,15 @@ struct xsched_cu {
 
 	struct task_struct *worker;
 
+	struct xsched_rq xrq;
+	struct list_head vsm_list;
+
 	struct xcu_group *group;
 
 	struct mutex xcu_lock;
 
+	atomic_t has_active;
+
 	wait_queue_head_t wq_xcu_idle;
 	wait_queue_head_t wq_xcu_running;
 };
@@ -113,6 +140,53 @@ struct xsched_entity {
 	spinlock_t xse_lock;
 };
 
+/* Increments the pending kicks counter for the XCU that the given
+ * xsched entity is attached to, and for the xsched entity's sched
+ * class.
+ */
+static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
+{
+	/* Increment pending kicks for the current XSE. */
+	atomic_inc(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Decrements the pending kicks counter for the XCU that the given
+ * xsched entity is attached to, and for the xsched entity's sched
+ * class.
+ */
+static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
+{
+	/* Decrement pending kicks for the current XSE. */
+	atomic_dec(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Checks whether there are pending kicks left on a given XCU across
+ * all xsched classes.
+ */
+static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
+{
+	return false;
+}
+
+static inline int xse_integrity_check(const struct xsched_entity *xse)
+{
+	if (!xse) {
+		XSCHED_ERR("xse is null @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!xse->class) {
+		XSCHED_ERR("xse->class is null @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 struct xsched_context {
 	uint32_t fd;
 	uint32_t dev_id;
@@ -170,4 +244,7 @@ struct xsched_cu *xcu_find(uint32_t *type,
 /* Vstream metadata proccesing functions.*/
 int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg);
 
-#endif /* !__LINUX_XSCHED_H__ */
+struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs);
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu);
+void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu);
+#endif /* __LINUX_XSCHED_H__ */
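The two helpers above reduce to a per-entity atomic counter: the kick path
increments kicks_pending_ctx_cnt once per submitted kick, the submit path
decrements it once per processed kick, and a context is idle exactly when the
counter reads zero. A minimal userspace model of that invariant, using C11
atomics (the xse_model type and function names are illustrative stand-ins,
not the kernel API):

/* Userspace sketch of the pending-kicks accounting; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

struct xse_model {
	atomic_int kicks_pending_ctx_cnt;
};

static void model_inc_pending_kicks(struct xse_model *xse)
{
	/* Kick path: one increment per submitted kick. */
	atomic_fetch_add(&xse->kicks_pending_ctx_cnt, 1);
}

static void model_dec_pending_kicks(struct xse_model *xse)
{
	/* Submit path: one decrement per processed kick. */
	atomic_fetch_sub(&xse->kicks_pending_ctx_cnt, 1);
}

int main(void)
{
	struct xse_model xse = { .kicks_pending_ctx_cnt = 0 };

	model_inc_pending_kicks(&xse);
	model_inc_pending_kicks(&xse);
	model_dec_pending_kicks(&xse);

	printf("pending kicks: %d\n",
	       atomic_load(&xse.kicks_pending_ctx_cnt));
	return 0;
}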
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 4866b1774383..09b2d2c47652 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -14,6 +14,7 @@
  * more details.
  *
  */
+#include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
@@ -37,6 +38,66 @@ DEFINE_MUTEX(xsched_ctx_list_mutex);
 static DEFINE_MUTEX(revmap_mutex);
 static DEFINE_HASHTABLE(ctx_revmap, XCU_HASH_ORDER);
 
+static void put_prev_ctx(struct xsched_entity *xse)
+{
+}
+
+static struct xsched_entity *__raw_pick_next_ctx(struct xsched_cu *xcu)
+{
+	return NULL;
+}
+
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+}
+
+void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+}
+
+static int delete_ctx(struct xsched_context *ctx)
+{
+	struct xsched_cu *xcu = ctx->xse.xcu;
+	struct xsched_entity *curr_xse = xcu->xrq.curr_xse;
+	struct xsched_entity *xse = &ctx->xse;
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!xse->xcu) {
+		XSCHED_ERR("Try to delete ctx that is not attached to xcu @ %s\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	/* Wait till the context's pending kicks have been submitted. */
+	while (atomic_read(&xse->kicks_pending_ctx_cnt)) {
+		XSCHED_DEBUG("Deleting ctx %d, xse->kicks_pending_ctx_cnt=%d @ %s\n",
+			     xse->tgid, atomic_read(&xse->kicks_pending_ctx_cnt),
+			     __func__);
+		usleep_range(100, 200);
+	}
+
+	if (atomic_read(&xse->kicks_pending_ctx_cnt)) {
+		XSCHED_ERR("Deleting ctx %d that has pending kicks left @ %s\n",
+			   xse->tgid, __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&xcu->xcu_lock);
+	if (curr_xse == xse)
+		xcu->xrq.curr_xse = NULL;
+
+	dequeue_ctx(xse, xcu);
+	mutex_unlock(&xcu->xcu_lock);
+	XSCHED_DEBUG("Deleting ctx %d, pending kicks left=%d @ %s\n", xse->tgid,
+		     atomic_read(&xse->kicks_pending_ctx_cnt), __func__);
+
+	return 0;
+}
+
 /* Frees a given vstream and also frees and dequeues it's context
  * if a given vstream is the last and only vstream attached to it's
  * corresponding context object.
@@ -48,6 +109,10 @@ void xsched_task_free(struct kref *kref)
 
 	ctx = container_of(kref, struct xsched_context, kref);
 
+	/* Wait till the xse has been dequeued. */
+	while (READ_ONCE(ctx->xse.on_rq))
+		usleep_range(100, 200);
+
 	mutex_lock(&xsched_ctx_list_mutex);
 	list_for_each_entry_safe(vs, tmp, &ctx->vstream_list, ctx_node) {
 		list_del(&vs->ctx_node);
@@ -55,6 +120,7 @@ void xsched_task_free(struct kref *kref)
 		kfree(vs);
 	}
 
+	delete_ctx(ctx);
 	list_del(&ctx->ctx_node);
 	mutex_unlock(&xsched_ctx_list_mutex);
 
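The teardown path above layers two waits: xsched_task_free() polls xse.on_rq
until the scheduler has dequeued the entity, and delete_ctx() polls
kicks_pending_ctx_cnt until the submit path has drained every kick, only then
taking xcu_lock and dequeuing. A compilable userspace sketch of that
drain-then-delete ordering (thread roles, the initial count, and the delays
are assumptions for illustration, not the kernel's exact behavior):

/* Build with: cc -pthread drain.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_int pending = 3;	/* models kicks_pending_ctx_cnt */

static void *consumer(void *arg)
{
	(void)arg;
	/* Models the submit path draining kicks one by one. */
	while (atomic_load(&pending) > 0) {
		struct timespec ts = { 0, 1000000 };	/* 1 ms per kick */
		nanosleep(&ts, NULL);
		atomic_fetch_sub(&pending, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);

	/* Models delete_ctx(): wait until no kicks are pending; only
	 * then is it safe to dequeue and free the context. */
	while (atomic_load(&pending) > 0) {
		struct timespec ts = { 0, 100000 };	/* ~100 us poll */
		nanosleep(&ts, NULL);
	}

	pthread_join(t, NULL);
	printf("drained, safe to dequeue\n");
	return 0;
}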
@@ -190,11 +256,67 @@ int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
 	return err;
 }
 
-static int xsched_schedule(void *input_xcu)
+static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
 {
 	return 0;
 }
 
+static int xsched_schedule(void *input_xcu)
+{
+	struct xsched_cu *xcu = input_xcu;
+	int err = 0;
+	struct xsched_entity *curr_xse = NULL;
+	struct xsched_entity *next_xse = NULL;
+
+	while (!kthread_should_stop()) {
+		mutex_unlock(&xcu->xcu_lock);
+		wait_event_interruptible(xcu->wq_xcu_idle,
+			atomic_read(&xcu->has_active) || xcu->xrq.nr_running);
+
+		XSCHED_DEBUG("%s: nr_running = %d, has_active = %d\n",
+			     __func__, xcu->xrq.nr_running,
+			     atomic_read(&xcu->has_active));
+
+		mutex_lock(&xcu->xcu_lock);
+		if (!xsched_check_pending_kicks_xcu(xcu)) {
+			XSCHED_WARN("%s: No pending kicks on xcu %u\n",
+				    __func__, xcu->id);
+			continue;
+		}
+
+		next_xse = __raw_pick_next_ctx(xcu);
+		if (!next_xse) {
+			XSCHED_WARN("%s: Couldn't find next xse on xcu %u\n",
+				    __func__, xcu->id);
+			continue;
+		}
+
+		xcu->xrq.curr_xse = next_xse;
+
+		if (__xsched_submit(xcu, next_xse) == 0)
+			continue;
+
+		curr_xse = xcu->xrq.curr_xse;
+		if (curr_xse) { /* if not deleted yet */
+			put_prev_ctx(curr_xse);
+			if (!atomic_read(&curr_xse->kicks_pending_ctx_cnt)) {
+				dequeue_ctx(curr_xse, xcu);
+				XSCHED_DEBUG("%s: Dequeue xse %d due to zero kicks on xcu %u\n",
+					     __func__, curr_xse->tgid, xcu->id);
+				curr_xse = xcu->xrq.curr_xse = NULL;
+			}
+		}
+	}
+
+	return err;
+}
+
+/* Initialize the xsched classes' runqueues. */
+static inline void xsched_rq_init(struct xsched_cu *xcu)
+{
+	xcu->xrq.nr_running = 0;
+	xcu->xrq.curr_xse = NULL;
+	xcu->xrq.state = XRQ_STATE_IDLE;
+}
+
 /* Initializes all xsched XCU objects.
  * Should only be called from xsched_xcu_register function.
  */
@@ -207,6 +329,12 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 	xcu->state = XSCHED_XCU_NONE;
 	xcu->group = group;
 
+	atomic_set(&xcu->has_active, 0);
+
+	INIT_LIST_HEAD(&xcu->vsm_list);
+
+	init_waitqueue_head(&xcu->wq_xcu_idle);
+
 	mutex_init(&xcu->xcu_lock);
 
 	/* Mark current XCU in a mask inside XCU root group. */
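xsched_schedule() above parks on wq_xcu_idle until either has_active is set
or the runqueue reports nr_running, and vstream_kick() (later in this patch)
wakes that waitqueue after publishing work. The same publish-then-wake
protocol can be modeled in userspace with a pthread condition variable
standing in for the kernel waitqueue (all names here are illustrative):

/* Build with: cc -pthread wake.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_xcu_idle = PTHREAD_COND_INITIALIZER;
static int has_active;
static int nr_running;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Mirrors wait_event_interruptible(wq_xcu_idle, cond). */
	while (!has_active && !nr_running)
		pthread_cond_wait(&wq_xcu_idle, &lock);
	printf("worker: woke up, nr_running=%d has_active=%d\n",
	       nr_running, has_active);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* Kick path: publish the work first, then wake the worker,
	 * as vstream_kick() does with wake_up_interruptible(). */
	pthread_mutex_lock(&lock);
	nr_running = 1;
	has_active = 1;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wq_xcu_idle);

	pthread_join(t, NULL);
	return 0;
}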
@@ -263,6 +391,37 @@ int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg)
 	return 0;
 }
 
+/* Fetches the first vstream metadata entry from the vstream's metadata
+ * list and removes it from that list. The returned vstream metadata
+ * pointer must be freed by the caller afterwards.
+ */
+struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs)
+{
+	struct vstream_metadata *vsm;
+
+	if (list_empty(&vs->metadata_list)) {
+		XSCHED_DEBUG("No metadata to fetch from vs %u @ %s\n",
+			     vs->id, __func__);
+		return NULL;
+	}
+
+	vsm = list_first_entry(&vs->metadata_list, struct vstream_metadata, node);
+	if (!vsm) {
+		XSCHED_ERR("Corrupted metadata list in vs %u @ %s\n",
+			   vs->id, __func__);
+		return NULL;
+	}
+
+	list_del(&vsm->node);
+	if (vs->kicks_count == 0)
+		XSCHED_WARN("kicks_count underflow in vs %u @ %s\n",
+			    vs->id, __func__);
+	else
+		vs->kicks_count -= 1;
+
+	return vsm;
+}
+
 /*
  * Initialize and register xcu in xcu_manager array.
  */
diff --git a/kernel/xsched/vstream.c b/kernel/xsched/vstream.c
index 0c37abb4ff50..869b3c56aad0 100644
--- a/kernel/xsched/vstream.c
+++ b/kernel/xsched/vstream.c
@@ -442,10 +442,18 @@ int vstream_kick(struct vstream_args *arg)
 				   vstream->id, __func__);
 			break;
 		}
+
+		enqueue_ctx(xse, xcu);
+		/* Increase the total number of kicks on the XCU to which
+		 * this context is attached, based on sched_class.
+		 */
+		xsched_inc_pending_kicks_xse(&vstream->ctx->xse);
 	} while (err == -EBUSY);
 
 	spin_unlock(&vstream->stream_lock);
 	mutex_unlock(&xcu->xcu_lock);
+	if (!err)
+		wake_up_interruptible(&xcu->wq_xcu_idle);
 
 	return err;
 }
-- 
2.34.1
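Taken together, xsched_vsm_add_tail() and xsched_vsm_fetch_first() implement
a per-vstream FIFO whose length is mirrored by kicks_count, including the
underflow guard on the consumer side. A self-contained userspace model of
that queue discipline (the vs_model/vsm_model types are hypothetical
stand-ins for the kernel's list_head-based structures):

/* Userspace FIFO model of the vstream metadata list; illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct vsm_model {
	int payload;
	struct vsm_model *next;
};

struct vs_model {
	struct vsm_model *head, *tail;
	unsigned int kicks_count;
};

static void vsm_add_tail(struct vs_model *vs, int payload)
{
	struct vsm_model *vsm = calloc(1, sizeof(*vsm));

	if (!vsm)
		return;
	vsm->payload = payload;
	if (vs->tail)
		vs->tail->next = vsm;
	else
		vs->head = vsm;
	vs->tail = vsm;
	vs->kicks_count++;	/* one queued entry per kick */
}

static struct vsm_model *vsm_fetch_first(struct vs_model *vs)
{
	struct vsm_model *vsm = vs->head;

	if (!vsm)
		return NULL;	/* nothing to fetch */
	vs->head = vsm->next;
	if (!vs->head)
		vs->tail = NULL;
	if (vs->kicks_count == 0)
		fprintf(stderr, "kicks_count underflow\n");
	else
		vs->kicks_count--;
	return vsm;		/* caller must free, as in the patch */
}

int main(void)
{
	struct vs_model vs = { 0 };
	struct vsm_model *vsm;

	vsm_add_tail(&vs, 1);
	vsm_add_tail(&vs, 2);
	while ((vsm = vsm_fetch_first(&vs))) {
		printf("fetched %d, kicks left %u\n", vsm->payload,
		       vs.kicks_count);
		free(vsm);
	}
	return 0;
}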