From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add a kernel send/receive mechanism: deliver queued kernels to the
XPU device and trigger a new scheduling round after the kernels have
completed processing. Implement the scheduling core abstraction so
that RT/CFS sched classes can be added on top of it later.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Liu Kai <liukai284@huawei.com>
Signed-off-by: Xia Fukun <xiafukun@huawei.com>
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
 drivers/xcu/xcu_group.c          |  16 ++
 include/linux/vstream.h          |  32 +++
 include/linux/xcu_group.h        |   7 +
 include/linux/xsched.h           | 144 +++++++++++
 include/uapi/linux/xcu_vstream.h |   3 +
 kernel/xsched/core.c             | 393 +++++++++++++++++++++++++++++++
 kernel/xsched/vstream.c          |  61 ++++-
 7 files changed, 655 insertions(+), 1 deletion(-)

diff --git a/drivers/xcu/xcu_group.c b/drivers/xcu/xcu_group.c
index 3215f37e4ece..54d389534508 100644
--- a/drivers/xcu/xcu_group.c
+++ b/drivers/xcu/xcu_group.c
@@ -221,6 +221,22 @@ int xcu_logic_free(struct xcu_op_handler_params *params)
 	return params->group->opt->logic_free(params);
 }
 
+/* This function runs a "sqe_op" callback for a given xcu_group
+ * and a given vstream that are passed within an
+ * xcu_op_handler_params object.
+ *
+ * This handler provides an interface to set or get SQE info.
+ */
+int xcu_sqe_op(struct xcu_op_handler_params *params)
+{
+	if (!params->group->opt || !params->group->opt->sqe_op) {
+		XSCHED_ERR("No function [sqe_op] called.\n");
+		return -EINVAL;
+	}
+
+	return params->group->opt->sqe_op(params);
+}
+
 static struct xcu_group __xcu_group_root = {
 	.id = 0,
 	.type = XCU_TYPE_ROOT,
diff --git a/include/linux/vstream.h b/include/linux/vstream.h
index ffab65889036..fd393ec97a99 100644
--- a/include/linux/vstream.h
+++ b/include/linux/vstream.h
@@ -3,6 +3,38 @@
 #define _LINUX_VSTREAM_H
 
 #include <uapi/linux/xcu_vstream.h>
+#include <linux/ktime.h>
+
+#define MAX_VSTREAM_SIZE 2048
+
+/* Vstream metadata describes each incoming kick.
+ * Kicks are stored in a list of pending kicks
+ * inside a vstream to keep track of what is left
+ * to be processed by a driver.
+ */
+typedef struct vstream_metadata {
+	uint32_t exec_time;
+	/* The SQ tail value that was passed with the
+	 * kick described by this metadata object.
+	 */
+	uint32_t sq_tail;
+	uint32_t sqe_num;
+	uint32_t sq_id;
+	uint8_t sqe[XCU_SQE_SIZE_MAX];
+
+	/* Report buffer for fake read. */
+	int8_t cqe[XCU_CQE_BUF_SIZE];
+	uint32_t cqe_num;
+	int32_t timeout;
+
+	/* A node for the metadata list. */
+	struct list_head node;
+
+	struct vstream_info *parent;
+
+	/* Time of list insertion. */
+	ktime_t add_time;
+} vstream_metadata_t;
 
 typedef int vstream_manage_t(struct vstream_args *arg);
 
diff --git a/include/linux/xcu_group.h b/include/linux/xcu_group.h
index b24641b98e6a..c129dca32c51 100644
--- a/include/linux/xcu_group.h
+++ b/include/linux/xcu_group.h
@@ -17,6 +17,11 @@ enum xcu_type {
 	XCU_TYPE_XPU,
 };
 
+enum xcu_sqe_op_type {
+	SQE_SET_NOTIFY,
+	SQE_IS_NOTIFY,
+};
+
 struct xcu_op_handler_params {
 	int fd;
 	struct xcu_group *group;
@@ -45,6 +50,7 @@ struct xcu_operation {
 	xcu_op_handler_fn_t alloc;
 	xcu_op_handler_fn_t logic_alloc;
 	xcu_op_handler_fn_t logic_free;
+	xcu_op_handler_fn_t sqe_op;
 };
 
 struct xcu_group {
@@ -83,6 +89,7 @@ extern int xcu_finish(struct xcu_op_handler_params *params);
 extern int xcu_alloc(struct xcu_op_handler_params *params);
 extern int xcu_logic_alloc(struct xcu_op_handler_params *params);
 extern int xcu_logic_free(struct xcu_op_handler_params *params);
+extern int xcu_sqe_op(struct xcu_op_handler_params *params);
 
 int xsched_xcu_register(struct xcu_group *group, uint32_t phys_id);
 int xsched_xcu_unregister(struct xcu_group *group, uint32_t phys_id);
diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index d0753639a9f2..d52461e63d8a 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -38,6 +38,30 @@
 
 #define MAX_VSTREAM_NUM 512
 
+enum xcu_sched_type {
+	XSCHED_TYPE_NUM
+};
+
+#define xsched_first_class \
+	list_first_entry(&(xsched_class_list), struct xsched_class, node)
+
+#define for_each_xsched_class(class) \
+	list_for_each_entry((class), &(xsched_class_list), node)
+
+#define for_each_vstream_in_ctx(vs, ctx) \
+	list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node)
+
+/* Base XSched runqueue object structure that contains both shared
+ * and per-class parameters of the different scheduling classes.
+ */
+struct xsched_rq {
+	struct xsched_entity *curr_xse;
+	const struct xsched_class *class;
+
+	int state;
+	int nr_running;
+};
+
 enum xsched_cu_status {
 	/* Worker not initialized. */
 	XSCHED_XCU_NONE,
@@ -58,11 +82,21 @@ enum xsched_cu_status {
 struct xsched_cu {
 	uint32_t id;
 	uint32_t state;
+
+	atomic_t pending_kicks;
 	struct task_struct *worker;
+
+	/* Storage list for contexts associated with this XCU. */
+	uint32_t nr_ctx;
 	struct list_head ctx_list;
 	struct mutex ctx_list_lock;
+
 	vstream_info_t *vs_array[MAX_VSTREAM_NUM];
 	struct mutex vs_array_lock;
+
+	struct xsched_rq xrq;
+	struct list_head vsm_list;
+
 	struct xcu_group *group;
 	struct mutex xcu_lock;
 	wait_queue_head_t wq_xcu_idle;
@@ -76,6 +110,15 @@ struct xsched_entity {
 	pid_t owner_pid;
 	pid_t tgid;
 
+	/* Number of pending kicks currently attached to this context. */
+	atomic_t kicks_pending_ctx_cnt;
+
+	/* Kicks submitted since this context was last picked; used for resched. */
+	atomic_t submitted_one_kick;
+
+	size_t total_scheduled;
+	size_t total_submitted;
+
 	/* File descriptor coming from an associated context
 	 * used for identifying a given xsched entity in
 	 * info and error prints.
@@ -88,6 +131,9 @@ struct xsched_entity {
 	/* Pointer to context object. */
 	struct xsched_context *ctx;
 
+	/* Xsched entity execution statistics. */
+	u64 last_exec_runtime;
+
 	/* Pointer to an XCU object that represents an XCU
 	 * on which this xse is to be processed or is being
 	 * processed currently.
@@ -98,6 +144,55 @@ struct xsched_entity {
 	spinlock_t xse_lock;
 };
 
+/* Increments the pending kicks counter for the XCU that the given
+ * xsched entity is attached to and for the xsched entity's xsched
+ * class.
+ */
+static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
+{
+	atomic_inc(&xse->xcu->pending_kicks);
+	/* Increment pending kicks for the current XSE. */
+	atomic_inc(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Decrements the pending kicks counter for the XCU that the given
+ * xsched entity is attached to and for the xsched entity's sched
+ * class.
+ */
+static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
+{
+	atomic_dec(&xse->xcu->pending_kicks);
+	/* Decrement pending kicks for the current XSE. */
+	atomic_dec(&xse->kicks_pending_ctx_cnt);
+
+	return 0;
+}
+
+/* Checks whether there are pending kicks left on a given XCU for all
+ * xsched classes.
+ */
+static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
+{
+	return atomic_read(&xcu->pending_kicks);
+}
+
+static inline int xse_integrity_check(const struct xsched_entity *xse)
+{
+	if (!xse) {
+		XSCHED_ERR("xse is null @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!xse->class) {
+		XSCHED_ERR("xse->class is null @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 struct xsched_context {
 	uint32_t fd;
 	uint32_t dev_id;
@@ -134,8 +229,57 @@ ctx_find_by_tgid_and_xcu(pid_t tgid, struct xsched_cu *xcu)
 	return ret;
 }
 
+struct xsched_class {
+	enum xcu_sched_type class_id;
+	size_t kick_slice;
+	struct list_head node;
+
+	/* Initializes a new xsched entity. */
+	void (*xse_init)(struct xsched_entity *xse);
+
+	/* Destroys XSE scheduler-specific data. */
+	void (*xse_deinit)(struct xsched_entity *xse);
+
+	/* Initializes a new runqueue per XCU. */
+	void (*rq_init)(struct xsched_cu *xcu);
+
+	/* Removes a given XSE from its runqueue. */
+	void (*dequeue_ctx)(struct xsched_entity *xse);
+
+	/* Places a given XSE on a runqueue on a given XCU. */
+	void (*enqueue_ctx)(struct xsched_entity *xse, struct xsched_cu *xcu);
+
+	/* Returns the next XSE to be submitted on a given XCU. */
+	struct xsched_entity *(*pick_next_ctx)(struct xsched_cu *xcu);
+
+	/* Puts an XSE back onto its runqueue on preemption. */
+	void (*put_prev_ctx)(struct xsched_entity *xse);
+
+	/* Checks for context preemption.
+	 */
+	bool (*check_preempt)(struct xsched_entity *xse);
+
+	/* Selects jobs from an XSE to submit on an XCU. */
+	size_t (*select_work)(struct xsched_cu *xcu, struct xsched_entity *xse);
+};
+
+static inline void xsched_init_vsm(struct vstream_metadata *vsm,
+				   struct vstream_info *vs, vstream_args_t *arg)
+{
+	vsm->sq_id = arg->sq_id;
+	vsm->sqe_num = arg->vk_args.sqe_num;
+	vsm->timeout = arg->vk_args.timeout;
+	memcpy(vsm->sqe, arg->vk_args.sqe, XCU_SQE_SIZE_MAX);
+	vsm->parent = vs;
+	INIT_LIST_HEAD(&vsm->node);
+}
+
 int xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group, int xcu_id);
 int xsched_schedule(void *input_xcu);
 int xsched_init_entity(struct xsched_context *ctx, struct vstream_info *vs);
 int ctx_bind_to_xcu(vstream_info_t *vstream_info, struct xsched_context *ctx);
+int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg);
+struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs);
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu);
+void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu);
+int delete_ctx(struct xsched_context *ctx);
 
 #endif /* !__LINUX_XSCHED_H__ */
diff --git a/include/uapi/linux/xcu_vstream.h b/include/uapi/linux/xcu_vstream.h
index 4d65789c37c7..38cc97d3a139 100644
--- a/include/uapi/linux/xcu_vstream.h
+++ b/include/uapi/linux/xcu_vstream.h
@@ -6,6 +6,9 @@
 
 #define PAYLOAD_SIZE_MAX	512
 #define XCU_SQE_SIZE_MAX	64
+#define XCU_CQE_SIZE_MAX	32
+#define XCU_CQE_REPORT_NUM	4
+#define XCU_CQE_BUF_SIZE	(XCU_CQE_REPORT_NUM * XCU_CQE_SIZE_MAX)
 
 #define KABI_RESERVE_BYTES(idx, n) \
 	__u8 __kabi_reserved_##idx[n]
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 867c07f9e9d1..701a81297fc4 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -23,24 +23,403 @@
 #include <linux/types.h>
 #include <linux/xsched.h>
 
+/* List of available scheduling classes. */
+struct list_head xsched_class_list;
+
+static void put_prev_ctx(struct xsched_entity *xse)
+{
+	struct xsched_cu *xcu = xse->xcu;
+
+	lockdep_assert_held(&xcu->xcu_lock);
+	xse->class->put_prev_ctx(xse);
+	xse->last_exec_runtime = 0;
+	atomic_set(&xse->submitted_one_kick, 0);
+	XSCHED_DEBUG("Put current xse %d @ %s\n", xse->tgid, __func__);
+}
+
+static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse)
+{
+	int kick_count, scheduled = 0, not_empty;
+	struct vstream_info *vs;
+	struct xcu_op_handler_params params;
+	struct vstream_metadata *vsm;
+	size_t kick_slice = xse->class->kick_slice;
+
+	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
+	XSCHED_DEBUG("Before decrement XSE kick_count=%d @ %s\n",
+		     kick_count, __func__);
+
+	if (kick_count == 0) {
+		XSCHED_WARN("Trying to select an xse that has 0 kicks @ %s\n",
+			    __func__);
+		return 0;
+	}
+
+	do {
+		not_empty = 0;
+		for_each_vstream_in_ctx(vs, xse->ctx) {
+			spin_lock(&vs->stream_lock);
+			vsm = xsched_vsm_fetch_first(vs);
+			spin_unlock(&vs->stream_lock);
+			if (!vsm)
+				continue;
+			list_add_tail(&vsm->node, &xcu->vsm_list);
+			scheduled++;
+			xsched_dec_pending_kicks_xse(xse);
+			not_empty++;
+		}
+	} while ((scheduled < kick_slice) && (not_empty));
+
+	/*
+	 * Iterate over all vstreams in the context and set the
+	 * wr_cqe bit in the last compute task on vsm_list.
+	 */
+	for_each_vstream_in_ctx(vs, xse->ctx) {
+		list_for_each_entry_reverse(vsm, &xcu->vsm_list, node) {
+			if (vsm->parent == vs) {
+				params.group = vsm->parent->xcu->group;
+				params.param_1 = &(int){SQE_SET_NOTIFY};
+				params.param_2 = &vsm->sqe;
+				xcu_sqe_op(&params);
+				break;
+			}
+		}
+	}
+
+	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
+	XSCHED_DEBUG("After decrement XSE kick_count=%d @ %s\n",
+		     kick_count, __func__);
+
+	xse->total_scheduled += scheduled;
+	return scheduled;
+}
+
+static struct xsched_entity *__raw_pick_next_ctx(struct xsched_cu *xcu)
+{
+	const struct xsched_class *class;
+	struct xsched_entity *next = NULL;
+	size_t scheduled;
+
+	lockdep_assert_held(&xcu->xcu_lock);
+	for_each_xsched_class(class) {
+		next = class->pick_next_ctx(xcu);
+		if (next) {
+			scheduled = class->select_work ?
+				class->select_work(xcu, next) : select_work_def(xcu, next);
+
+			XSCHED_DEBUG("xse %d scheduled=%zu total=%zu @ %s\n",
+				     next->tgid, scheduled, next->total_scheduled, __func__);
+			break;
+		}
+	}
+
+	return next;
+}
+
+void enqueue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+	lockdep_assert_held(&xcu->xcu_lock);
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return;
+	}
+
+	if (!xse->on_rq) {
+		xse->on_rq = true;
+		xse->class->enqueue_ctx(xse, xcu);
+		XSCHED_DEBUG("Enqueue xse %d @ %s\n", xse->tgid, __func__);
+	}
+}
+
+void dequeue_ctx(struct xsched_entity *xse, struct xsched_cu *xcu)
+{
+	lockdep_assert_held(&xcu->xcu_lock);
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return;
+	}
+
+	if (xse->on_rq) {
+		xse->class->dequeue_ctx(xse);
+		xse->on_rq = false;
+		XSCHED_DEBUG("Dequeue xse %d @ %s\n", xse->tgid, __func__);
+	}
+}
+
+int delete_ctx(struct xsched_context *ctx)
+{
+	struct xsched_cu *xcu = ctx->xse.xcu;
+	struct xsched_entity *xse = &ctx->xse;
+
+	if (xse_integrity_check(xse)) {
+		XSCHED_ERR("Fail to check xse integrity @ %s\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!xse->xcu) {
+		XSCHED_ERR("Trying to delete a ctx that is not attached to an xcu @ %s\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	/* Wait until all of this context's kicks have been submitted. */
+	while (atomic_read(&xse->kicks_pending_ctx_cnt)) {
+		XSCHED_DEBUG("Deleting ctx %d, xse->kicks_pending_ctx_cnt=%d @ %s\n",
+			     xse->tgid, atomic_read(&xse->kicks_pending_ctx_cnt),
+			     __func__);
+		usleep_range(100, 200);
+	}
+
+	mutex_lock(&xcu->xcu_lock);
+	/* Read curr_xse under xcu_lock to avoid racing with the worker. */
+	if (xcu->xrq.curr_xse == xse)
+		xcu->xrq.curr_xse = NULL;
+	dequeue_ctx(xse, xcu);
+	--xcu->nr_ctx;
+	mutex_unlock(&xcu->xcu_lock);
+	XSCHED_DEBUG("Deleting ctx %d, pending kicks left=%d @ %s\n", xse->tgid,
+		     atomic_read(&xse->kicks_pending_ctx_cnt), __func__);
+
+	xse->class->xse_deinit(xse);
+	return 0;
+}
+
+int xsched_xse_set_class(struct xsched_entity *xse)
+{
+	struct xsched_class *sched = xsched_first_class;
+
+	xse->class = sched;
+	return 0;
+}
+
+static void submit_kick(struct vstream_metadata *vsm)
+{
+	struct vstream_info *vs = vsm->parent;
+	struct xcu_op_handler_params params;
+
+	params.group = vs->xcu->group;
+	params.fd = vs->fd;
+	params.param_1 = &vs->id;
+	params.param_2 = &vs->channel_id;
+	params.param_3 = vsm->sqe;
+	params.param_4 = &vsm->sqe_num;
+	params.param_5 = &vsm->timeout;
+	params.param_6 = &vs->sqcq_type;
+	params.param_7 = vs->drv_ctx;
+	params.param_8 = &vs->logic_vcq_id;
+
+	/* Send the vstream to a device for processing. */
+	if (xcu_run(&params) != 0)
+		XSCHED_ERR(
+			"Fail to send vstream id %u tasks to a device for processing.\n",
+			vs->id);
+
+	XSCHED_DEBUG("Vstream id %u submit vsm: sq_tail %u\n", vs->id, vsm->sq_tail);
+}
+
+static void submit_wait(struct vstream_metadata *vsm)
+{
+	struct vstream_info *vs = vsm->parent;
+	struct xcu_op_handler_params params;
+	/* Wait timeout in ms. */
+	int32_t timeout = 500;
+
+	params.group = vs->xcu->group;
+	params.param_1 = &vs->channel_id;
+	params.param_2 = &vs->logic_vcq_id;
+	params.param_3 = &vs->user_stream_id;
+	params.param_4 = &vsm->sqe;
+	params.param_5 = vsm->cqe;
+	params.param_6 = vs->drv_ctx;
+	params.param_7 = &timeout;
+
+	/* Wait for the device to complete processing. */
+	if (xcu_wait(&params)) {
+		XSCHED_ERR("Fail to wait vstream id %u tasks, logic_cq_id %u.\n",
+			   vs->id, vs->logic_vcq_id);
+	}
+
+	XSCHED_DEBUG("Vstream id %u wait finish, logic_cq_id %u\n",
+		     vs->id, vs->logic_vcq_id);
+}
+
+static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
+{
+	struct vstream_metadata *vsm, *tmp;
+	int submitted = 0;
+	long submit_exec_time = 0;
+	ktime_t t_start = 0;
+	struct xcu_op_handler_params params;
+
+	XSCHED_DEBUG("%s called for xse %d on xcu %u\n",
+		     __func__, xse->tgid, xcu->id);
+	list_for_each_entry_safe(vsm, tmp, &xcu->vsm_list, node) {
+		submit_kick(vsm);
+		XSCHED_DEBUG("Xse %d vsm %u sched_delay: %lld ns\n",
+			     xse->tgid, vsm->sq_id,
+			     ktime_to_ns(ktime_sub(ktime_get(), vsm->add_time)));
+
+		params.group = vsm->parent->xcu->group;
+		params.param_1 = &(int){SQE_IS_NOTIFY};
+		params.param_2 = &vsm->sqe;
+		if (xcu_sqe_op(&params)) {
+			mutex_unlock(&xcu->xcu_lock);
+			t_start = ktime_get();
+			submit_wait(vsm);
+			submit_exec_time += ktime_to_ns(ktime_sub(ktime_get(), t_start));
+			mutex_lock(&xcu->xcu_lock);
+		}
+		submitted++;
+		list_del(&vsm->node);
+		kfree(vsm);
+	}
+
+	xse->last_exec_runtime += submit_exec_time;
+	xse->total_submitted += submitted;
+	atomic_add(submitted, &xse->submitted_one_kick);
+	INIT_LIST_HEAD(&xcu->vsm_list);
+	XSCHED_DEBUG("Xse %d submitted=%d total=%zu, exec_time=%ld @ %s\n",
+		     xse->tgid, submitted, xse->total_submitted,
+		     submit_exec_time, __func__);
+
+	return submitted;
+}
+
+static inline bool should_preempt(struct xsched_entity *xse)
+{
+	return xse->class->check_preempt(xse);
+}
+
+int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg)
+{
+	struct vstream_metadata *new_vsm;
+
+	new_vsm = kmalloc(sizeof(struct vstream_metadata), GFP_KERNEL);
+	if (!new_vsm) {
+		XSCHED_ERR("Fail to alloc kick metadata for vs %u @ %s\n",
+			   vs->id, __func__);
+		return -ENOMEM;
+	}
+
+	if (vs->kicks_count > MAX_VSTREAM_SIZE) {
+		kfree(new_vsm);
+		return -EBUSY;
+	}
+
+	xsched_init_vsm(new_vsm, vs, arg);
+	list_add_tail(&new_vsm->node, &vs->metadata_list);
+	new_vsm->add_time = ktime_get();
+	vs->kicks_count += 1;
+
+	return 0;
+}
+
+/* Fetches the first vstream metadata entry from the vstream's
+ * metadata list and removes it from that list. The caller is
+ * responsible for freeing the returned pointer.
+ */
+ */ +struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs) +{ + struct vstream_metadata *vsm; + + if (list_empty(&vs->metadata_list)) { + XSCHED_DEBUG("No metadata to fetch from vs %u @ %s\n", + vs->id, __func__); + return NULL; + } + + vsm = list_first_entry(&vs->metadata_list, struct vstream_metadata, node); + if (!vsm) { + XSCHED_ERR("Corrupted metadata list in vs %u @ %s\n", + vs->id, __func__); + return NULL; + } + + list_del(&vsm->node); + if (vs->kicks_count == 0) + XSCHED_WARN("kicks_count underflow in vs %u @ %s\n", + vs->id, __func__); + else + vs->kicks_count -= 1; + + return vsm; +} + int xsched_schedule(void *input_xcu) { + struct xsched_cu *xcu = input_xcu; + struct xsched_entity *curr_xse = NULL; + struct xsched_entity *next_xse = NULL; + + while (!kthread_should_stop()) { + mutex_unlock(&xcu->xcu_lock); + wait_event_interruptible(xcu->wq_xcu_idle, 1); + + mutex_lock(&xcu->xcu_lock); + if (kthread_should_stop()) { + mutex_unlock(&xcu->xcu_lock); + break; + } + + if (!xsched_check_pending_kicks_xcu(xcu)) { + XSCHED_WARN("%s: No pending kicks on xcu %u\n", __func__, xcu->id); + continue; + } + + next_xse = __raw_pick_next_ctx(xcu); + if (!next_xse) { + XSCHED_WARN("%s: Couldn't find next xse on xcu %u\n", __func__, xcu->id); + continue; + } + + xcu->xrq.curr_xse = next_xse; + if (__xsched_submit(xcu, next_xse) == 0) + continue; + + curr_xse = xcu->xrq.curr_xse; + if (!curr_xse) + continue; + + /* if not deleted yet */ + put_prev_ctx(curr_xse); + if (!atomic_read(&curr_xse->kicks_pending_ctx_cnt)) + dequeue_ctx(curr_xse, xcu); + + xcu->xrq.curr_xse = NULL; + } + return 0; } + /* Initializes all xsched XCU objects. * Should only be called from xsched_xcu_register function. */ int xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group, int xcu_id) { + struct xsched_class *sched; int err; xcu->id = xcu_id; xcu->state = XSCHED_XCU_NONE; xcu->group = group; + xcu->nr_ctx = 0; + xcu->xrq.curr_xse = NULL; + + atomic_set(&xcu->pending_kicks, 0); + INIT_LIST_HEAD(&xcu->vsm_list); + INIT_LIST_HEAD(&xcu->ctx_list); + init_waitqueue_head(&xcu->wq_xcu_idle); + mutex_init(&xcu->ctx_list_lock); + mutex_init(&xcu->vs_array_lock); mutex_init(&xcu->xcu_lock); + /* Initialize current XCU's runqueue. */ + for_each_xsched_class(sched) + sched->rq_init(xcu); + /* This worker should set XCU to XSCHED_XCU_WAIT_IDLE. 
 	 * If after initialization the XCU still has XSCHED_XCU_NONE
 	 * status then we can assume that there was a problem
@@ -62,6 +441,13 @@ int xsched_init_entity(struct xsched_context *ctx, struct vstream_info *vs)
 	int err = 0;
 	struct xsched_entity *xse = &ctx->xse;
 
+	atomic_set(&xse->kicks_pending_ctx_cnt, 0);
+	atomic_set(&xse->submitted_one_kick, 0);
+
+	xse->total_scheduled = 0;
+	xse->total_submitted = 0;
+	xse->last_exec_runtime = 0;
+
 	xse->fd = ctx->fd;
 	xse->tgid = ctx->tgid;
 
@@ -82,6 +468,13 @@ int xsched_init_entity(struct xsched_context *ctx, struct vstream_info *vs)
 
 	xse->xcu = vs->xcu;
 
+	err = xsched_xse_set_class(xse);
+	if (err) {
+		XSCHED_ERR("Fail to set xse class @ %s\n", __func__);
+		return err;
+	}
+	xse->class->xse_init(xse);
+
 	WRITE_ONCE(xse->on_rq, false);
 
 	spin_lock_init(&xse->xse_lock);
diff --git a/kernel/xsched/vstream.c b/kernel/xsched/vstream.c
index 18373f27d129..b3da8a3444c9 100644
--- a/kernel/xsched/vstream.c
+++ b/kernel/xsched/vstream.c
@@ -84,6 +84,7 @@ static void xsched_task_free(struct kref *kref)
 		kfree(vs);
 	}
 
+	delete_ctx(ctx);
 	list_del(&ctx->ctx_node);
 	mutex_unlock(&xcu->ctx_list_lock);
 
@@ -236,6 +237,7 @@ static int alloc_ctx_from_vstream(struct vstream_info *vstream_info,
 	}
 
 	list_add(&(*ctx)->ctx_node, &xcu->ctx_list);
+	++xcu->nr_ctx;
 
 	return 0;
 }
@@ -569,7 +571,64 @@ int vstream_free(struct vstream_args *arg)
 
 int vstream_kick(struct vstream_args *arg)
 {
-	return 0;
+	vstream_info_t *vstream;
+	struct xsched_cu *xcu = NULL;
+	struct xsched_entity *xse;
+	int err = 0;
+	uint32_t vstream_id = arg->sq_id;
+	uint32_t type = XCU_TYPE_XPU;
+
+	xcu = xcu_find(type, arg->dev_id, arg->channel_id);
+	if (!xcu)
+		return -EINVAL;
+
+	/* Get the vstream. */
+	vstream = vstream_get(xcu, vstream_id);
+	if (!vstream || !vstream->ctx) {
+		XSCHED_ERR("Vstream is NULL or has no context. vstream_id=%u, dev_id=%u\n",
+			   vstream_id, arg->dev_id);
+		return -EINVAL;
+	}
+
+	xse = &vstream->ctx->xse;
+	XSCHED_DEBUG("New kick on xse %d @ %s\n", xse->tgid, __func__);
+
+	do {
+		mutex_lock(&xcu->xcu_lock);
+		spin_lock(&vstream->stream_lock);
+
+		/* Add kick metadata. */
+		err = xsched_vsm_add_tail(vstream, arg);
+		if (err == -EBUSY) {
+			spin_unlock(&vstream->stream_lock);
+			mutex_unlock(&xcu->xcu_lock);
+
+			/* Retry after a while. */
+			usleep_range(100, 200);
+			continue;
+		}
+
+		/* On any other error, break out; both locks are
+		 * released after the loop.
+		 */
+		if (err) {
+			XSCHED_ERR("Fail to add kick metadata to vs %u @ %s\n",
+				   vstream->id, __func__);
+			break;
+		}
+
+		enqueue_ctx(xse, xcu);
+
+		/* Increase the total number of kicks on the XCU that
+		 * this context is attached to, based on sched_class.
+		 */
+		xsched_inc_pending_kicks_xse(&vstream->ctx->xse);
+	} while (err == -EBUSY);
+
+	spin_unlock(&vstream->stream_lock);
+	mutex_unlock(&xcu->xcu_lock);
+	if (!err)
+		wake_up_interruptible(&xcu->wq_xcu_idle);
+
+	return err;
 }
 
 /*
-- 
2.34.1
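
Below is a minimal sketch, not part of this patch, of how a concrete
scheduling class could plug into the struct xsched_class ops table
introduced above. The FIFO policy, all fifo_* names, and the rq_head /
rq_node fields are illustrative assumptions (the series itself targets
RT/CFS classes and adds its own runqueue fields); only the shape of the
ops table and the use of xsched_class_list follow the code in this patch.

	/* Hypothetical kernel/xsched/fifo.c -- illustrative sketch only.
	 * Assumes struct xsched_rq gains a rq_head list head and struct
	 * xsched_entity gains a rq_node list node; neither field exists
	 * in this patch.
	 */
	#include <linux/list.h>
	#include <linux/xsched.h>

	static void fifo_xse_init(struct xsched_entity *xse)
	{
		INIT_LIST_HEAD(&xse->rq_node);		/* assumed field */
	}

	static void fifo_xse_deinit(struct xsched_entity *xse)
	{
		/* No per-entity state to tear down in this sketch. */
	}

	static void fifo_rq_init(struct xsched_cu *xcu)
	{
		INIT_LIST_HEAD(&xcu->xrq.rq_head);	/* assumed field */
		xcu->xrq.nr_running = 0;
	}

	/* enqueue/dequeue are invoked by the core wrappers
	 * (enqueue_ctx/dequeue_ctx) with xcu->xcu_lock held,
	 * so no extra locking is needed here.
	 */
	static void fifo_enqueue_ctx(struct xsched_entity *xse,
				     struct xsched_cu *xcu)
	{
		list_add_tail(&xse->rq_node, &xcu->xrq.rq_head);
		xcu->xrq.nr_running++;
	}

	static void fifo_dequeue_ctx(struct xsched_entity *xse)
	{
		list_del_init(&xse->rq_node);
		xse->xcu->xrq.nr_running--;
	}

	static struct xsched_entity *fifo_pick_next_ctx(struct xsched_cu *xcu)
	{
		/* FIFO order: the head is runnable, since the core dequeues
		 * entities once their pending kick count drops to zero.
		 */
		return list_first_entry_or_null(&xcu->xrq.rq_head,
						struct xsched_entity, rq_node);
	}

	static void fifo_put_prev_ctx(struct xsched_entity *xse)
	{
		/* Run to completion: nothing to rotate or account here. */
	}

	static bool fifo_check_preempt(struct xsched_entity *xse)
	{
		return false;	/* a new kick never preempts the current xse */
	}

	static struct xsched_class fifo_xsched_class = {
		.class_id	= 0,	/* placeholder; no real class IDs yet */
		.kick_slice	= 8,	/* arbitrary batch size for select_work_def() */
		.xse_init	= fifo_xse_init,
		.xse_deinit	= fifo_xse_deinit,
		.rq_init	= fifo_rq_init,
		.dequeue_ctx	= fifo_dequeue_ctx,
		.enqueue_ctx	= fifo_enqueue_ctx,
		.pick_next_ctx	= fifo_pick_next_ctx,
		.put_prev_ctx	= fifo_put_prev_ctx,
		.check_preempt	= fifo_check_preempt,
		/* .select_work left NULL: the core falls back to
		 * select_work_def(), which batches up to kick_slice kicks.
		 */
	};

With a single class registered early on (presumably something like
list_add_tail(&fifo_xsched_class.node, &xsched_class_list) during init),
xsched_first_class and for_each_xsched_class() resolve to it, and
xsched_xse_set_class() then binds every new entity to this class.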