
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add vstream-related data structures:
- vstream_info.

Add vstream-related context and entity data structures:
- xsched_entity
- xsched_context

Add xsched_init() implementation.
Add vstream_alloc/free implementation.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin@huawei.com>
Signed-off-by: Hui Tang <tanghui20@.huawei.com>
Signed-off-by: Liu Kai <liukai284@huawei.com>
Signed-off-by: Xia Fukun <xiafukun@huawei.com>
---
 drivers/xcu/xcu_group.c   |  48 ++++-
 include/linux/vstream.h   |  46 +++++
 include/linux/xcu_group.h |  16 ++
 include/linux/xsched.h    |  74 ++++++++
 kernel/xsched/core.c      |  93 ++++++++++
 kernel/xsched/vstream.c   | 374 +++++++++++++++++++++++++++++++++++++-
 6 files changed, 647 insertions(+), 4 deletions(-)

diff --git a/drivers/xcu/xcu_group.c b/drivers/xcu/xcu_group.c
index fef05b9787f3..86b935af00b6 100644
--- a/drivers/xcu/xcu_group.c
+++ b/drivers/xcu/xcu_group.c
@@ -153,7 +153,12 @@ int xcu_complete(struct xcu_op_handler_params *params)
  */
 int xcu_finish(struct xcu_op_handler_params *params)
 {
-	return 0;
+	if (!params->group->opt || !params->group->opt->finish) {
+		XSCHED_ERR("No function [finish] called.\n");
+		return -EINVAL;
+	}
+
+	return params->group->opt->finish(params);
 }
 
 /* This function runs a "alloc" callback for a given xcu_group
@@ -165,7 +170,46 @@ int xcu_finish(struct xcu_op_handler_params *params)
  */
 int xcu_alloc(struct xcu_op_handler_params *params)
 {
-	return 0;
+	if (!params->group->opt || !params->group->opt->alloc) {
+		XSCHED_ERR("No function [alloc] called.\n");
+		return -EINVAL;
+	}
+
+	return params->group->opt->alloc(params);
+}
+
+/* This function runs a "logic_alloc" callback for a given xcu_group
+ * and a given vstream that are passed within
+ * xcu_op_handler_params object.
+ *
+ * This handler provides an interface to implement allocation
+ * and registering memory of logic CQ buffer.
+ */
+int xcu_logic_alloc(struct xcu_op_handler_params *params)
+{
+	if (!params->group->opt || !params->group->opt->logic_alloc) {
+		XSCHED_ERR("No function [logic_alloc] called.\n");
+		return -EINVAL;
+	}
+
+	return params->group->opt->logic_alloc(params);
+}
+
+/* This function runs a "logic_free" callback for a given xcu_group
+ * and a given vstream that are passed within
+ * xcu_op_handler_params object.
+ *
+ * This handler provides an interface to implement deallocation
+ * and unregistering memory of a logic CQ buffer.
+ */
+int xcu_logic_free(struct xcu_op_handler_params *params)
+{
+	if (!params->group->opt || !params->group->opt->logic_free) {
+		XSCHED_ERR("No function [logic_free] called.\n");
+		return -EINVAL;
+	}
+
+	return params->group->opt->logic_free(params);
 }
 
 static struct xcu_group __xcu_group_root = {
diff --git a/include/linux/vstream.h b/include/linux/vstream.h
index 627f754f83c5..ca956ac2cf4f 100644
--- a/include/linux/vstream.h
+++ b/include/linux/vstream.h
@@ -4,6 +4,52 @@
 
 #include <uapi/linux/xcu_vstream.h>
 
+typedef struct vstream_info {
+	uint32_t user_stream_id;
+	uint32_t id;
+	uint32_t vcq_id;
+	uint32_t logic_vcq_id;
+	uint32_t dev_id;
+	uint32_t channel_id;
+	uint32_t fd;
+	uint32_t task_type;
+	int tgid;
+	int sqcq_type;
+
+	void *drv_ctx;
+
+	int inode_fd;
+
+	/* Pointer to corresponding context. */
+	struct xsched_context *ctx;
+
+	/* List node in context's vstream list. */
+	struct list_head ctx_node;
+
+	/* Pointer to a CU object on which this
+	 * vstream is currently being processed.
+	 * NULL if vstream is not being processed.
+	 */
+	struct xsched_cu *xcu;
+
+	/* List node in a CU list of vstreams that
+	 * are currently being processed by this specific CU.
+	 */
+	struct list_head xcu_node;
+
+	/* Private vstream data. */
+	void *data;
+
+	spinlock_t stream_lock;
+
+	uint32_t kicks_count;
+
+	/* List of metadata a.k.a. all recorded unprocessed
+	 * kicks for this exact vstream.
+	 */
+	struct list_head metadata_list;
+} vstream_info_t;
+
 typedef int vstream_manage_t(struct vstream_args *arg);
 
 #endif /* _LINUX_VSTREAM_H */
diff --git a/include/linux/xcu_group.h b/include/linux/xcu_group.h
index ef11886c18a1..c0168969c67a 100644
--- a/include/linux/xcu_group.h
+++ b/include/linux/xcu_group.h
@@ -18,6 +18,20 @@ enum xcu_type {
 };
 
 struct xcu_op_handler_params {
+	int fd;
+	struct xcu_group *group;
+	void *payload;
+	union {
+		struct {
+			void *param_1;
+			void *param_2;
+			void *param_3;
+			void *param_4;
+			void *param_5;
+			void *param_6;
+			void *param_7;
+		};
+	};
 };
 
 typedef int (*xcu_op_handler_fn_t)(struct xcu_op_handler_params *params);
@@ -28,6 +42,8 @@ struct xcu_operation {
 	xcu_op_handler_fn_t wait;
 	xcu_op_handler_fn_t complete;
 	xcu_op_handler_fn_t alloc;
+	xcu_op_handler_fn_t logic_alloc;
+	xcu_op_handler_fn_t logic_free;
 };
 
 struct xcu_group {
diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 011f0e9bc227..702483b85621 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -3,6 +3,8 @@
 #define __LINUX_XSCHED_H__
 
 #include <linux/xcu_group.h>
+#include <linux/kref.h>
+#include <linux/vstream.h>
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
@@ -67,5 +69,77 @@
 	wait_queue_head_t wq_xcu_running;
 };
 
+struct xsched_entity {
+	uint32_t task_type;
+
+	bool on_rq;
+
+	pid_t owner_pid;
+	pid_t tgid;
+
+	/* File descriptor coming from an associated context
+	 * used for identifying a given xsched entity in
+	 * info and error prints.
+	 */
+	uint32_t fd;
+
+	/* Xsched class for this xse. */
+	const struct xsched_class *class;
+
+	/* Pointer to context object. */
+	struct xsched_context *ctx;
+
+	/* Pointer to an XCU object that represents an XCU
+	 * on which this xse is to be processed or is being
+	 * processed currently.
+	 */
+	struct xsched_cu *xcu;
+
+	/* General purpose xse lock. */
+	spinlock_t xse_lock;
+};
+
+struct xsched_context {
+	uint32_t fd;
+	uint32_t dev_id;
+	pid_t tgid;
+
+	struct list_head vstream_list;
+	struct list_head ctx_node;
+
+	struct xsched_entity xse;
+
+	spinlock_t ctx_lock;
+	struct mutex ctx_mutex;
+	struct kref kref;
+};
+
+extern struct list_head xsched_ctx_list;
+extern struct mutex xsched_ctx_list_mutex;
+
+/* Returns a pointer to the xsched_context object associated with
+ * the thread group id provided by the tgid argument.
+ */
+static inline struct xsched_context *ctx_find_by_tgid(pid_t tgid)
+{
+	struct xsched_context *ctx;
+	struct xsched_context *ret = NULL;
+
+	list_for_each_entry(ctx, &xsched_ctx_list, ctx_node) {
+		if (ctx->tgid == tgid) {
+			ret = ctx;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 int xsched_xcu_register(struct xcu_group *group);
+void xsched_task_free(struct kref *kref);
+int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs);
+int ctx_bind_to_xcu(vstream_info_t *vstream_info, struct xsched_context *ctx);
+int vstream_bind_to_xcu(vstream_info_t *vstream_info);
+struct xsched_cu *xcu_find(uint32_t *type,
+			   uint32_t dev_id, uint32_t channel_id);
 #endif /* !__LINUX_XSCHED_H__ */
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index bbd125044c88..018c73101a70 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -32,6 +32,89 @@ spinlock_t xcu_mgr_lock;
 DECLARE_BITMAP(xcu_online_mask, XSCHED_NR_CUS);
 struct xsched_cu *xsched_cu_mgr[XSCHED_NR_CUS];
 
+/* Storage list for contexts. */
+struct list_head xsched_ctx_list;
+DEFINE_MUTEX(xsched_ctx_list_mutex);
+
+/* Frees a given vstream and also frees and dequeues its context
+ * if a given vstream is the last and only vstream attached to its
+ * corresponding context object.
+ */
+void xsched_task_free(struct kref *kref)
+{
+	struct xsched_context *ctx;
+	vstream_info_t *vs, *tmp;
+
+	ctx = container_of(kref, struct xsched_context, kref);
+
+	mutex_lock(&xsched_ctx_list_mutex);
+	list_for_each_entry_safe(vs, tmp, &ctx->vstream_list, ctx_node) {
+		list_del(&vs->ctx_node);
+		kfree(vs->data);
+		kfree(vs);
+	}
+
+	list_del(&ctx->ctx_node);
+	mutex_unlock(&xsched_ctx_list_mutex);
+
+	kfree(ctx);
+}
+
+int vstream_bind_to_xcu(vstream_info_t *vstream_info)
+{
+	struct xsched_cu *xcu_found = NULL;
+	uint32_t type = XCU_TYPE_XPU;
+
+	xcu_found = xcu_find(&type, vstream_info->dev_id, vstream_info->channel_id);
+	if (!xcu_found)
+		return -EINVAL;
+
+	/* Bind vstream to an xcu. */
+	vstream_info->xcu = xcu_found;
+	XSCHED_DEBUG("XCU bound to a vstream: type=%u, dev_id=%u, chan_id=%u.\n",
+		     type, vstream_info->dev_id, vstream_info->channel_id);
+
+	return 0;
+}
+
+struct xsched_cu *xcu_find(uint32_t *type,
+			   uint32_t dev_id, uint32_t channel_id)
+{
+	struct xcu_group *group = NULL;
+	uint32_t local_type = *type;
+
+	/* Find xcu by type. */
+	group = xcu_group_find(xcu_group_root, local_type);
+	if (group == NULL) {
+		XSCHED_ERR("Fail to find type group.\n");
+		return NULL;
+	}
+
+	/* Find device id group. */
+	group = xcu_group_find(group, dev_id);
+	if (group == NULL) {
+		XSCHED_ERR("Fail to find device group.\n");
+		return NULL;
+	}
+	/* Find channel id group. */
+	group = xcu_group_find(group, channel_id);
+	if (group == NULL) {
+		XSCHED_ERR("Fail to find channel group.\n");
+		return NULL;
+	}
+
+	*type = local_type;
+	XSCHED_DEBUG("XCU found: type=%u, dev_id=%u, chan_id=%u.\n",
+		     local_type, dev_id, channel_id);
+
+	return group->xcu;
+}
+
+int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
+{
+	return 0;
+}
+
 static int xsched_schedule(void *input_xcu)
 {
 	return 0;
@@ -108,3 +191,13 @@ int xsched_xcu_register(struct xcu_group *group)
 	return 0;
 }
 EXPORT_SYMBOL(xsched_xcu_register);
+
+int __init xsched_init(void)
+{
+	/* Initializing global Xsched context list. */
+	INIT_LIST_HEAD(&xsched_ctx_list);
+
+	return 0;
+}
+
+late_initcall(xsched_init);
diff --git a/kernel/xsched/vstream.c b/kernel/xsched/vstream.c
index 5723c359e0f2..127ff96ce48c 100644
--- a/kernel/xsched/vstream.c
+++ b/kernel/xsched/vstream.c
@@ -17,20 +17,390 @@
  *
  */
 #include <linux/syscalls.h>
+#include <linux/anon_inodes.h>
 #include <linux/vstream.h>
+#include <linux/xsched.h>
+#include <linux/delay.h>
 
 #ifdef CONFIG_XCU_VSTREAM
+#define MAX_VSTREAM_NUM 512
 
-int vstream_alloc(struct vstream_args *arg)
+static DEFINE_MUTEX(vs_mutex);
+static vstream_info_t *vstream_array[MAX_VSTREAM_NUM];
+
+static int vstream_del(uint32_t vstream_id);
+static int vstream_file_release(struct inode *inode, struct file *file);
+static const struct file_operations vstreamfd_fops = {
+	.release = vstream_file_release,
+};
+
+static inline struct file *vstream_file_get(int vs_fd)
+{
+	return fget(vs_fd);
+}
+
+static inline void vstream_file_put(struct file *vstream_file)
+{
+	fput(vstream_file);
+}
+
+static int vstream_file_create(struct vstream_info *vs)
+{
+	int err = anon_inode_getfd("[vstreamfd]",
+		&vstreamfd_fops, vs, O_RDWR | O_CLOEXEC | O_NONBLOCK);
+	if (err < 0)
+		XSCHED_ERR("Fail to alloc anon inode vs %u @ %s\n",
+			   vs->id, __func__);
+
+	return err;
+}
+
+static int vstream_destroy(vstream_info_t *vstream)
 {
+	int err;
+	struct xsched_context *ctx = NULL;
+	struct xsched_entity *xse = NULL;
+
+	err = vstream_del(vstream->id);
+	if (err)
+		return err;
+
+	xse = &vstream->ctx->xse;
+	ctx = vstream->ctx;
+	kref_put(&ctx->kref, xsched_task_free);
+
 	return 0;
 }
 
-int vstream_free(struct vstream_args *arg)
+static int vstream_file_release(struct inode *inode, struct file *file)
+{
+	vstream_info_t *vstream;
+	(void) inode;
+
+	if (!file->private_data)
+		return 0;
+
+	vstream = file->private_data;
+	return vstream_destroy(vstream);
+}
+
+static void init_xsched_ctx(struct xsched_context *ctx,
+			    const struct vstream_info *vs)
 {
+	ctx->tgid = vs->tgid;
+	ctx->fd = vs->fd;
+	ctx->dev_id = vs->dev_id;
+	kref_init(&ctx->kref);
+
+	INIT_LIST_HEAD(&ctx->vstream_list);
+	INIT_LIST_HEAD(&ctx->ctx_node);
+
+	spin_lock_init(&ctx->ctx_lock);
+	mutex_init(&ctx->ctx_mutex);
+}
+
+/* Allocates a new xsched_context if a given vstream_info is
+ * bound to a task group (tgid) that no other currently present
+ * vstream is bound to.
+ */
+static int alloc_ctx_from_vstream(struct vstream_info *vstream_info,
+				  struct xsched_context **ctx)
+{
+	*ctx = ctx_find_by_tgid(vstream_info->tgid);
+	if (*ctx)
+		return 0;
+
+	*ctx = kzalloc(sizeof(struct xsched_context), GFP_KERNEL);
+	if (!*ctx) {
+		XSCHED_ERR("Fail to alloc xsched context (tgid=%d) @ %s\n",
+			   vstream_info->tgid, __func__);
+		return -ENOMEM;
+	}
+
+	init_xsched_ctx(*ctx, vstream_info);
+
+	if (xsched_ctx_init_xse(*ctx, vstream_info) != 0) {
+		XSCHED_ERR("Fail to initialize XSE for context @ %s\n",
+			   __func__);
+		kfree(*ctx);
+		return -EINVAL;
+	}
+
+	list_add(&(*ctx)->ctx_node, &xsched_ctx_list);
+
+	return 0;
+}
+
+/* Binds a new vstream_info object to a corresponding xsched context. */
+static int vstream_bind_to_ctx(struct vstream_info *vs)
+{
+	struct xsched_context *ctx = NULL;
+	int alloc_err = 0;
+
+	mutex_lock(&xsched_ctx_list_mutex);
+	ctx = ctx_find_by_tgid(vs->tgid);
+	if (ctx) {
+		XSCHED_DEBUG("Ctx %d found @ %s\n", vs->tgid, __func__);
+		kref_get(&ctx->kref);
+	} else {
+		alloc_err = alloc_ctx_from_vstream(vs, &ctx);
+		if (alloc_err)
+			goto out_err;
+	}
+
+	vs->ctx = ctx;
+	vs->xcu = ctx->xse.xcu;
+	ctx->dev_id = vs->dev_id;
+	list_add(&vs->ctx_node, &vs->ctx->vstream_list);
+
+out_err:
+	mutex_unlock(&xsched_ctx_list_mutex);
+	return alloc_err;
+}
+
+static vstream_info_t *vstream_create(struct vstream_args *arg)
+{
+	struct vstream_info *vstream = NULL;
+
+	vstream = kzalloc(sizeof(vstream_info_t), GFP_KERNEL);
+	if (!vstream) {
+		XSCHED_ERR("Failed to allocate vstream.\n");
+		return NULL;
+	}
+
+	vstream->inode_fd = vstream_file_create(vstream);
+	vstream->dev_id = arg->dev_id;
+	vstream->channel_id = arg->channel_id;
+	vstream->kicks_count = 0;
+	vstream->xcu = NULL;
+
+	INIT_LIST_HEAD(&vstream->ctx_node);
+	INIT_LIST_HEAD(&vstream->xcu_node);
+	INIT_LIST_HEAD(&vstream->metadata_list);
+
+	spin_lock_init(&vstream->stream_lock);
+
+	return vstream;
+}
+
+static int vstream_add(vstream_info_t *vstream, uint32_t id)
+{
+	if (id >= MAX_VSTREAM_NUM) {
+		XSCHED_ERR("vstream id out of range.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vs_mutex);
+	if (vstream_array[id] != NULL) {
+		mutex_unlock(&vs_mutex);
+		XSCHED_ERR("Vstream id=%u cell is busy.\n", id);
+		return -EINVAL;
+	}
+	vstream_array[id] = vstream;
+	mutex_unlock(&vs_mutex);
+
 	return 0;
 }
 
+static int vstream_del(uint32_t vstream_id)
+{
+	if (vstream_id >= MAX_VSTREAM_NUM) {
+		XSCHED_ERR("Vstream id=%u out of range.\n", vstream_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vs_mutex);
+	vstream_array[vstream_id] = NULL;
+	mutex_unlock(&vs_mutex);
+	return 0;
+}
+
+static vstream_info_t *vstream_get(uint32_t vstream_id)
+{
+	vstream_info_t *vstream = NULL;
+
+	if (vstream_id >= MAX_VSTREAM_NUM) {
+		XSCHED_ERR("Vstream id=%u out of range.\n", vstream_id);
+		return NULL;
+	}
+
+	mutex_lock(&vs_mutex);
+	vstream = vstream_array[vstream_id];
+	mutex_unlock(&vs_mutex);
+
+	return vstream;
+}
+
+static vstream_info_t *
+vstream_get_by_user_stream_id(uint32_t user_stream_id)
+{
+	int id;
+
+	for (id = 0; id < MAX_VSTREAM_NUM; id++) {
+		if (vstream_array[id] != NULL &&
+		    vstream_array[id]->user_stream_id == user_stream_id)
+			return vstream_array[id];
+	}
+	return NULL;
+}
+
+static int sqcq_alloc(struct vstream_args *arg)
+{
+	vstream_alloc_args_t *va_args = &arg->va_args;
+	struct xsched_context *ctx = NULL;
+	struct xcu_op_handler_params params;
+	uint32_t logic_cq_id = 0;
+	vstream_info_t *vstream;
+	int ret = 0;
+	uint32_t tgid = 0;
+	uint32_t cq_id = 0;
+	uint32_t sq_id = 0;
+
+	vstream = vstream_create(arg);
+	if (!vstream)
+		return -ENOSPC;
+
+	vstream->fd = arg->fd;
+	vstream->task_type = arg->task_type;
+
+	ret = vstream_bind_to_xcu(vstream);
+	if (ret < 0) {
+		ret = -EINVAL;
+		goto out_err_vstream_free;
+	}
+
+	/* Allocates vstream's SQ and CQ memory on an XCU for processing. */
+	params.group = vstream->xcu->group;
+	params.fd = arg->fd;
+	params.payload = arg->payload;
+	params.param_1 = &tgid;
+	params.param_2 = &sq_id;
+	params.param_3 = &cq_id;
+	params.param_4 = &logic_cq_id;
+	ret = xcu_alloc(&params);
+	if (ret) {
+		XSCHED_ERR("Fail to allocate SQ/CQ memory to a vstream.\n");
+		goto out_err_vstream_free;
+	}
+
+	vstream->drv_ctx = params.param_5;
+	vstream->id = sq_id;
+	vstream->vcq_id = cq_id;
+	vstream->logic_vcq_id = logic_cq_id;
+	vstream->user_stream_id = va_args->user_stream_id;
+	vstream->tgid = tgid;
+	vstream->sqcq_type = va_args->type;
+
+	ret = vstream_bind_to_ctx(vstream);
+	if (ret < 0)
+		goto out_err_vstream_free;
+
+	ctx = vstream->ctx;
+
+	/* Add new vstream to array after allocating inode */
+	ret = vstream_add(vstream, vstream->id);
+	if (ret < 0)
+		goto out_err_vstream_free;
+
+	return 0;
+
+out_err_vstream_free:
+	kfree(vstream);
+	XSCHED_ERR("Exit %s with error, current_pid=%d, err=%d.\n",
+		   __func__, current->pid, ret);
+
+	return ret;
+}
+
+static int logic_cq_alloc(struct vstream_args *arg)
+{
+	int err = 0;
+	struct xcu_op_handler_params params;
+	vstream_info_t *vstream = NULL;
+	vstream_alloc_args_t *logic_cq_alloc_para = &arg->va_args;
+	struct xsched_cu *xcu_found = NULL;
+	uint32_t logic_cq_id = 0, type = XCU_TYPE_XPU;
+
+	vstream = vstream_get_by_user_stream_id(
+			logic_cq_alloc_para->user_stream_id);
+	if (!vstream) {
+		xcu_found = xcu_find(&type, arg->dev_id, arg->channel_id);
+		if (!xcu_found) {
+			err = -EINVAL;
+			goto out_err;
+		}
+	} else {
+		xcu_found = vstream->xcu;
+	}
+
+	params.group = xcu_found->group;
+	params.fd = arg->fd;
+	params.payload = arg->payload;
+	params.param_1 = &logic_cq_id;
+	err = xcu_logic_alloc(&params);
+	if (err) {
+		XSCHED_ERR("Fail to alloc logic CQ memory to a vstream.\n");
+		goto out_err;
+	}
+
+	vstream->logic_vcq_id = logic_cq_id;
+	XSCHED_DEBUG(
+		"Vstream logic CQ: dev_id=%u, stream_id=%u, logic_cqid=%u @ %s\n",
+		vstream->dev_id, vstream->user_stream_id,
+		vstream->logic_vcq_id, __func__);
+	return 0;
+
+out_err:
+	XSCHED_ERR(
+		"Exit %s with error, current_pid=%d, err=%d.\n",
+		__func__, current->pid, err);
+	return err;
+}
+
+int vstream_alloc(struct vstream_args *arg)
+{
+	vstream_alloc_args_t *va_args = &arg->va_args;
+	int ret;
+
+	if (!va_args->type)
+		ret = sqcq_alloc(arg);
+	else
+		ret = logic_cq_alloc(arg);
+
+	return ret;
+}
+
+int vstream_free(struct vstream_args *arg)
+{
+	struct file *vs_file;
+	struct xcu_op_handler_params params;
+	uint32_t vstream_id = arg->sq_id;
+	vstream_info_t *vstream = NULL;
+	int err = 0;
+
+	vstream = vstream_get(vstream_id);
+	if (!vstream) {
+		XSCHED_ERR("Fail to free NULL vstream, vstream id=%u\n", vstream_id);
+		return -EINVAL;
+	}
+
+	params.group = vstream->xcu->group;
+	params.fd = arg->fd;
+	params.payload = arg->payload;
+
+	vs_file = vstream_file_get(vstream->inode_fd);
+	vstream_destroy(vstream);
+	vs_file->private_data = NULL;
+	vstream_file_put(vs_file);
+
+	err = xcu_finish(&params);
+	if (err)
+		XSCHED_ERR("Fail to free vstream sqId=%u, cqId=%u.\n",
+			   arg->sq_id, arg->cq_id);
+
+	return err;
+}
+
 int vstream_kick(struct vstream_args *arg)
 {
 	return 0;
-- 
2.34.1
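
For illustration only (not part of the patch): a minimal sketch of how a device driver could fill in the xcu_operation callbacks that the new xcu_alloc()/xcu_finish() dispatch helpers invoke. The output-parameter convention follows sqcq_alloc() above (param_1..param_4 carry tgid/sq_id/cq_id/logic_cq_id back to the caller, param_5 is stored as vstream->drv_ctx). The driver name, the placeholder values, and how the ops table gets attached to an xcu_group's ->opt pointer are assumptions, since group registration is not introduced by this change.

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/xcu_group.h>

/* Hypothetical driver-side handlers; only the xcu_op_handler_params layout
 * and the xcu_operation callback slots come from this patch.
 */
static int demo_xcu_alloc(struct xcu_op_handler_params *params)
{
	/* Outputs wired up by sqcq_alloc(): tgid, sq_id, cq_id, logic_cq_id. */
	*(uint32_t *)params->param_1 = (uint32_t)current->tgid;
	*(uint32_t *)params->param_2 = 0;	/* SQ id picked by the driver */
	*(uint32_t *)params->param_3 = 0;	/* CQ id picked by the driver */
	*(uint32_t *)params->param_4 = 0;	/* logic CQ id */
	params->param_5 = NULL;			/* stored as vstream->drv_ctx */
	return 0;
}

static int demo_xcu_finish(struct xcu_op_handler_params *params)
{
	/* Tear down whatever demo_xcu_alloc() set up for params->fd/payload. */
	return 0;
}

/* Callbacks left NULL (here logic_alloc/logic_free) make the corresponding
 * wrapper return -EINVAL instead of dereferencing a missing handler.
 */
static struct xcu_operation demo_xcu_ops = {
	.alloc	= demo_xcu_alloc,
	.finish	= demo_xcu_finish,
};

How demo_xcu_ops would be hooked up to a group's ->opt pointer is left out here, since that registration path is outside this patch.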