
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add the xsched RT class base template with function stubs:
- dequeue_ctx_rt.
- enqueue_ctx_rt.
- pick_next_ctx_rt.
- put_prev_ctx_rt.
- submit_prepare_ctx_rt.
- select_work_rt.
- check_preempt_ctx_rt.

Add xsched_rt.c to the kernel/xsched Makefile.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
---
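Note below the '---' (dropped by git am): since the callbacks added here are stubs, a minimal sketch of how a core scheduling loop might drive rt_xsched_class through the generic xsched_class ops is given for reference. The xsched_submit_next() name and the exact call order are illustrative assumptions only; they are not part of this patch.

static void xsched_submit_next(struct xsched_cu *xcu)
{
	const struct xsched_class *class;
	struct xsched_entity *xse;

	/* Nothing to do if no vstream kicks are pending on this XCU. */
	if (!xsched_check_pending_kicks_xcu(xcu))
		return;

	/* Walk the class list (RT only for now) in declaration order. */
	for_each_xsched_class(class) {
		xse = class->pick_next_ctx(xcu);
		if (!xse)
			continue;

		if (class->submit_prepare_ctx(xse, xcu))
			continue;

		/* Pull jobs from the chosen XSE and requeue it afterwards. */
		class->select_work(xcu, xse);
		class->put_prev_ctx(xse);
		break;
	}
}

With the stub implementations in this patch the loop is a no-op, since pick_next_ctx_rt() always returns NULL.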
 include/linux/xsched.h | 226 ++++++++++++++++++++++++++++++++++++++++-
 kernel/xsched/Makefile |   2 +-
 kernel/xsched/core.c   |  24 +++++
 kernel/xsched/rt.c     |  62 +++++++++++
 4 files changed, 312 insertions(+), 2 deletions(-)
 create mode 100644 kernel/xsched/rt.c

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 1652541e39e3..64436db90647 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -57,8 +57,30 @@
 
 #define __GET_VS_TASK_TYPE(t) ((t)&0xFF)
 
+#define __GET_VS_TASK_PRIO_RT(t) (((t) >> 8) & 0xFF)
+
 #define GET_VS_TASK_TYPE(vs_ptr) __GET_VS_TASK_TYPE((vs_ptr)->task_type)
 
+#define GET_VS_TASK_PRIO_RT(vs_ptr) __GET_VS_TASK_PRIO_RT((vs_ptr)->task_type)
+
+#define XSCHED_RT_TIMESLICE_MS (10 * NSEC_PER_MSEC)
+/*
+ * A default kick slice for RT class XSEs.
+ */
+#define XSCHED_RT_KICK_SLICE 20
+
+enum xcu_sched_type {
+	XSCHED_TYPE_RT,
+	XSCHED_TYPE_DFLT = XSCHED_TYPE_RT,
+	XSCHED_TYPE_NUM,
+};
+
+enum xse_prio {
+	XSE_PRIO_LOW,
+	XSE_PRIO_HIGH,
+	NR_XSE_PRIO,
+};
+
 enum xsched_rq_state {
 	XRQ_STATE_INACTIVE = 0x00,
 	XRQ_STATE_IDLE = 0x01,
@@ -67,18 +89,61 @@ enum xsched_rq_state {
 	XRQ_STATE_WAIT_RUNNING = 0x08
 };
 
+enum xse_state {
+	XSE_PREPARE,
+	XSE_READY,
+	XSE_RUNNING,
+	XSE_BLOCK,
+	XSE_DEAD,
+};
+
+enum xse_flag {
+	XSE_TIF_NONE,
+	XSE_TIF_PREEMPT,
+	XSE_TIF_BALANCE, /* Unused so far */
+};
+
+
+extern const struct xsched_class rt_xsched_class;
+
+#define xsched_first_class (&rt_xsched_class)
+
+#define for_each_xsched_class(class) \
+	for (class = xsched_first_class; class; class = class->next)
+
+#define for_each_xse_prio(prio) \
+	for (prio = XSE_PRIO_LOW; prio < NR_XSE_PRIO; prio++)
+
 #define for_each_vstream_in_ctx(vs, ctx) \
 	list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node)
 
+/* Manages the xsched RT-like class linked-list based runqueue.
+ *
+ * For now the RT-like class runqueue struct is identical
+ * across classes, but it will most likely diverge in the
+ * future as Xsched evolves.
+ */
+struct xsched_rq_rt {
+	struct list_head rq[NR_XSE_PRIO];
+
+	int prio_nr_running[NR_XSE_PRIO];
+	atomic_t prio_nr_kicks[NR_XSE_PRIO];
+	DECLARE_BITMAP(curr_prios, NR_XSE_PRIO);
+};
+
 /* Base Xsched runqueue object structure that contains both mutual and
  * individual parameters for different scheduling classes.
  */
 struct xsched_rq {
 	struct xsched_entity *curr_xse;
+	const struct xsched_class *class;
 
 	int state;
 	int nr_running;
+
+	/* RT class run queue. */
+	struct xsched_rq_rt rt;
 };
 
 enum xcu_state {
@@ -109,6 +174,9 @@ struct xsched_cu {
 	uint32_t id;
 	uint32_t state;
 
+	/* RT class kick counter. */
+	atomic_t pending_kicks_rt;
+
 	struct task_struct *worker;
 
 	struct xsched_rq xrq;
@@ -125,6 +193,16 @@ struct xsched_cu {
 	wait_queue_head_t wq_xcore_running;
 };
 
+struct xsched_entity_rt {
+	struct list_head list_node;
+	enum xse_state state;
+	enum xse_flag flag;
+	enum xse_prio prio;
+
+	ktime_t timeslice;
+	s64 kick_slice;
+};
+
 struct xsched_entity {
 	uint32_t task_type;
 
@@ -148,9 +226,15 @@ struct xsched_entity {
 	/* Xsched class for this xse. */
 	const struct xsched_class *class;
 
+	/* RT class entity. */
+	struct xsched_entity_rt rt;
+
 	/* Pointer to context object. */
 	struct xsched_context *ctx;
 
+	/* Entity execution statistics. */
+	ktime_t last_process_time;
+
 	/* Pointer to an XCU object that represents an XCU
 	 * on which this xse is to be processed or is being
 	 * processed currently.
@@ -160,6 +244,57 @@ struct xsched_entity {
 	/* General purpose xse lock. */
 	spinlock_t xse_lock;
 };
+
+static inline bool xse_is_rt(const struct xsched_entity *xse)
+{
+	return xse && xse->class == &rt_xsched_class;
+}
+
+/* Returns a pointer to an atomic_t variable representing a counter
+ * of currently pending vstream kicks on a given XCU and for a
+ * given xsched class.
+ */
+static inline atomic_t *
+xsched_get_pending_kicks_class(const struct xsched_class *class,
+			       struct xsched_cu *xcu)
+{
+	/* Right now for testing purposes we have only XCU running streams. */
+	if (!xcu) {
+		XSCHED_ERR("Tried to get pending kicks with xcu=NULL.\n");
+		return NULL;
+	}
+
+	if (!class) {
+		XSCHED_ERR("Tried to get pending kicks with class=NULL.\n");
+		return NULL;
+	}
+
+	if (class == &rt_xsched_class)
+		return &xcu->pending_kicks_rt;
+
+	XSCHED_ERR("Xsched entity has an invalid class @ %s\n", __func__);
+	return NULL;
+}
+
+/* Returns a pointer to an atomic_t variable representing a counter of
+ * currently pending vstream kicks for the XCU on which a given xsched
+ * entity is enqueued and for the xsched class that is assigned to
+ * that xsched entity.
+ */
+static inline atomic_t *
+xsched_get_pending_kicks_xse(const struct xsched_entity *xse)
+{
+	if (!xse) {
+		XSCHED_ERR("Tried to get pending kicks with xse=NULL\n");
+		return NULL;
+	}
+
+	if (!xse->xcu) {
+		XSCHED_ERR("Tried to get pending kicks with xse->xcu=NULL\n");
+		return NULL;
+	}
+
+	return xsched_get_pending_kicks_class(xse->class, xse->xcu);
+}
 
 /* Increments pending kicks counter for an XCU that the given
  * xsched entity is attached to and for xsched entity's xsched
@@ -167,9 +302,28 @@ struct xsched_entity {
  */
 static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
 {
+	atomic_t *kicks_class = NULL;
+
+	kicks_class = xsched_get_pending_kicks_xse(xse);
+
+	if (!kicks_class)
+		return -EINVAL;
+
+	/* Incrementing pending kicks for XSE's sched class. */
+	atomic_inc(kicks_class);
+
 	/* Icrement pending kicks for current XSE. */
 	atomic_inc(&xse->kicks_pending_ctx_cnt);
 
+	/* Incrementing prio based pending kicks counter for RT class. */
+	if (xse_is_rt(xse)) {
+		atomic_inc(&xse->xcu->xrq.rt.prio_nr_kicks[xse->rt.prio]);
+		XSCHED_INFO("xcu increased pending kicks @ %s\n", __func__);
+	} else {
+		XSCHED_INFO("xse %u isn't rt class @ %s\n", xse->tgid,
+			    __func__);
+	}
+
 	return 0;
 }
 
@@ -179,9 +333,45 @@ static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
  */
 static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
 {
+	atomic_t *kicks_class = NULL;
+	atomic_t *kicks_prio_rt = NULL;
+
+	kicks_class = xsched_get_pending_kicks_xse(xse);
+
+	if (!kicks_class) {
+		XSCHED_ERR(
+			"Could not find atomic counter for class based pending kicks.\n");
+		return -EINVAL;
+	}
+
+	if (!atomic_read(kicks_class)) {
+		XSCHED_ERR("Tried to decrement pending kicks beyond 0!\n");
+		return -EINVAL;
+	}
+
+	/* Decrementing pending kicks for XSE's sched class. */
+	atomic_dec(kicks_class);
+
 	/* Decrementing pending kicks for current XSE. */
 	atomic_dec(&xse->kicks_pending_ctx_cnt);
 
+	/* Decrementing prio based pending kicks counter for RT class. */
+	if (xse_is_rt(xse)) {
+		kicks_prio_rt = &xse->xcu->xrq.rt.prio_nr_kicks[xse->rt.prio];
+
+		if (!atomic_read(kicks_prio_rt)) {
+			XSCHED_ERR(
+				"Tried to decrement prio pending kicks beyond 0!\n");
+			return -EINVAL;
+		}
+
+		atomic_dec(kicks_prio_rt);
+		XSCHED_INFO("xcu decreased pending kicks @ %s\n", __func__);
+	} else {
+		XSCHED_INFO("xse %u isn't rt class @ %s\n", xse->tgid,
+			    __func__);
+	}
+
 	return 0;
 }
 
@@ -190,7 +380,14 @@ static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
  */
 static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
 {
-	return 0;
+	atomic_t *kicks_rt;
+
+	kicks_rt = xsched_get_pending_kicks_class(&rt_xsched_class, xcu);
+
+	if (!kicks_rt)
+		return 0;
+
+	return !!atomic_read(kicks_rt);
 }
 
 static inline int xse_integrity_check(const struct xsched_entity *xse)
@@ -247,6 +444,33 @@ static inline struct xsched_context *find_ctx_by_tgid(pid_t tgid)
 	return ret;
 }
 
+/* Xsched class. */
+struct xsched_class {
+	const struct xsched_class *next;
+
+	/* Removes a given XSE from its runqueue. */
+	void (*dequeue_ctx)(struct xsched_entity *xse);
+
+	/* Places a given XSE on a runqueue on a given XCU. */
+	void (*enqueue_ctx)(struct xsched_entity *xse, struct xsched_cu *xcu);
+
+	/* Returns the next XSE to be submitted on a given XCU. */
+	struct xsched_entity *(*pick_next_ctx)(struct xsched_cu *xcu);
+
+	/* Puts an XSE back into the rq during preemption. */
+	void (*put_prev_ctx)(struct xsched_entity *xse);
+
+	/* Prepares a given XSE for submission on a given XCU. */
+	int (*submit_prepare_ctx)(struct xsched_entity *xse,
+				  struct xsched_cu *xcu);
+
+	/* Checks context preemption. */
+	bool (*check_preempt)(struct xsched_entity *xse);
+
+	/* Selects jobs from an XSE to submit on an XCU. */
+	size_t (*select_work)(struct xsched_cu *xcu, struct xsched_entity *xse);
+};
+
 static inline void xsched_init_vsm(struct vstream_metadata *vsm,
 				   struct vstream_info *vs, vstream_args_t *arg)
diff --git a/kernel/xsched/Makefile b/kernel/xsched/Makefile
index 62e58e4151b0..f882518d54ab 100644
--- a/kernel/xsched/Makefile
+++ b/kernel/xsched/Makefile
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += vstream.o
-obj-$(CONFIG_XCU_SCHEDULER) += core.o
+obj-$(CONFIG_XCU_SCHEDULER) += core.o rt.o
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 8a85fc0db45d..28ebf1791930 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -300,6 +300,8 @@ int xsched_ctx_init_xse(struct xsched_context *ctx, struct vstream_info *vs)
 	atomic_set(&xse->kicks_pending_ctx_cnt, 0);
 	atomic_set(&xse->kicks_submited, 0);
 
+	xse->task_type = XSCHED_TYPE_RT;
+	xse->last_process_time = 0;
 
 	xse->fd = ctx->fd;
 	xse->tgid = ctx->tgid;
@@ -401,12 +403,28 @@ static int xsched_schedule(void *input_xcu)
 	return err;
 }
 
+/* Initialize the xsched RT runqueue during kernel init.
+ * Should only be called from the xsched_init function.
+ */
+static inline void xsched_rt_rq_init(struct xsched_cu *xcu)
+{
+	int prio = 0;
+
+	for_each_xse_prio(prio) {
+		INIT_LIST_HEAD(&xcu->xrq.rt.rq[prio]);
+		xcu->xrq.rt.prio_nr_running[prio] = 0;
+		atomic_set(&xcu->xrq.rt.prio_nr_kicks[prio], 0);
+	}
+}
+
 /* Initialize xsched classes' runqueues. */
 static inline void xsched_rq_init(struct xsched_cu *xcu)
 {
 	xcu->xrq.nr_running = 0;
 	xcu->xrq.curr_xse = NULL;
+	xcu->xrq.class = &rt_xsched_class;
 	xcu->xrq.state = XRQ_STATE_IDLE;
+	xsched_rt_rq_init(xcu);
 }
 
 /* Initializes all xsched XCU objects.
@@ -418,9 +436,11 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 	bitmap_clear(xcu_group_root->xcu_mask, 0, XSCHED_NR_CUS);
 
 	xcu->id = xcu_id;
+	xcu->xrq.curr_xse = NULL;
 	xcu->state = XSCHED_XCU_NONE;
 	xcu->group = group;
 
+	atomic_set(&xcu->pending_kicks_rt, 0);
 	atomic_set(&xcu->has_active, 0);
 
 	INIT_LIST_HEAD(&xcu->vsm_list);
@@ -432,6 +452,10 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 	/* Mark current XCU in a mask inside XCU root group. */
 	set_bit(xcu->id, xcu_group_root->xcu_mask);
 
+	/* Initialize current XCU's runqueue. */
+	xsched_rq_init(xcu);
+
+
 	/* This worker should set XCU to XSCHED_XCU_WAIT_IDLE.
 	 * If after initialization XCU still has XSCHED_XCU_NONE
 	 * status then we can assume that there was a problem
diff --git a/kernel/xsched/rt.c b/kernel/xsched/rt.c
new file mode 100644
index 000000000000..12b188dce567
--- /dev/null
+++ b/kernel/xsched/rt.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Real-Time Scheduling Class for XPU device
+ *
+ * Copyright (C) 2025-2026 Huawei Technologies Co., Ltd
+ *
+ * Author: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <uapi/linux/sched/types.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/xsched.h>
+#include <linux/vstream.h>
+
+static void dequeue_ctx_rt(struct xsched_entity *xse) {}
+
+static void enqueue_ctx_rt(struct xsched_entity *xse, struct xsched_cu *xcu) {}
+
+static struct xsched_entity *pick_next_ctx_rt(struct xsched_cu *xcu)
+{
+	return NULL;
+}
+
+static void put_prev_ctx_rt(struct xsched_entity *xse) {}
+
+static int submit_prepare_ctx_rt(struct xsched_entity *xse,
+				 struct xsched_cu *xcu)
+{
+	return 0;
+}
+
+static size_t select_work_rt(struct xsched_cu *xcu, struct xsched_entity *xse)
+{
+	return 0;
+}
+
+static bool check_preempt_ctx_rt(struct xsched_entity *xse)
+{
+	return true;
+}
+
+const struct xsched_class rt_xsched_class = {
+	.next = NULL,
+	.dequeue_ctx = dequeue_ctx_rt,
+	.enqueue_ctx = enqueue_ctx_rt,
+	.pick_next_ctx = pick_next_ctx_rt,
+	.put_prev_ctx = put_prev_ctx_rt,
+	.submit_prepare_ctx = submit_prepare_ctx_rt,
+	.select_work = select_work_rt,
+	.check_preempt = check_preempt_ctx_rt
+};
-- 
2.34.1