
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com> hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB ----------------------------------------- Add xsched CFS class base template with function stubs: - dequeue_ctx_fair. - enqueue_ctx_fair. - pick_next_ctx_fair. - check_preempt_fair. - put_prev_ctx_fair. - submit_prepare_ctx_fair. Add xsched_cfs.c in /kernel/xsched Makefile. Add cfs class related data structure. Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com> Signed-off-by: Hui Tang <tanghui20@huawei.com> --- include/linux/xsched.h | 45 +++++++++++++++++++++++-- kernel/xsched/Makefile | 2 +- kernel/xsched/cfs.c | 76 ++++++++++++++++++++++++++++++++++++++++++ kernel/xsched/core.c | 16 ++++++++- kernel/xsched/rt.c | 2 +- 5 files changed, 136 insertions(+), 5 deletions(-) create mode 100644 kernel/xsched/cfs.c diff --git a/include/linux/xsched.h b/include/linux/xsched.h index f093d82dfb95..0f3c0f7f21ad 100644 --- a/include/linux/xsched.h +++ b/include/linux/xsched.h @@ -55,6 +55,8 @@ #define XCU_HASH_ORDER 6 +#define XSCHED_CFS_MIN_TIMESLICE (10 * NSEC_PER_MSEC) + #define __GET_VS_TASK_TYPE(t) ((t)&0xFF) #define __GET_VS_TASK_PRIO_RT(t) (((t) >> 8) & 0xFF) @@ -72,6 +74,7 @@ enum xcu_sched_type { XSCHED_TYPE_RT, XSCHED_TYPE_DFLT = XSCHED_TYPE_RT, + XSCHED_TYPE_CFS, XSCHED_TYPE_NUM, }; @@ -105,6 +108,7 @@ enum xse_flag { extern const struct xsched_class rt_xsched_class; +extern const struct xsched_class fair_xsched_class; #define xsched_first_class (&rt_xsched_class) @@ -117,6 +121,12 @@ extern const struct xsched_class rt_xsched_class; #define for_each_vstream_in_ctx(vs, ctx) \ list_for_each_entry((vs), &((ctx)->vstream_list), ctx_node) +/* Manages xsched CFS-like class rbtree based runqueue. */ +struct xsched_rq_cfs { + unsigned int load; + u64 min_xruntime; + struct rb_root_cached ctx_timeline; +}; /* Manages xsched RT-like class linked list based runqueue. 
* @@ -144,6 +154,8 @@ struct xsched_rq { /* RT class run queue.*/ struct xsched_rq_rt rt; + /* CFS class run queue.*/ + struct xsched_rq_cfs cfs; }; enum xcu_state { @@ -176,6 +188,8 @@ struct xsched_cu { /* RT class kick counter. */ atomic_t pending_kicks_rt; + /* CFS class kick counter. */ + atomic_t pending_kicks_cfs; struct task_struct *worker; @@ -203,6 +217,22 @@ struct xsched_entity_rt { s64 kick_slice; }; +struct xsched_entity_cfs { + struct rb_node run_node; + + /* Rq on which this entity is (to be) queued. */ + struct xsched_rq_cfs *cfs_rq; + + /* Value of "virtual" runtime to sort entities in rbtree */ + u64 xruntime; + u32 weight; + + /* Clean execution time of scheduling entity */ + u64 exec_start; + u64 last_exec_runtime; + u64 sum_exec_runtime; +}; + struct xsched_entity { uint32_t task_type; @@ -231,6 +261,8 @@ struct xsched_entity { /* RT class entity. */ struct xsched_entity_rt rt; + /* CFS class entity. */ + struct xsched_entity_cfs cfs; /* Pointer to context object. */ struct xsched_context *ctx; @@ -252,6 +284,11 @@ static inline bool xse_is_rt(const struct xsched_entity *xse) return xse && xse->class == &rt_xsched_class; } +static inline bool xse_is_cfs(const struct xsched_entity *xse) +{ + return xse && xse->class == &fair_xsched_class; +} + /* Returns a pointer to an atomic_t variable representing a counter * of currently pending vstream kicks on a given XCU and for a * given xsched class. 
@@ -273,6 +310,8 @@ xsched_get_pending_kicks_class(const struct xsched_class *class, if (class == &rt_xsched_class) return &xcu->pending_kicks_rt; + if (class == &fair_xsched_class) + return &xcu->pending_kicks_cfs; XSCHED_ERR("Xsched entity has an invalid class @ %s\n", __func__); return NULL; @@ -384,13 +423,15 @@ static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse) static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu) { atomic_t *kicks_rt; + atomic_t *kicks_cfs; kicks_rt = xsched_get_pending_kicks_class(&rt_xsched_class, xcu); + kicks_cfs = xsched_get_pending_kicks_class(&fair_xsched_class, xcu); - if (!kicks_rt) + if (!kicks_rt || !kicks_cfs) return 0; - return !!atomic_read(kicks_rt); + return (!!atomic_read(kicks_rt) || !!atomic_read(kicks_cfs)); } static inline int xse_integrity_check(const struct xsched_entity *xse) diff --git a/kernel/xsched/Makefile b/kernel/xsched/Makefile index f882518d54ab..fe212f228cf6 100644 --- a/kernel/xsched/Makefile +++ b/kernel/xsched/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += vstream.o -obj-$(CONFIG_XCU_SCHEDULER) += core.o rt.o +obj-$(CONFIG_XCU_SCHEDULER) += core.o rt.o cfs.o diff --git a/kernel/xsched/cfs.c b/kernel/xsched/cfs.c new file mode 100644 index 000000000000..36922d91e85b --- /dev/null +++ b/kernel/xsched/cfs.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Completely Fair Scheduling (CFS) Class for XPU device + * + * Copyright (C) 2025-2026 Huawei Technologies Co., Ltd + * + * Author: Konstantin Meskhidze <konstantin.meskhidze@huawei.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + */ +#include <linux/xsched.h> + +#define CFS_INNER_RQ_EMPTY(cfs_xse) \ + ((cfs_xse)->xruntime == XSCHED_TIME_INF) + +/* For test xsched_cfs_grp_test.c */ +atomic64_t virtual_sched_clock = ATOMIC_INIT(0); + +/* + * Xsched Fair class methods + * For rq manipulation we rely on root runqueue lock already acquired in core. + * Access xsched_group_xcu_priv requires no locks because one thread per XCU. + */ +static void dequeue_ctx_fair(struct xsched_entity *xse) +{ +} + +/** + * enqueue_ctx_fair() - Add context to the runqueue + * @xse: xsched entity of context + * @xcu: executor + * + * In contrast to enqueue_task it is called once on context init. + * Although groups reside in tree, their nodes are not counted in nr_running. + * The xruntime of a group xsched entity is represented by min xruntime inside. + */ +static void enqueue_ctx_fair(struct xsched_entity *xse, struct xsched_cu *xcu) +{ +} + +static struct xsched_entity *pick_next_ctx_fair(struct xsched_cu *xcu) +{ + return NULL; +} + +static inline bool xs_should_preempt_fair(struct xsched_entity *xse) +{ + return 0; +} + +static void put_prev_ctx_fair(struct xsched_entity *xse) +{ +} + +int submit_prepare_ctx_fair(struct xsched_entity *xse, struct xsched_cu *xcu) +{ + return 0; +} + +const struct xsched_class fair_xsched_class = { + .next = NULL, + .dequeue_ctx = dequeue_ctx_fair, + .enqueue_ctx = enqueue_ctx_fair, + .pick_next_ctx = pick_next_ctx_fair, + .put_prev_ctx = put_prev_ctx_fair, + .submit_prepare_ctx = submit_prepare_ctx_fair, + .check_preempt = xs_should_preempt_fair, +}; diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c index 81bdb1bfc9a2..5d96a22337fa 100644 --- a/kernel/xsched/core.c +++ b/kernel/xsched/core.c @@ -93,7 +93,7 @@ static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse) not_empty++; } } - } while (not_empty); + } while ((sum_exec_time < XSCHED_CFS_MIN_TIMESLICE) && (not_empty)); kick_count = 
atomic_read(&xse->kicks_pending_ctx_cnt); XSCHED_INFO("After decrement XSE kick_count=%u @ %s\n", @@ -408,6 +408,10 @@ int xsched_xse_set_class(struct xsched_entity *xse) xse->class = &rt_xsched_class; XSCHED_INFO("Context is in RT class %s\n", __func__); break; + case XSCHED_TYPE_CFS: + xse->class = &fair_xsched_class; + XSCHED_INFO("Context is in CFS class %s\n", __func__); + break; default: XSCHED_ERR("Xse has incorrect class @ %s\n", __func__); return -EINVAL; @@ -658,6 +662,14 @@ static inline void xsched_rt_rq_init(struct xsched_cu *xcu) } } +/* Initialize xsched cfs runqueue during kernel init. + * Should only be called from xsched_init function. + */ +static inline void xsched_cfs_rq_init(struct xsched_cu *xcu) +{ + xcu->xrq.cfs.ctx_timeline = RB_ROOT_CACHED; +} + /* Initialize xsched classes' runqueues. */ static inline void xsched_rq_init(struct xsched_cu *xcu) { @@ -666,6 +678,7 @@ static inline void xsched_rq_init(struct xsched_cu *xcu) xcu->xrq.class = &rt_xsched_class; xcu->xrq.state = XRQ_STATE_IDLE; xsched_rt_rq_init(xcu); + xsched_cfs_rq_init(xcu); } /* Initializes all xsched XCU objects. @@ -682,6 +695,7 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group, xcu->group = group; atomic_set(&xcu->pending_kicks_rt, 0); + atomic_set(&xcu->pending_kicks_cfs, 0); atomic_set(&xcu->has_active, 0); INIT_LIST_HEAD(&xcu->vsm_list); diff --git a/kernel/xsched/rt.c b/kernel/xsched/rt.c index da9959778366..8bd3f7c7f403 100644 --- a/kernel/xsched/rt.c +++ b/kernel/xsched/rt.c @@ -233,7 +233,7 @@ static size_t select_work_rt(struct xsched_cu *xcu, struct xsched_entity *xse) } const struct xsched_class rt_xsched_class = { - .next = NULL, + .next = &fair_xsched_class, .dequeue_ctx = dequeue_ctx_rt, .enqueue_ctx = enqueue_ctx_rt, .pick_next_ctx = pick_next_ctx_rt, -- 2.34.1