hulk inclusion category: cleanup bugzilla: https://atomgit.com/openeuler/kernel/issues/8423 -------------------------------- Introduce init_fair_xsched_group() to handle XCU-specific initialization logic for scheduling groups. This function is called after the generic xsched_group_init() to ensure that common group structures are fully set up before applying XCU-dependent configuration. The change is preparatory and non-functional on its own, laying groundwork for robust per-XCU group management in upcoming features such as hierarchical CFS/RT subgroups. Signed-off-by: Liu Kai <liukai284@huawei.com> --- drivers/xcu/xcu_group.c | 2 +- include/linux/xsched.h | 9 ++++++++- kernel/xsched/cfs.c | 11 ++++++++--- kernel/xsched/cgroup.c | 38 +++++++++++++++++++------------------- 4 files changed, 36 insertions(+), 24 deletions(-) diff --git a/drivers/xcu/xcu_group.c b/drivers/xcu/xcu_group.c index 1cf159b8f57d..b53501759a09 100644 --- a/drivers/xcu/xcu_group.c +++ b/drivers/xcu/xcu_group.c @@ -328,7 +328,7 @@ int xsched_xcu_register(struct xcu_group *group, uint32_t phys_id) } #ifdef CONFIG_CGROUP_XCU - xcu_cfs_root_cg_init(xcu); + init_fair_xsched_group(root_xcg, xcu, &xcu->xrq.cfs); #endif /* CONFIG_CGROUP_XCU */ return 0; diff --git a/include/linux/xsched.h b/include/linux/xsched.h index e48e6d404b1c..e393377781e9 100644 --- a/include/linux/xsched.h +++ b/include/linux/xsched.h @@ -51,8 +51,10 @@ extern struct xsched_cu *xsched_cu_mgr[XSCHED_NR_CUS]; +extern struct xsched_group *root_xcg; extern struct xsched_class rt_xsched_class; extern struct xsched_class fair_xsched_class; +extern struct list_head xsched_class_list; #define for_each_xsched_class(class) \ list_for_each_entry((class), &(xsched_class_list), node) @@ -462,11 +464,16 @@ void dequeue_ctx(struct xsched_entity *xse); int delete_ctx(struct xsched_context *ctx); const struct xsched_class *find_xsched_class(int class_id); +#ifdef CONFIG_XCU_SCHED_CFS +void init_xsched_cfs_rq(struct xsched_rq_cfs *cfs_rq); 
+#endif + #ifdef CONFIG_CGROUP_XCU /* Xsched group manage functions */ void xsched_group_inherit(struct task_struct *tsk, struct xsched_entity *xse); void xcu_cg_subsys_init(void); -void xcu_cfs_root_cg_init(struct xsched_cu *xcu); +void init_fair_xsched_group(struct xsched_group *xg, + struct xsched_cu *xcu, struct xsched_rq_cfs *cfs_rq); void xcu_grp_shares_update(struct xsched_group *parent, struct xsched_group *child, u32 shares_cfg); void xcu_grp_shares_add(struct xsched_group *parent, struct xsched_group *child); diff --git a/kernel/xsched/cfs.c b/kernel/xsched/cfs.c index d8298f574387..6b74f96a6860 100644 --- a/kernel/xsched/cfs.c +++ b/kernel/xsched/cfs.c @@ -228,14 +228,19 @@ static void put_prev_ctx_fair(struct xsched_entity *xse) #endif } +void init_xsched_cfs_rq(struct xsched_rq_cfs *cfs_rq) +{ + cfs_rq->nr_running = 0; + cfs_rq->ctx_timeline = RB_ROOT_CACHED; + cfs_rq->min_xruntime = XSCHED_TIME_INF; +} + void rq_init_fair(struct xsched_rq *xrq) { if (!xrq) return; - xrq->cfs.nr_running = 0; - xrq->cfs.ctx_timeline = RB_ROOT_CACHED; - xrq->cfs.min_xruntime = XSCHED_TIME_INF; + init_xsched_cfs_rq(&xrq->cfs); } void xse_init_fair(struct xsched_entity *xse) diff --git a/kernel/xsched/cgroup.c b/kernel/xsched/cgroup.c index b819544c7dcc..32db2afa82ac 100644 --- a/kernel/xsched/cgroup.c +++ b/kernel/xsched/cgroup.c @@ -97,15 +97,23 @@ void xcu_cg_subsys_init(void) xcg_attach_entry_cache = KMEM_CACHE(xcg_attach_entry, 0); } -void xcu_cfs_root_cg_init(struct xsched_cu *xcu) +void init_fair_xsched_group(struct xsched_group *xg, + struct xsched_cu *xcu, struct xsched_rq_cfs *cfs_rq) { int id = xcu->id; - root_xcg->perxcu_priv[id].xcu_id = id; - root_xcg->perxcu_priv[id].self = root_xcg; - root_xcg->perxcu_priv[id].cfs_rq = &xcu->xrq.cfs; - root_xcg->perxcu_priv[id].xse.is_group = true; - fair_xsched_class.xse_init(&root_xcg->perxcu_priv[id].xse); + if (xg != root_xcg && WARN_ON(!xg->parent)) + return; + + xg->perxcu_priv[id].xcu_id = id; + 
xg->perxcu_priv[id].self = xg; + xg->perxcu_priv[id].cfs_rq = cfs_rq; + xg->perxcu_priv[id].xse.xcu = xcu; + xg->perxcu_priv[id].xse.is_group = true; + xg->perxcu_priv[id].xse.parent_grp = xg->parent; + + /* Put new empty groups to the right in parent's rbtree */ + fair_xsched_class.xse_init(&xg->perxcu_priv[id].xse); } static void xcg_perxcu_cfs_rq_deinit(struct xsched_group *xcg, int max_id) @@ -140,24 +148,16 @@ static int xcu_cfs_cg_init(struct xsched_group *xcg, struct xsched_rq_cfs *sub_cfs_rq; for_each_active_xcu(xcu, id) { - xcg->perxcu_priv[id].xcu_id = id; - xcg->perxcu_priv[id].self = xcg; - sub_cfs_rq = kzalloc(sizeof(*sub_cfs_rq), GFP_KERNEL); if (!sub_cfs_rq) { - XSCHED_ERR("Fail to alloc cfs runqueue on xcu %d\n", id); + XSCHED_ERR("Fail to alloc runqueue on xcu %d\n", id); xcg_perxcu_cfs_rq_deinit(xcg, id); return -ENOMEM; } - xcg->perxcu_priv[id].cfs_rq = sub_cfs_rq; - xcg->perxcu_priv[id].cfs_rq->ctx_timeline = RB_ROOT_CACHED; - xcg->perxcu_priv[id].cfs_rq->min_xruntime = XSCHED_TIME_INF; - - /* Put new empty groups to the right in parent's rbtree: */ - fair_xsched_class.xse_init(&xcg->perxcu_priv[id].xse); - xcg->perxcu_priv[id].xse.is_group = true; - xcg->perxcu_priv[id].xse.parent_grp = parent_xg; - xcg->perxcu_priv[id].xse.xcu = xcu; + init_xsched_cfs_rq(sub_cfs_rq); + + /* call init_fair_xsched_group() after xsched_group_init() */ + init_fair_xsched_group(xcg, xcu, sub_cfs_rq); } xcg->period = XSCHED_CFS_QUOTA_PERIOD_MS; -- 2.34.1