
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICB7K1 -------------------------------- Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com> --- kernel/sched/core.c | 8 +++++ kernel/sched/fair.c | 20 ++---------- kernel/sched/sched.h | 14 +++++++-- kernel/sched/soft_domain.c | 62 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 20 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1539ab766993..e2080b7a9d37 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10800,6 +10800,13 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) return 0; } +static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) +{ + struct task_group *tg = css_tg(css); + + offline_soft_domain(tg); +} + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); @@ -12232,6 +12239,7 @@ static struct cftype cpu_files[] = { struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_online = cpu_cgroup_css_online, + .css_offline = cpu_cgroup_css_offline, .css_released = cpu_cgroup_css_released, .css_free = cpu_cgroup_css_free, .css_extra_stat_show = cpu_extra_stat_show, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 21bd2ca4172d..2ef6b9063cc5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -14846,22 +14846,6 @@ void free_fair_sched_group(struct task_group *tg) kfree(tg->se); } -#ifdef CONFIG_SCHED_SOFT_DOMAIN -int init_soft_domain(struct task_group *tg) -{ - struct soft_domain_ctx *sf_ctx = NULL; - - sf_ctx = kzalloc(sizeof(*sf_ctx) + cpumask_size(), GFP_KERNEL); - if (!sf_ctx) - return -ENOMEM; - - sf_ctx->policy = 0; - tg->sf_ctx = sf_ctx; - - return 0; -} -#endif - int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { struct sched_entity *se; @@ -14882,7 +14866,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) if (ret) goto 
err; - ret = init_soft_domain(tg); + ret = init_soft_domain(tg, parent); if (ret) goto err; @@ -14908,6 +14892,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) kfree(cfs_rq); err: destroy_auto_affinity(tg); + destroy_soft_domain(tg); return 0; } @@ -14937,6 +14922,7 @@ void unregister_fair_sched_group(struct task_group *tg) destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); destroy_auto_affinity(tg); + destroy_soft_domain(tg); for_each_possible_cpu(cpu) { if (tg->se[cpu]) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cb9f8779ffc0..fe5821c48fed 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3759,8 +3759,9 @@ bool bpf_sched_is_cpu_allowed(struct task_struct *p, int cpu); #ifdef CONFIG_SCHED_SOFT_DOMAIN void build_soft_domain(void); -int init_soft_domain(struct task_group *tg); - +int init_soft_domain(struct task_group *tg, struct task_group *parent); +int destroy_soft_domain(struct task_group *tg); +void offline_soft_domain(struct task_group *tg); int sched_group_set_soft_domain(struct task_group *tg, long val); int sched_group_set_soft_domain_quota(struct task_group *tg, long val); @@ -3771,7 +3772,14 @@ static inline struct cpumask *soft_domain_span(unsigned long span[]) #else static inline void build_soft_domain(void) { } -static inline int init_soft_domain(struct task_group *tg) +static inline int init_soft_domain(struct task_group *tg, struct task_group *parent) +{ + return 0; +} + +static inline void offline_soft_domain(struct task_group *tg) { } + +static inline int destroy_soft_domain(struct task_group *tg) { return 0; } diff --git a/kernel/sched/soft_domain.c b/kernel/sched/soft_domain.c index d4433633a725..c34be1fee3e0 100644 --- a/kernel/sched/soft_domain.c +++ b/kernel/sched/soft_domain.c @@ -435,3 +435,65 @@ int sched_group_set_soft_domain_quota(struct task_group *tg, long val) return ret; } + +int init_soft_domain(struct task_group *tg, struct task_group *parent) +{ + struct soft_domain_ctx *sf_ctx = NULL; 
+ struct soft_domain_ctx *psf_ctx = NULL; + + if (!soft_domain_enabled()) + return 0; + + sf_ctx = kzalloc(sizeof(*sf_ctx) + cpumask_size(), GFP_KERNEL); + if (!sf_ctx) + return -ENOMEM; + + mutex_lock(&soft_domain_mutex); + psf_ctx = parent->sf_ctx; + if (psf_ctx) { + sf_ctx->policy = psf_ctx->policy; + sf_ctx->nr_cpus = psf_ctx->nr_cpus; + cpumask_copy(to_cpumask(sf_ctx->span), to_cpumask(psf_ctx->span)); + } + + tg->sf_ctx = sf_ctx; + mutex_unlock(&soft_domain_mutex); + + return 0; +} + +void offline_soft_domain(struct task_group *tg) +{ + struct soft_domain_ctx *sf_ctx = NULL; + struct soft_domain_ctx *psf_ctx = NULL; + + if (!soft_domain_enabled()) + return; + + sf_ctx = tg->sf_ctx; + psf_ctx = tg->parent->sf_ctx; + + if (!sf_ctx) + return; + + mutex_lock(&soft_domain_mutex); + if (sf_ctx->policy != 0) { + /* + * parent group is not set, this group set + * soft domain by user. + */ + if (psf_ctx == NULL || psf_ctx->policy == 0) + __sched_group_unset_soft_domain(tg); + } + mutex_unlock(&soft_domain_mutex); +} + +int destroy_soft_domain(struct task_group *tg) +{ + if (!soft_domain_enabled()) + return 0; + + kfree(tg->sf_ctx); + + return 0; +} -- 2.18.0.huawei.25