From: Steve Sistare <steve.sistare@oracle.com>
hulk inclusion
category: feature
bugzilla: 38261
CVE: NA
---------------------------
Define and initialize a sparse bitmap of overloaded CPUs, per last-level-cache scheduling domain, for use by the CFS scheduling class. Save a pointer to cfs_overload_cpus in the rq for efficient access.
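For reference, the bitmap is intended to be read from CFS fast paths under RCU, pairing with the rcu_assign_pointer() in update_top_cache_domain() below. A minimal sketch of that access pattern follows; the function name and the bitmap-marking step are illustrative placeholders, not code added by this patch:

	/* Sketch only: reach this CPU's LLC overload bitmap via its rq. */
	static void cfs_overload_mark(struct rq *rq)
	{
		struct sparsemask *overload_cpus;

		rcu_read_lock();
		overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
		if (overload_cpus) {
			/* set or clear rq->cpu here with a sparsemask helper */
		}
		rcu_read_unlock();
	}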
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
---
 include/linux/sched/topology.h |  1 +
 kernel/sched/sched.h           |  2 ++
 kernel/sched/topology.c        | 25 +++++++++++++++++++++++--
 3 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 15f3f61f7e3b..3d04d4505fdc 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -72,6 +72,7 @@ struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
+	struct sparsemask *cfs_overload_cpus;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 955abd645ff9..5d9cb3ad4546 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -83,6 +83,7 @@
 
 struct rq;
 struct cpuidle_state;
+struct sparsemask;
 
 /* task_struct::on_rq states: */
 #define TASK_ON_RQ_QUEUED	1
@@ -822,6 +823,7 @@ struct rq {
 	struct cfs_rq		cfs;
 	struct rt_rq		rt;
 	struct dl_rq		dl;
+	struct sparsemask	*cfs_overload_cpus;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this CPU: */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4012acc5adcf..5ca6998d0547 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,7 @@
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"
+#include "sparsemask.h"
 
 DEFINE_MUTEX(sched_domains_mutex);
 
@@ -409,7 +410,9 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
+	struct sparsemask *cfs_overload_cpus = NULL;
 	struct sched_domain_shared *sds = NULL;
+	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *sd;
 	int id = cpu;
 	int size = 1;
@@ -419,8 +422,10 @@ static void update_top_cache_domain(int cpu)
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
 		sds = sd->shared;
+		cfs_overload_cpus = sds->cfs_overload_cpus;
 	}
 
+	rcu_assign_pointer(rq->cfs_overload_cpus, cfs_overload_cpus);
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
@@ -1613,7 +1618,22 @@ static void __sdt_free(const struct cpumask *cpu_map)
 
 static int sd_llc_alloc(struct sched_domain *sd)
 {
-	/* Allocate sd->shared data here. Empty for now. */
+	struct sched_domain_shared *sds = sd->shared;
+	struct cpumask *span = sched_domain_span(sd);
+	int nid = cpu_to_node(cpumask_first(span));
+	int flags = __GFP_ZERO | GFP_KERNEL;
+	struct sparsemask *mask;
+
+	/*
+	 * Allocate the bitmap if not already allocated.  This is called for
+	 * every CPU in the LLC but only allocates once per sd_llc_shared.
+	 */
+	if (!sds->cfs_overload_cpus) {
+		mask = sparsemask_alloc_node(nr_cpu_ids, 3, flags, nid);
+		if (!mask)
+			return 1;
+		sds->cfs_overload_cpus = mask;
+	}
 
 	return 0;
 }
@@ -1625,7 +1645,8 @@ static void sd_llc_free(struct sched_domain *sd)
 	if (!sds)
 		return;
 
-	/* Free data here. Empty for now. */
+	sparsemask_free(sds->cfs_overload_cpus);
+	sds->cfs_overload_cpus = NULL;
 }
 
 static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)