From: Steve Sistare <steven.sistare@oracle.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8PIYZ
CVE: NA
Reference: https://lore.kernel.org/lkml/1541767840-93588-3-git-send-email-steven.sistar...
---------------------------
Add functions sd_llc_alloc_all() and sd_llc_free_all() to allocate and free data pointed to by struct sched_domain_shared at the last-level-cache domain. sd_llc_alloc_all() is called after the SD hierarchy is known, to eliminate the unnecessary allocations that would occur if we instead allocated in __sdt_alloc() and then figured out which shared nodes are redundant.
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Yu <serein.chengyu@huawei.com>
---
 kernel/sched/topology.c | 75 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 74 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 423d08947962..3346362db697 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -11,6 +11,12 @@ DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
 static cpumask_var_t sched_domains_tmpmask;
 static cpumask_var_t sched_domains_tmpmask2;
 
+struct s_data;
+static int sd_llc_alloc(struct sched_domain *sd);
+static void sd_llc_free(struct sched_domain *sd);
+static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d);
+static void sd_llc_free_all(const struct cpumask *cpu_map);
+
 #ifdef CONFIG_SCHED_DEBUG
 static int __init sched_debug_setup(char *str)
@@ -632,8 +638,10 @@ static void destroy_sched_domain(struct sched_domain *sd)
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);
 
-	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
+		sd_llc_free(sd);
 		kfree(sd->shared);
+	}
 	kfree(sd);
 }
@@ -1473,6 +1481,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_percpu(d->sd);
 		fallthrough;
 	case sa_sd_storage:
+		sd_llc_free_all(cpu_map);
 		__sdt_free(cpu_map);
 		fallthrough;
 	case sa_none:
@@ -2287,6 +2296,62 @@ static void __sdt_free(const struct cpumask *cpu_map)
 	}
 }
 
+static int sd_llc_alloc(struct sched_domain *sd)
+{
+	/* Allocate sd->shared data here. Empty for now. */
+
+	return 0;
+}
+
+static void sd_llc_free(struct sched_domain *sd)
+{
+	struct sched_domain_shared *sds = sd->shared;
+
+	if (!sds)
+		return;
+
+	/* Free data here. Empty for now. */
+}
+
+static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
+{
+	struct sched_domain *sd, *hsd;
+	int i;
+
+	for_each_cpu(i, cpu_map) {
+		/* Find highest domain that shares resources */
+		hsd = NULL;
+		for (sd = *per_cpu_ptr(d->sd, i); sd; sd = sd->parent) {
+			if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+				break;
+			hsd = sd;
+		}
+		if (hsd && sd_llc_alloc(hsd))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void sd_llc_free_all(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	struct sched_domain *sd;
+	struct sd_data *sdd;
+	int j;
+
+	for_each_sd_topology(tl) {
+		sdd = &tl->data;
+		if (!sdd || !sdd->sd)
+			continue;
+		for_each_cpu(j, cpu_map) {
+			sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd)
+				sd_llc_free(sd);
+		}
+	}
+}
+
 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
@@ -2480,6 +2545,14 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 		}
 	}
 
+	/*
+	 * Allocate shared sd data at last level cache. Must be done after
+	 * domains are built above, but before the data is used in
+	 * cpu_attach_domain and descendants below.
+	 */
+	if (sd_llc_alloc_all(cpu_map, &d))
+		goto error;
+
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
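
The sd_llc_alloc()/sd_llc_free() hooks are deliberately empty in this patch.
For illustration only, here is a minimal sketch of how a follow-up patch
might populate them, assuming a hypothetical cpumask_var_t field named
overload_cpus were added to struct sched_domain_shared (the field name and
its purpose are assumptions for this sketch, not part of the patch):

	/* Sketch: allocate one per-LLC mask when the shared domain is set up. */
	static int sd_llc_alloc(struct sched_domain *sd)
	{
		struct sched_domain_shared *sds = sd->shared;

		/* Nonzero return propagates as failure via sd_llc_alloc_all(). */
		if (!zalloc_cpumask_var(&sds->overload_cpus, GFP_KERNEL))
			return 1;

		return 0;
	}

	/* Sketch: release the per-LLC mask when the shared domain goes away. */
	static void sd_llc_free(struct sched_domain *sd)
	{
		struct sched_domain_shared *sds = sd->shared;

		if (!sds)
			return;

		free_cpumask_var(sds->overload_cpus);
	}

Because destroy_sched_domain() now calls sd_llc_free() before kfree'ing
sd->shared, data allocated this way is released on every domain teardown
and rebuild with no further changes needed.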