
From: wanghongliang <wanghongliang@loongson.cn>

LoongArch inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC6CVG
CVE: NA

--------------------------------

In order to achieve more reasonable load balancing behavior, add
support for SCHED_MC. The LLC distribution of LoongArch is consistent
with the NUMA node, so the SCHED_MC balancing domain can effectively
reduce the cases where processes are woken up onto an SMT sibling.

Signed-off-by: wanghongliang <wanghongliang@loongson.cn>
Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
Change-Id: I8388f208d93bf53d79473ba995895e5cd89d81b4
---
 arch/loongarch/Kconfig                |  9 ++++++
 arch/loongarch/include/asm/smp.h      |  1 +
 arch/loongarch/include/asm/topology.h |  9 ++++++
 arch/loongarch/kernel/smp.c           | 46 +++++++++++++++++++++++++++
 4 files changed, 65 insertions(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index fa47efea7a13..316bf567759d 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -456,6 +456,15 @@ config NR_CPUS
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.
 
+config SCHED_MC
+	def_bool y
+	prompt "Multi-core scheduler support"
+	depends on SMP
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 config NUMA
 	bool "NUMA Support"
 	select SMP
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index cc232901e4dd..1a7dba6256ea 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -24,6 +24,7 @@ extern int num_processors;
 extern int disabled_cpus;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_llc_shared_map[];
 extern cpumask_t cpu_foreign_map[];
 
 void loongson_smp_setup(void);
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 379f5e4830eb..5c485999cbb2 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -35,6 +35,15 @@ extern unsigned int __max_packages;
 #define topology_core_id(cpu)		(cpu_data[cpu].core)
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 #define topology_sibling_cpumask(cpu)	(&cpu_sibling_map[cpu])
+
+/*
+ * Return the CPUs that share the last level cache.
+ */
+static inline const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return &cpu_llc_shared_map[cpu];
+}
+
 #else
 #define topology_max_packages()	(1)
 #endif
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 5d02bf5126b7..0201eb35df4c 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -49,6 +49,9 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_llc_shared_map);
+
 static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
 
@@ -65,6 +68,10 @@ static cpumask_t cpu_sibling_setup_map;
 /* representing cpus for which core maps can be computed */
 static cpumask_t cpu_core_setup_map;
 
+/* representing cpus for which llc sibling maps can be computed */
+static cpumask_t cpu_llc_shared_setup_map;
+
+
 struct secondary_data cpuboot_data;
 static DEFINE_PER_CPU(int, cpu_state);
 
@@ -103,6 +110,42 @@ static inline void set_cpu_core_map(int cpu)
 	}
 }
 
+static inline bool cpus_are_shared_llc(int cpua, int cpub)
+{
+	if (cpu_to_node(cpua) != cpu_to_node(cpub))
+		return false;
+
+	return true;
+}
+
+static inline void set_cpu_llc_shared_map(int cpu)
+{
+	int i;
+
+	cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);
+
+	for_each_cpu(i, &cpu_llc_shared_setup_map) {
+		if (cpus_are_shared_llc(cpu, i)) {
+			cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
+		}
+	}
+}
+
+static inline void clear_cpu_llc_shared_map(int cpu)
+{
+	int i;
+
+	for_each_cpu(i, &cpu_llc_shared_setup_map) {
+		if (cpus_are_shared_llc(cpu, i)) {
+			cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
+			cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
+		}
+	}
+
+	cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
+}
+
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
@@ -401,6 +444,7 @@ int loongson_cpu_disable(void)
 #endif
 	set_cpu_online(cpu, false);
 	clear_cpu_sibling_map(cpu);
+	clear_cpu_llc_shared_map(cpu);
 	calculate_cpu_foreign_map();
 	local_irq_save(flags);
 	fixup_irqs();
@@ -523,6 +567,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	loongson_prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 	set_cpu_core_map(0);
+	set_cpu_llc_shared_map(0);
 	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(cpu_possible_mask);
@@ -564,6 +609,7 @@ asmlinkage void start_secondary(void)
 
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
+	set_cpu_llc_shared_map(cpu);
 
 	notify_cpu_starting(cpu);
 
-- 
2.33.0
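Background note on how the new mask is consumed: the arch only has to provide
cpu_coregroup_mask(); the MC scheduling domain itself comes from the
scheduler's default topology table in kernel/sched/topology.c once
CONFIG_SCHED_MC is enabled. The sketch below is illustrative rather than
verbatim; the exact set of levels (e.g. CLS, PKG vs. DIE naming) differs
between kernel versions.

/*
 * Rough sketch of the generic default topology table (kernel/sched/topology.c,
 * approximate; level names vary by version). With CONFIG_SCHED_MC=y the MC
 * level is built from cpu_coregroup_mask(), which this patch maps to
 * cpu_llc_shared_map, i.e. one MC domain per NUMA node on LoongArch, sitting
 * above the SMT level built from cpu_smt_mask().
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};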