From: Kai Shen <shenkai8@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=28
CVE: NA
---------------------------
Random performance decreases appear in Hackbench cases that test pipe or socket communication among multiple threads on the HiSilicon HIP08 SoC. Cache sharing caused by the change of data layout and the cache readunique prefetch mechanism both lead to this problem.
The readunique mechanism, which may be triggered by store operations, invalidates cachelines on other cores during the data fetching stage, so cacheline invalidation happens frequently when data is shared across cores.
Disabling cache readunique prefetch tackles this problem. Test cases look like:

for i in 20;do
	echo "--------pipe thread num=$i----------"
	for j in $(seq 1 10);do
		./hackbench -pipe $i thread 1000
	done
done
We disable readunique prefetch only in EL2, because disabling it in EL1 may cause a panic due to the lack of the related privilege, which is usually configured in BIOS.
Introduce CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH and allow RU prefetch to be disabled with the boot cmdline option 'readunique_prefetch=off'.
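For example (illustrative only; the GRUB file path and regeneration command below are distribution specific, any bootloader that edits the kernel command line works the same way), RU prefetch can be turned off at boot by appending the parameter to the kernel command line:

# /etc/default/grub
GRUB_CMDLINE_LINUX="... readunique_prefetch=off"

# regenerate the bootloader config and reboot
grub2-mkconfig -o /boot/grub2/grub.cfg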
Signed-off-by: Kai Shen <shenkai8@huawei.com>
Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
[XQ: adjusted context]
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 arch/arm64/Kconfig               | 18 ++++++++++
 arch/arm64/include/asm/cpucaps.h |  3 +-
 arch/arm64/kernel/cpu_errata.c   | 56 ++++++++++++++++++++++++++++++++
 3 files changed, 76 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d8917873f549..058a73bf556d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -625,6 +625,24 @@ config QCOM_FALKOR_ERRATUM_E1041
 	  If unsure, say Y.
 
+config HISILICON_ERRATUM_HIP08_RU_PREFETCH
+	bool "HIP08 RU: HiSilicon HIP08 cache readunique might cause performance drop"
+	default y
+	help
+	  The HiSilicon HIP08 cache readunique might compromise performance,
+	  use cmdline "readunique_prefetch=off" to disable RU prefetch.
+
+	  If unsure, say Y.
+
+config HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF
+	bool "HIP08 RU: disable HiSilicon HIP08 cache readunique by default"
+	depends on HISILICON_ERRATUM_HIP08_RU_PREFETCH
+	default n
+	help
+	  Disable HiSilicon HIP08 cache readunique by default.
+
+	  If unsure, say N.
+
 endmenu
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index a9090f204a08..85446098342d 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
 #define ARM64_WORKAROUND_1463225		35
 #define ARM64_HAS_CRC32				36
 #define ARM64_SSBS				37
+#define ARM64_WORKAROUND_HISI_HIP08_RU_PREFETCH	38
 
-#define ARM64_NCAPS				38
+#define ARM64_NCAPS				39
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index c0b2ef5e7ea3..881187d5fed6 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -24,6 +24,11 @@
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
 #include <asm/smp_plat.h>
+#ifdef CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH
+#include <asm/ptrace.h>
+#include <asm/sysreg.h>
+#include <linux/smp.h>
+#endif
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -491,6 +496,48 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
 }
 
+#ifdef CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH
+# ifdef CONFIG_HISILICON_HIP08_RU_PREFETCH_DEFAULT_OFF
+static bool readunique_prefetch_enabled;
+# else
+static bool readunique_prefetch_enabled = true;
+# endif
+static int __init readunique_prefetch_switch(char *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (strcmp(data, "off") == 0)
+		readunique_prefetch_enabled = false;
+	else if (strcmp(data, "on") == 0)
+		readunique_prefetch_enabled = true;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("readunique_prefetch", readunique_prefetch_switch);
+
+static bool
+should_disable_hisi_hip08_ru_prefetch(const struct arm64_cpu_capabilities *entry, int unused)
+{
+	u64 el;
+
+	if (readunique_prefetch_enabled)
+		return false;
+
+	el = read_sysreg(CurrentEL);
+	return el == CurrentEL_EL2;
+}
+
+#define CTLR_HISI_HIP08_RU_PREFETCH	(1L << 40)
+static void __maybe_unused
+hisi_hip08_ru_prefetch_disable(const struct arm64_cpu_capabilities *__unused)
+{
+	sysreg_clear_set(S3_1_c15_c6_4, 0, CTLR_HISI_HIP08_RU_PREFETCH);
+}
+#endif
+
 /* known invulnerable cores */
 static const struct midr_range arm64_ssb_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
@@ -870,6 +917,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
 		.matches = needs_tx2_tvm_workaround,
 	},
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_HIP08_RU_PREFETCH
+	{
+		.desc = "HiSilicon HIP08 Cache Readunique Prefetch Disable",
+		.capability = ARM64_WORKAROUND_HISI_HIP08_RU_PREFETCH,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+		.matches = should_disable_hisi_hip08_ru_prefetch,
+		.cpu_enable = hisi_hip08_ru_prefetch_disable,
+	},
 #endif
 	{