From: "Borislav Petkov (AMD)" bp@alien8.de
stable inclusion
from stable-v5.10.189
commit e47af0c255aed7da91202f26250558a8e34e1c26
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I7RQ67
CVE: CVE-2023-20569
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
Upstream commit: 1b5277c0ea0b247393a9c426769fde18cff5e2f6
Add support for the CPUID flag which denotes that the CPU is not affected by SRSO.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Conflicts: arch/x86/include/asm/cpufeatures.h
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
(cherry picked from commit f4a2459b03a28b1b89eec1402579fcad7020f431)
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 arch/x86/include/asm/cpufeatures.h   |  2 ++
 arch/x86/include/asm/msr-index.h     |  1 +
 arch/x86/include/asm/nospec-branch.h |  6 +++---
 arch/x86/kernel/cpu/amd.c            | 12 ++++++------
 arch/x86/kernel/cpu/bugs.c           | 24 ++++++++++++++++++++----
 arch/x86/kernel/cpu/common.c         |  6 ++++--
 arch/x86/kvm/cpuid.c                 |  3 +++
 7 files changed, 39 insertions(+), 15 deletions(-)
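
A note on the new bits before the diff: SBPB, IBPB_BRTYPE and SRSO_NO are
enumerated by CPUID leaf 0x80000021 in EAX bits 27, 28 and 29 respectively
(per AMD's SRSO guidance; the word-19 positions below are this tree's
mapping of that leaf). As a rough, stand-alone illustration only, not part
of the patch, a user-space probe of the leaf could look like this, assuming
GCC/clang's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>	/* GCC/clang CPUID helpers */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* The leaf must be enumerated before it can be read. */
	if (__get_cpuid_max(0x80000000, NULL) < 0x80000021) {
		printf("CPUID leaf 0x80000021 not supported\n");
		return 0;
	}

	__cpuid_count(0x80000021, 0, eax, ebx, ecx, edx);

	printf("SBPB:        %u\n", (eax >> 27) & 1);	/* selective IBPB supported */
	printf("IBPB_BRTYPE: %u\n", (eax >> 28) & 1);	/* IBPB flushes branch types */
	printf("SRSO_NO:     %u\n", (eax >> 29) & 1);	/* CPU not affected by SRSO */
	return 0;
}

A hypervisor can set the synthetic SRSO_NO for its guests as well, which is
what the kvm/cpuid.c hunk at the end of this patch wires up.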
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 7e28276a1007..447086e89114 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -434,7 +434,9 @@
 #define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
+#define X86_FEATURE_SBPB		(19*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(19*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO		(19*32+29) /* "" CPU is not affected by SRSO */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ec39d6d9967b..62e8681d0e3f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -56,6 +56,7 @@
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 65db2a7d39e1..888256d2f1c6 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,11 +314,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 		: "memory");
 }
 
+extern u64 x86_pred_cmd;
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-	u64 val = PRED_CMD_IBPB;
-
-	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3a8ca21c5b64..402979952fc7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1287,14 +1287,14 @@ bool cpu_has_ibpb_brtype_microcode(void)
 {
 	u8 fam = boot_cpu_data.x86;
 
-	if (fam == 0x17) {
-		/* Zen1/2 IBPB flushes branch type predictions too. */
+	/* Zen1/2 IBPB flushes branch type predictions too. */
+	if (fam == 0x17)
 		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
-	} else if (fam == 0x19) {
+	/* Poke the MSR bit on Zen3/4 to check its presence. */
+	else if (fam == 0x19)
+		return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB);
+	else
 		return false;
-	}
-
-	return false;
 }
 
 static void zenbleed_check_cpu(void *unused)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5a9e923ab83f..f2a7d9491955 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,6 +59,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
@@ -2318,7 +2321,7 @@ static void __init srso_select_mitigation(void)
 	bool has_microcode;
 
 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
-		return;
+		goto pred_cmd;
 
 	/*
 	 * The first check is for the kernel running as a guest in order
@@ -2331,9 +2334,18 @@
 	} else {
 		/*
 		 * Enable the synthetic (even if in a real CPUID leaf)
-		 * flag for guests.
+		 * flags for guests.
 		 */
 		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+		setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+		/*
+		 * Zen1/2 with SMT off aren't vulnerable after the right
+		 * IBPB microcode has been applied.
+		 */
+		if ((boot_cpu_data.x86 < 0x19) &&
+		    (cpu_smt_control == CPU_SMT_DISABLED))
+			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
 	}
 
 	switch (srso_cmd) {
@@ -2356,16 +2368,20 @@ static void __init srso_select_mitigation(void)
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-			return;
+			goto pred_cmd;
 		}
 		break;
 
 	default:
 		break;
-	}
pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); + +pred_cmd: + if (boot_cpu_has(X86_FEATURE_SRSO_NO) || + srso_cmd == SRSO_CMD_OFF) + x86_pred_cmd = PRED_CMD_SBPB; }
 
 #undef pr_fmt
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5dc78d88d932..3ce12037320d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1299,8 +1299,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (cpu_matches(cpu_vuln_blacklist, SRSO))
-		setup_force_cpu_bug(X86_BUG_SRSO);
+	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+		if (cpu_matches(cpu_vuln_blacklist, SRSO))
+			setup_force_cpu_bug(X86_BUG_SRSO);
+	}
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index aab185562803..7dad1cf6ed21 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -559,6 +559,9 @@ void kvm_set_cpu_caps(void)
 	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
 
+	if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
+		kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
+
 	/*
 	 * Hide all SVM features by default, SVM will set the cap bits for
 	 * features it emulates and/or exposes for L1.
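
Taken together with the nospec-branch.h hunk, the effect is that every
indirect_branch_prediction_barrier() now writes x86_pred_cmd rather than a
hard-coded PRED_CMD_IBPB, and srso_select_mitigation() downgrades that value
to the selective barrier when it is safe to do so. A stand-alone sketch of
the selection logic, illustrative only (the helper name is local to this
example, not kernel API):

#include <stdint.h>
#include <stdbool.h>

#define PRED_CMD_IBPB	(1u << 0)	/* full barrier; with IBPB_BRTYPE it flushes branch type predictions too */
#define PRED_CMD_SBPB	(1u << 7)	/* selective barrier; skips the branch type flush */

/* Mirrors the pred_cmd: tail of srso_select_mitigation() above. */
static uint64_t pick_pred_cmd(bool srso_no, bool srso_cmd_off)
{
	if (srso_no || srso_cmd_off)
		return PRED_CMD_SBPB;
	return PRED_CMD_IBPB;
}

The design point is that SBPB is the cheaper barrier: it does not flush
branch type predictions, which is the extra state that only needs flushing
while the CPU is still considered affected by SRSO.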