From: Peter Zijlstra <peterz@infradead.org>
stable inclusion
from stable-v4.19.266
commit f1b4cf5ce43f28503ef24d30fdbb9247d141765d
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5GZ2Z
CVE: CVE-2022-29901
--------------------------------
commit c779bc1a9002fa474175b80e72b85c9bf628abb0 upstream.
When changing SPEC_CTRL for user control, the WRMSR can be delayed until return-to-user when KERNEL_IBRS has been enabled.
This avoids an MSR write during context switch.
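For reference, a minimal sketch of the idea in plain C (illustrative only, not kernel code): spec_ctrl_cached, kernel_ibrs_enabled, expensive_msr_write() and return_to_user() below are stand-ins for x86_spec_ctrl_current, X86_FEATURE_KERNEL_IBRS, wrmsrl() and the KERNEL_IBRS exit path; the non-forced branch mirrors the write_spec_ctrl_current() change in the diff that follows.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t spec_ctrl_cached;        /* stand-in for x86_spec_ctrl_current */
	static bool kernel_ibrs_enabled = true;  /* stand-in for X86_FEATURE_KERNEL_IBRS */

	static void expensive_msr_write(uint64_t val)
	{
		/* stand-in for wrmsrl(MSR_IA32_SPEC_CTRL, val) */
		printf("WRMSR SPEC_CTRL = %#llx\n", (unsigned long long)val);
	}

	/* Mirrors the new write_spec_ctrl_current(val, force) logic. */
	static void spec_ctrl_update(uint64_t val, bool force)
	{
		if (spec_ctrl_cached == val)
			return;

		spec_ctrl_cached = val;

		/*
		 * With KERNEL_IBRS the exit path rewrites the MSR from the
		 * cached value on return-to-user anyway, so a non-forced
		 * update (e.g. context switch) can skip the costly write.
		 */
		if (force || !kernel_ibrs_enabled)
			expensive_msr_write(val);
	}

	/* Stand-in for return-to-user restoring the cached value. */
	static void return_to_user(void)
	{
		expensive_msr_write(spec_ctrl_cached);
	}

	int main(void)
	{
		spec_ctrl_update(0x2, false); /* context switch: deferred, no WRMSR here */
		return_to_user();             /* MSR picks up the cached value here */
		spec_ctrl_update(0x6, true);  /* setup path: forced, immediate WRMSR */
		return 0;
	}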
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 arch/x86/include/asm/nospec-branch.h |  2 +-
 arch/x86/kernel/cpu/bugs.c           | 18 ++++++++++++------
 arch/x86/kernel/process.c            |  2 +-
 3 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 6b7d94cf7568..a82d77f9fc8f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -307,7 +307,7 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
-extern void write_spec_ctrl_current(u64 val);
+extern void write_spec_ctrl_current(u64 val, bool force);
 
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index dc8cf26ef367..1ede3462f828 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,13 +61,19 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-void write_spec_ctrl_current(u64 val)
+void write_spec_ctrl_current(u64 val, bool force)
 {
 	if (this_cpu_read(x86_spec_ctrl_current) == val)
 		return;
 
 	this_cpu_write(x86_spec_ctrl_current, val);
-	wrmsrl(MSR_IA32_SPEC_CTRL, val);
+
+	/*
+	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
+	 * forced the update can be delayed until that time.
+	 */
+	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+		wrmsrl(MSR_IA32_SPEC_CTRL, val);
 }
 
 /*
@@ -1192,7 +1198,7 @@ static void __init spectre_v2_select_mitigation(void)
 	if (spectre_v2_in_eibrs_mode(mode)) {
 		/* Force it so VMEXIT will restore correctly */
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		write_spec_ctrl_current(x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 	}
 
 	switch (mode) {
@@ -1247,7 +1253,7 @@ static void __init spectre_v2_select_mitigation(void)
 
 static void update_stibp_msr(void * __unused)
 {
-	write_spec_ctrl_current(x86_spec_ctrl_base);
+	write_spec_ctrl_current(x86_spec_ctrl_base, true);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1490,7 +1496,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 			x86_amd_ssb_disable();
 		} else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-			write_spec_ctrl_current(x86_spec_ctrl_base);
+			write_spec_ctrl_current(x86_spec_ctrl_base, true);
 		}
 	}
 
@@ -1684,7 +1690,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		write_spec_ctrl_current(x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index a17afe402a62..9dad42b8fe3c 100755
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -435,7 +435,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	}
 
 	if (updmsr)
-		write_spec_ctrl_current(msr);
+		write_spec_ctrl_current(msr, false);
 }
 
 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)