From: Peter Zijlstra <peterz@infradead.org>
stable inclusion
from stable-v5.10.192
commit e0f50b0e4186489f9abc8d7cd3c654f47eefc792
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I7RQ67
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
commit 42be649dd1f2eee6b1fb185f1a231b9494cf095f upstream.
For a more consistent namespace.
[ bp: Fixup names in the doc too. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
(cherry picked from commit 01aee51c18afda58706d1ba2abdaa2a8b8669984)
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 Documentation/admin-guide/hw-vuln/srso.rst |  4 ++--
 arch/x86/include/asm/nospec-branch.h       |  4 ++--
 arch/x86/kernel/vmlinux.lds.S              |  8 ++++----
 arch/x86/lib/retpoline.S                   | 26 +++++++++++++-------------
 4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index 2f923c805802..f79cb11b080f 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 51e3e189ec59..07967d80d14e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -183,7 +183,7 @@
 
 #ifdef CONFIG_CPU_SRSO
 	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
 .endm
@@ -207,7 +207,7 @@ extern void srso_alias_return_thunk(void);
 
 extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
 
 extern void entry_ibpb(void);
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 226c72f94c3a..3f3e31a457d4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -141,10 +141,10 @@ SECTIONS
 
 #ifdef CONFIG_CPU_SRSO
 		/*
-		 * See the comment above srso_untrain_ret_alias()'s
+		 * See the comment above srso_alias_untrain_ret()'s
 		 * definition.
 		 */
-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
 		*(.text.__x86.rethunk_safe)
 #endif
 		ALIGN_ENTRY_TEXT_END
@@ -533,8 +533,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-	   (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+	   (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
  "SRSO function pair won't alias");
 #endif
 
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 889882f8c3c8..b5697ba1ca71 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -75,55 +75,55 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 
 #ifdef CONFIG_RETHUNK
 
 /*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
  *
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
  * and 20 in its virtual address are set (while those bits in the
- * srso_untrain_ret_alias() function are cleared).
+ * srso_alias_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
  *
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
 	.section .text.__x86.rethunk_untrain
 
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ASM_NOP2
 	lfence
 	jmp srso_alias_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 
 	.section .text.__x86.rethunk_safe
 #else
 /* dummy definition for alternatives */
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
 #endif
 
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	add $8, %_ASM_SP
 	UNWIND_HINT_FUNC
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_safe_ret)
 
 	.section .text.__x86.return_thunk
 
 SYM_CODE_START(srso_alias_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_safe_ret_alias
+	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
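
For reference (not part of the patch): the vmlinux.lds.S assertion above only checks that the two renamed symbols differ in exactly bits 2, 8, 14 and 20, computing the XOR as (A | B) - (A & B) because older linkers lack an XOR operator. A minimal userspace sketch of that same check is below; the two addresses are made-up example values laid out per the constraints in the retpoline.S comment (srso_alias_untrain_ret 2M aligned, srso_alias_safe_ret in the same 2M page with those four bits set), not the real kernel symbol addresses.

#include <stdint.h>
#include <stdio.h>

/* Bits that must differ between the two functions, per the linker assert. */
#define SRSO_ALIAS_MASK ((1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20))

int main(void)
{
	/* Hypothetical addresses chosen to satisfy the documented layout. */
	unsigned long untrain_ret = 0xffffffff82000000UL;          /* 2M aligned */
	unsigned long safe_ret    = untrain_ret | SRSO_ALIAS_MASK; /* bits 2,8,14,20 set */

	/* Direct XOR vs. the (A | B) - (A & B) form used by the linker script. */
	unsigned long xor_direct = untrain_ret ^ safe_ret;
	unsigned long xor_lds    = (untrain_ret | safe_ret) - (untrain_ret & safe_ret);

	printf("XOR: %#lx, (A|B)-(A&B): %#lx, expected mask: %#lx\n",
	       xor_direct, xor_lds, SRSO_ALIAS_MASK);

	/* Exit 0 only if the pair would pass the "won't alias" assertion. */
	return (xor_direct == SRSO_ALIAS_MASK && xor_lds == xor_direct) ? 0 : 1;
}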