From: Peter Zijlstra <peterz@infradead.org>
stable inclusion
from stable-v5.10.133
commit c9eb5dcdc8f4a848b45b97725f5a2b8d324bb31a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
commit aa3d480315ba6c3025a60958e1981072ea37c3df upstream.
Use the return thunk in asm code. If the thunk isn't needed, it will get patched into a RET instruction during boot by apply_returns().
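As a rough illustration only (this is not the kernel's actual apply_returns()
implementation, and the helper name is made up), the boot-time rewrite amounts
to overwriting each 5-byte 'jmp __x86_return_thunk' site with a bare 'ret'
padded by 'int3':

	/*
	 * Illustrative sketch: rewrite one recorded return site, a
	 * 5-byte rel32 jump (opcode 0xe9), into 'ret' (0xc3) padded
	 * with 'int3' (0xcc) so straight-line speculation cannot run
	 * past the return.
	 */
	static void patch_return_site(unsigned char *site)
	{
		int i;

		site[0] = 0xc3;			/* ret */
		for (i = 1; i < 5; i++)
			site[i] = 0xcc;		/* int3 padding */
	}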
Since alternatives can't handle relocations outside of the first instruction, putting a 'jmp __x86_return_thunk' in one is not valid, therefore carve out the memmove ERMS path into a separate label and jump to it.
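The relocation problem is plain rel32 arithmetic. A hypothetical helper
(name and layout are for illustration only) shows why replacement bytes
copied to a new address stop pointing at the original target:

	/*
	 * Decode the target of a 5-byte rel32 'jmp' at 'insn'.  The
	 * displacement is relative to the instruction *after* the
	 * jump, so when the alternatives code copies replacement
	 * bytes to the patch site, the encoded target is only correct
	 * if the patcher adjusts the displacement, which it does for
	 * the first instruction of the replacement only.
	 */
	static unsigned long jmp_rel32_target(unsigned char *insn)
	{
		int disp = *(int *)(insn + 1);	/* little-endian rel32 */

		return (unsigned long)insn + 5 + disp;
	}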
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: no RANDSTRUCT_CFLAGS]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[bwh: Backported to 5.10: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/x86/entry/vdso/Makefile   | 1 +
 arch/x86/include/asm/linkage.h | 8 ++++++++
 arch/x86/lib/memmove_64.S      | 7 ++++++-
 3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 02e3e42f380b..1faa6be405b7 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -92,6 +92,7 @@ endif
 endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 030907922bd0..d04e61c2f863 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -18,19 +18,27 @@
 #define __ALIGN_STR	__stringify(__ALIGN)
 #endif
 
+#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define RET	jmp __x86_return_thunk
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define RET	ret; int3
 #else
 #define RET	ret
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #else /* __ASSEMBLY__ */
 
+#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define ASM_RET	"jmp __x86_return_thunk\n\t"
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define ASM_RET	"ret; int3\n\t"
 #else
 #define ASM_RET	"ret\n\t"
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 50ea390df712..4b8ee3a2fcc3 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove)
 	/* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
 	ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-	ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
+	ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
 
 	/*
 	 * movsq instruction have many startup latency
@@ -206,6 +206,11 @@ SYM_FUNC_START(__memmove)
 	movb %r11b, (%rdi)
 13:
 	RET
+
+.Lmemmove_erms:
+	movq %rdx, %rcx
+	rep movsb
+	RET
 SYM_FUNC_END(__memmove)
 SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)