From: Fangrui Song <maskray@google.com>
commit 4d6ffa27b8e5116c0abb318790fd01d4e12d75e6 upstream.
Commit
  393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
added .weak directives to arch/x86/lib/mem*_64.S instead of changing the existing ENTRY macros to WEAK. This can lead to the assembly snippet
  .weak memcpy
  ...
  .globl memcpy
which will produce a STB_WEAK memcpy with GNU as, but a STB_GLOBAL memcpy with LLVM's integrated assembler before LLVM 12. LLVM 12 (since https://reviews.llvm.org/D90108) will error on such an overridden symbol binding.
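For illustration, the divergence can be reproduced outside the kernel with a minimal standalone file (the file name and the ret body are invented for this sketch; GNU as / AT&T syntax):

  # weak-then-global.s: both directives applied to the same symbol
  .weak memcpy          # first request: weak binding
  .globl memcpy         # second request: global binding
  memcpy:
          ret

  $ as weak-then-global.s -o weak.o && readelf -s weak.o   # GNU as: memcpy is WEAK
  $ clang -c weak-then-global.s                            # LLVM 12 integrated as: error

With GNU as, readelf -s shows memcpy with WEAK binding; clang's integrated assembler before LLVM 12 leaves it GLOBAL instead, and LLVM 12 refuses to assemble the file at all.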
Commit
  ef1e03152cb0 ("x86/asm: Make some functions local")
changed ENTRY in arch/x86/lib/memcpy_64.S to SYM_FUNC_START_LOCAL, which was ineffective due to the preceding .weak directive.
Use the appropriate SYM_FUNC_START_WEAK instead.
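For reference, on trees that have ffedeeb780dc, SYM_FUNC_START_WEAK(memcpy) expands to roughly the following (a sketch, assuming the x86 __ALIGN definition of ".p2align 4, 0x90"):

  .weak memcpy          /* weak binding, emitted exactly once */
  .p2align 4, 0x90      /* __ALIGN: 16-byte alignment, 0x90 (NOP) fill */
  memcpy:               /* plain label, no competing .globl */

The tree this backport targets lacks those macros (see the bracketed note below), so the hunks open-code this expansion directly.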
Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
Fixes: ef1e03152cb0 ("x86/asm: Make some functions local")
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Fangrui Song <maskray@google.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Tested-by: Nathan Chancellor <natechancellor@gmail.com>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20201103012358.168682-1-maskray@google.com
[nd: backport due to missing
 commit e9b9d020c487 ("x86/asm: Annotate aliases")
 commit ffedeeb780dc ("linkage: Introduce new macros for assembler symbols")]
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/lib/memcpy_64.S  | 6 +++---
 arch/x86/lib/memmove_64.S | 4 ++--
 arch/x86/lib/memset_64.S  | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9d05572370ed..84b0078272d1 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -14,8 +14,6 @@
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
 
-.weak memcpy
-
 /*
  * memcpy - Copy a memory block.
  *
@@ -28,7 +26,9 @@
  * rax original destination
  */
 ENTRY(__memcpy)
-ENTRY(memcpy)
+.weak memcpy
+.p2align 4, 0x90
+memcpy:
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index bbec69d8223b..e1cfc880f42d 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,8 +25,8 @@
  * rax: dest
  */
 .weak memmove
-
-ENTRY(memmove)
+.p2align 4, 0x90
+memmove:
 ENTRY(__memmove)
 
 	/* Handle more 32 bytes in loop */
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9bc861c71e75..084189acdcd0 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
-.weak memset
-
 /*
  * ISO C memset - set a memory block to a byte value. This function uses fast
  * string to get better performance than the original function. The code is
@@ -19,7 +17,9 @@
  *
  * rax original destination
  */
-ENTRY(memset)
+.weak memset
+.p2align 4, 0x90
+memset:
 ENTRY(__memset)
 	/*
 	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
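After applying, the bindings can be sanity-checked on a built x86_64 tree (a sketch; the object path assumes an in-tree build):

  $ readelf -s arch/x86/lib/memcpy_64.o | grep -w -e memcpy -e __memcpy

memcpy should now be WEAK and __memcpy GLOBAL with both GNU as and clang's integrated assembler.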