From: Thomas Gleixner <tglx@linutronix.de>
mainline inclusion
from mainline-v6.5-rc1
commit 439e17576eb47f26b78c5bbc72e344d4206d2327
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7XLNT
CVE: CVE-2022-40982
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
---------------------------
commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream
Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and remove the weak fallback from the core code.
No functional change.
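The shape of the change, condensed into a sketch (illustrative only, not
code from this patch; see the hunks below for the actual lines):

Before, generic code supplied an overridable default that start_kernel()
called directly:

	/* init/main.c */
	void __init __weak mem_encrypt_init(void) { }

	/* start_kernel() */
	mem_encrypt_init();

After, the x86 header covers the !CONFIG_AMD_MEM_ENCRYPT case with a
static inline stub and only the x86 hook makes the call:

	/* arch/x86/include/asm/mem_encrypt.h */
	#ifdef CONFIG_AMD_MEM_ENCRYPT
	void __init mem_encrypt_init(void);
	#else
	static inline void mem_encrypt_init(void) { }
	#endif

	/* arch/x86/kernel/cpu/common.c */
	void __init arch_cpu_finalize_init(void)
	{
		/* ... bug checks, FPU init ... */
		mem_encrypt_init();
	}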
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Zeng Heng <zengheng4@huawei.com>
---
 arch/x86/include/asm/mem_encrypt.h |  7 ++++---
 arch/x86/kernel/cpu/common.c       | 11 +++++++++++
 init/main.c                        | 11 -----------
 3 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8f3fa55d43ce..4406d58cf7cb 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -47,14 +47,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
-
 void __init sev_es_init_vc_handling(void);
 bool sme_active(void);
 bool sev_active(void);
 bool sev_es_active(void);
 
+void __init mem_encrypt_init(void);
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
@@ -86,6 +85,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #define __bss_decrypted
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cb5dd020ee24..27b8d0fcf09e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -2254,4 +2255,14 @@ void __init arch_cpu_finalize_init(void)
 	} else {
 		fpu__init_check_bugs();
 	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
 }
diff --git a/init/main.c b/init/main.c
index 73f7c575990e..8cccd0687a93 100644
--- a/init/main.c
+++ b/init/main.c
@@ -96,7 +96,6 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 #include <linux/randomize_kstack.h>
@@ -777,8 +776,6 @@ void __init __weak thread_stack_cache_init(void)
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
@@ -1022,14 +1019,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	 */
 	locking_selftest();
 
-	/*
-	 * This needs to be called before any devices perform DMA
-	 * operations that might use the SWIOTLB bounce buffers. It will
-	 * mark the bounce buffers as decrypted so that their usage will
-	 * not cause "plain-text" data to be decrypted when accessed.
-	 */
-	mem_encrypt_init();
-
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {