From: Sean Christopherson <seanjc@google.com>
mainline inclusion
from mainline-v6.6-rc1
commit b23c83ad2c638420ec0608a9de354507c41bec29
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I95B2J
CVE: CVE-2023-52514
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?id...
--------------------------------
VMCLEAR active VMCSes before any emergency reboot, not just if the kernel may kexec into a new kernel after a crash. Per Intel's SDM, the VMX architecture doesn't require the CPU to flush the VMCS cache on INIT. If an emergency reboot doesn't RESET CPUs, cached VMCSes could theoretically be kept and only be written back to memory after the new kernel is booted, i.e. could effectively corrupt memory after reboot.
Opportunistically remove the setting of the global pointer to NULL to make checkpatch happy.
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
---
 arch/x86/include/asm/kexec.h  |  2 --
 arch/x86/include/asm/reboot.h |  2 ++
 arch/x86/kernel/crash.c       | 30 ------------------------------
 arch/x86/kernel/reboot.c      | 22 ++++++++++++++++++++++
 arch/x86/kvm/vmx.c            | 12 ++----------
 5 files changed, 26 insertions(+), 42 deletions(-)
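
A minimal user-space sketch of the publish/invoke pattern the relocated hook relies on, for anyone who wants to poke at it outside the kernel (illustration only: C11 acquire/release atomics stand in for the kernel's rcu_assign_pointer()/rcu_dereference(), and every identifier below is hypothetical, not a kernel API):

/*
 * Illustrative analogue of crash_vmclear_loaded_vmcss (NOT kernel code):
 * a "module" publishes a callback pointer once, and the emergency path
 * invokes it if present before disabling virtualization.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef void vmclear_fn(void);

/* Global hook, NULL until the "module" registers its callback. */
static _Atomic(vmclear_fn *) vmclear_hook;

/* Roughly what kvm_intel does at load time (rcu_assign_pointer()). */
static void register_vmclear_callback(vmclear_fn *fn)
{
	atomic_store_explicit(&vmclear_hook, fn, memory_order_release);
}

/* Roughly what vmx_exit() does before unload (reset the pointer to NULL). */
static void unregister_vmclear_callback(void)
{
	atomic_store_explicit(&vmclear_hook, NULL, memory_order_release);
}

/* Stand-in for crash_vmclear_local_loaded_vmcss(). */
static void fake_vmclear_loaded_vmcss(void)
{
	puts("VMCLEAR'ing VMCSes loaded on this CPU");
}

/* Mirrors cpu_emergency_disable_virtualization(): VMCLEAR first, then VMXOFF. */
static void emergency_disable_virtualization(void)
{
	vmclear_fn *fn = atomic_load_explicit(&vmclear_hook, memory_order_acquire);

	if (fn)
		fn();
	puts("VMXOFF / clear EFER.SVME");
}

int main(void)
{
	emergency_disable_virtualization();	/* hook unset: only VMXOFF */
	register_vmclear_callback(fake_vmclear_loaded_vmcss);
	emergency_disable_virtualization();	/* hook runs first */
	unregister_vmclear_callback();
	return 0;
}

The kernel variant uses RCU rather than bare atomics so that vmx_exit() can clear the pointer and then synchronize_rcu() to wait out any in-flight emergency caller before the kvm_intel text goes away; the sketch above ignores that unload race.
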
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index b3a2b4309a31..3d43721f43e8 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -222,8 +222,6 @@ extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);

 #endif

-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 extern void kdump_nmi_shootdown_cpus(void);

 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 9177b4354c3f..dc201724a643 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1

+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 void cpu_emergency_disable_virtualization(void);

 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e40bb50c5c4f..fdf605a2ad4d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -44,28 +44,8 @@ struct crash_memmap_data {
 	unsigned int type;
 };

-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
 unsigned long crash_zero_bytes;

-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-	crash_vmclear_fn *do_vmclear_operation = NULL;
-
-	rcu_read_lock();
-	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
-	if (do_vmclear_operation)
-		do_vmclear_operation();
-	rcu_read_unlock();
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -80,11 +60,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 #endif
 	crash_save_cpu(regs, cpu);

-	/*
-	 * VMCLEAR VMCSs loaded on all cpus if needed.
-	 */
-	cpu_crash_vmclear_loaded_vmcss();
-
 	/*
 	 * Disable Intel PT to stop its logging
 	 */
@@ -138,11 +113,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)

 	crash_smp_send_stop();

-	/*
-	 * VMCLEAR VMCSs loaded on this cpu if needed.
-	 */
-	cpu_crash_vmclear_loaded_vmcss();
-
 	cpu_emergency_disable_virtualization();

 	/*
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index b72873d24c86..da91a74ddeb6 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -778,6 +778,26 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif

+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+	crash_vmclear_fn *do_vmclear_operation = NULL;
+
+	rcu_read_lock();
+	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+	if (do_vmclear_operation)
+		do_vmclear_operation();
+	rcu_read_unlock();
+}

 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;
@@ -789,6 +809,8 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
+	cpu_crash_vmclear_loaded_vmcss();
+
 	cpu_emergency_vmxoff();
 	cpu_emergency_svm_disable();
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 535fce186d58..79fd3caa3ab5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,7 +50,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/perf_event.h>
 #include <asm/debugreg.h>
-#include <asm/kexec.h>
+#include <asm/reboot.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
@@ -2225,7 +2225,6 @@ static void vmcs_load(struct vmcs *vmcs)
 		       vmcs, phys_addr);
 }

-#ifdef CONFIG_KEXEC_CORE
 /*
  * This bitmap is used to indicate whether the vmclear
  * operation is enabled on all cpus. All disabled by
@@ -2260,10 +2259,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
 			    loaded_vmcss_on_cpu_link)
 		vmcs_clear(v->vmcs);
 }
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
-#endif /* CONFIG_KEXEC_CORE */

 static void __loaded_vmcs_clear(void *arg)
 {
@@ -14670,10 +14665,8 @@ static void vmx_cleanup_l1d_flush(void)

 static void vmx_exit(void)
 {
-#ifdef CONFIG_KEXEC_CORE
 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 	synchronize_rcu();
-#endif

 	kvm_exit();

@@ -14758,10 +14751,9 @@ static int __init vmx_init(void)

 	vmx_setup_fb_clear_ctrl();

-#ifdef CONFIG_KEXEC_CORE
 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 			   crash_vmclear_local_loaded_vmcss);
-#endif
+
 	vmx_check_vmcs12_offsets();

 	return 0;