From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
stable inclusion
from stable-v6.6.39
commit 0f37946c62c48a907625348cbc720a7a0c547d1e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAGPSI
CVE: CVE-2024-42126
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
[ Upstream commit 0db880fc865ffb522141ced4bfa66c12ab1fbb70 ]
nmi_enter()/nmi_exit() touch per-cpu variables, which can lead to a kernel crash when they are invoked during real-mode interrupt handling (e.g. the early HMI/MCE interrupt handlers) if the percpu allocation comes from the vmalloc area.
The early HMI/MCE handlers are called through the DEFINE_INTERRUPT_HANDLER_NMI() wrapper, which issues the nmi_enter()/nmi_exit() calls. There is no issue when the percpu allocation comes from the embedded first chunk. However, with CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, the percpu allocation can come from the vmalloc area.
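A quick way to see which variant a given boot ended up with is to test the base address of the first per-CPU chunk. A minimal sketch, not part of this patch: the initcall is invented, while pcpu_base_addr and is_vmalloc_addr() are existing kernel symbols:

  #include <linux/init.h>
  #include <linux/mm.h>
  #include <linux/percpu.h>
  #include <linux/printk.h>

  /* Report whether the per-CPU first chunk is page/vmalloc backed or embedded. */
  static int __init report_percpu_first_chunk(void)
  {
  	pr_info("percpu first chunk is %s\n",
  		is_vmalloc_addr(pcpu_base_addr) ? "page/vmalloc backed" : "embedded");
  	return 0;
  }
  early_initcall(report_percpu_first_chunk);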
With the kernel command line "percpu_alloc=page" we can force the percpu allocation to come from the vmalloc area and see a kernel crash in machine_check_early():
[    1.215714] NIP [c000000000e49eb4] rcu_nmi_enter+0x24/0x110
[    1.215717] LR [c0000000000461a0] machine_check_early+0xf0/0x2c0
[    1.215719] --- interrupt: 200
[    1.215720] [c000000fffd73180] [0000000000000000] 0x0 (unreliable)
[    1.215722] [c000000fffd731b0] [0000000000000000] 0x0
[    1.215724] [c000000fffd73210] [c000000000008364] machine_check_early_common+0x134/0x1f8
Fix this by avoiding nmi_enter()/nmi_exit() in real mode when the percpu first chunk is not embedded.
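The guard relies on a static key, so kernels with an embedded first chunk only pay for a flag read. A minimal, self-contained illustration of that jump_label pattern (the example_* names are invented; the patch's real symbols appear in the hunks below):

  #include <linux/init.h>
  #include <linux/jump_label.h>

  /* Defaults to false; flipped at most once during boot. */
  DEFINE_STATIC_KEY_FALSE(example_flag);

  static void __init example_boot_setup(void)
  {
  	/* Enable the key once the condition it tracks has been established. */
  	static_key_enable(&example_flag.key);
  }

  static bool example_hot_path_check(void)
  {
  	/* Plain read of the key's enabled count; same accessor the patch uses. */
  	return static_key_enabled(&example_flag.key);
  }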
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Shirisha Ganta <shirisha@linux.ibm.com>
Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240410043006.81577-1-mahesh@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/powerpc/include/asm/interrupt.h | 10 ++++++++++
 arch/powerpc/include/asm/percpu.h    | 10 ++++++++++
 arch/powerpc/kernel/setup_64.c       |  2 ++
 3 files changed, 22 insertions(+)

diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index a4196ab1d016..5f9d61b2159c 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	if (IS_ENABLED(CONFIG_KASAN))
 		return;
 
+	/*
+	 * Likewise, do not use it in real mode if percpu first chunk is not
+	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
+	 * are chances where percpu allocation can come from vmalloc area.
+	 */
+	if (percpu_first_chunk_is_paged)
+		return;
+
 	/* Otherwise, it should be safe to call it */
 	nmi_enter();
 }
@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
 		// no nmi_exit for a pseries hash guest taking a real mode exception
 	} else if (IS_ENABLED(CONFIG_KASAN)) {
 		// no nmi_exit for KASAN in real mode
+	} else if (percpu_first_chunk_is_paged) {
+		// no nmi_exit if percpu first chunk is not embedded
 	} else {
 		nmi_exit();
 	}
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index 8e5b7d0b851c..634970ce13c6 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -15,6 +15,16 @@
 #endif /* CONFIG_SMP */
 #endif /* __powerpc64__ */
 
+#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+#define percpu_first_chunk_is_paged	\
+	(static_key_enabled(&__percpu_first_chunk_is_paged.key))
+#else
+#define percpu_first_chunk_is_paged	false
+#endif /* CONFIG_PPC64 && CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 15ece83ab7ac..6231a42eb0a0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -837,6 +837,7 @@ static __init int pcpu_cpu_to_node(int cpu)
 
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
+DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
 
 void __init setup_per_cpu_areas(void)
 {
@@ -879,6 +880,7 @@ void __init setup_per_cpu_areas(void)
 	if (rc < 0)
 		panic("cannot initialize percpu area (err=%d)", rc);
 
+	static_key_enable(&__percpu_first_chunk_is_paged.key);
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
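
One note on the fallback in the percpu.h hunk: when CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK (or CONFIG_SMP) is not set, percpu_first_chunk_is_paged expands to the constant false, so callers compile the guard away. A hedged sketch of such a caller (example_nmi_prepare() is invented; percpu_first_chunk_is_paged and nmi_enter() are real symbols from the patch and the core kernel):

  #include <linux/hardirq.h>
  #include <linux/percpu.h>	/* pulls in asm/percpu.h and the macro above */

  static void example_nmi_prepare(void)
  {
  	/*
  	 * With the fallback #define this condition is the constant false and
  	 * the compiler drops the branch entirely; with the static key it is a
  	 * cheap flag read, so the embedded-first-chunk case stays fast.
  	 */
  	if (percpu_first_chunk_is_paged)
  		return;

  	nmi_enter();
  }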