From: Peter Zijlstra <peterz@infradead.org>
stable inclusion
from stable-v4.19.258
commit c7d4745b033d580c1d547cdd18c6689486889784
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5GZ2Z
CVE: CVE-2022-29901
--------------------------------
commit 332924973725e8cdcc783c175f68cf7e162cb9e5 upstream.
Turns out that i386 doesn't unconditionally have LFENCE, as such the loop in __FILL_RETURN_BUFFER isn't actually speculation safe on such chips.
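
For context, the loop-based fill relies on LFENCE twice: inside each
speculation trap and as the trailing barrier against jnz misprediction.
A simplified sketch of the loop (illustrative, not the verbatim 4.19
macro body; reg, nr and sp stand for the macro parameters):

	mov	$(nr/2), reg		/* two RSB entries per iteration */
771:	call	772f			/* push 1st bogus return address */
773:	pause; lfence; jmp 773b		/* speculation trap */
772:	call	774f			/* push 2nd bogus return address */
775:	pause; lfence; jmp 775b		/* speculation trap */
774:	dec	reg
	jnz	771b			/* can be speculatively mispredicted */
	add	$(BITS_PER_LONG/8) * nr, sp
	lfence				/* barrier; not available on all i386 */

Without LFENCE neither the traps nor the trailing barrier work, so the
32-bit variant below drops the loop entirely.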
Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence")
Reported-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net
[bwh: Backported to 4.19/5.4:
 - __FILL_RETURN_BUFFER takes an sp parameter
 - Open-code __FILL_RETURN_SLOT]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
 arch/x86/include/asm/nospec-branch.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 176089fad1b5..8646e4e7fe14 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -35,6 +35,7 @@
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
+#ifdef CONFIG_X86_64
 #define __FILL_RETURN_BUFFER(reg, nr, sp)	\
 	mov	$(nr/2), reg;			\
 771:						\
@@ -55,6 +56,19 @@
 	add	$(BITS_PER_LONG/8) * nr, sp;	\
 	/* barrier for jnz misprediction */	\
 	lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
+	.rept nr;				\
+	call	772f;				\
+	int3;					\
+772:;						\
+	.endr;					\
+	add	$(BITS_PER_LONG/8) * nr, sp;
+#endif
 
 /* Sequence to mitigate PBRSB on eIBRS CPUs */
 #define __ISSUE_UNBALANCED_RET_GUARD(sp)	\
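
For illustration, the new 32-bit variant hand-expanded for a
hypothetical nr of 4 (the assembler's .rept does the unrolling; sp is
%esp here, and BITS_PER_LONG/8 == 4 on i386):

	call	772f		/* push bogus return address #1 */
	int3			/* speculation trap if it is consumed */
772:	call	772f		/* #2; local label 772 is redefined per repetition */
	int3
772:	call	772f		/* #3 */
	int3
772:	call	772f		/* #4 */
	int3
772:	add	$16, %esp	/* (BITS_PER_LONG/8) * nr: drop the 4 entries */

No loop means no conditional branch to mispredict, so no trailing
LFENCE is needed; the int3 after each call stops speculation through
the bogus return addresses.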