From: Kees Cook <keescook@chromium.org>
mainline inclusion
from mainline-v5.13-rc1
commit 70918779aec9bd01d16f4e6e800ffe423d196021
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5YQ6Z
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Allow for a randomized stack offset on a per-syscall basis, with roughly 5 bits of entropy. (And include the AAPCS rationale, thanks to Mark Rutland.)
In order to avoid unconditional stack canaries on syscall entry (due to the use of alloca()), also disable the stack protector so that needless checks do not slow down the entry path. As there is no general way to control stack protector coverage with a function attribute[1], it must be disabled at the compilation unit level. This isn't a problem here, though, since the stack protector was not triggered in this file before: examining the resulting syscall.o, there are no changes in canary coverage (none before, none now).
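(For illustration only, not part of this patch: the generic
add_random_kstack_offset()/choose_random_kstack_offset() helpers apply the
offset by alloca()-ing a previously chosen random amount on entry. Below is
a minimal, hypothetical user-space sketch of that mechanism, with made-up
names (demo_offset, demo_entry, demo_handler):

	#include <alloca.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t demo_offset;	/* a per-CPU variable in the kernel */

	static __attribute__((noinline)) void demo_handler(void)
	{
		/* The frame address varies call to call by the offset. */
		printf("frame at %p\n", __builtin_frame_address(0));
	}

	static void demo_entry(void)
	{
		/* Shift SP by the offset chosen on the previous entry;
		 * the mask mirrors the kernel's KSTACK_OFFSET_MAX() bound. */
		uint8_t *ptr = alloca(demo_offset & 0x3FF);

		/* Keep the allocation live so the compiler can't drop it. */
		__asm__ volatile("" :: "r"(ptr) : "memory");
		demo_handler();

		/* Pick the offset the *next* entry will consume: 9 random
		 * bits, of which 16-byte SP alignment eats the low 4. */
		demo_offset = (uint32_t)rand() & 0x1FF;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			demo_entry();
		return 0;
	}

Building such a file with -fstack-protector-strong shows the alloca()
forcing a canary check, which is exactly what the Makefile change below
avoids for syscall.o.)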
[1] a working __attribute__((no_stack_protector)) has been added to GCC and Clang but has not been released in any version yet: https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=346b302d09c1e6db56d9fe69048ac... https://reviews.llvm.org/rG4fbf84c1732fca596ad1d6e96015e19760eb8a9b
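(Once compilers shipping the attribute are available, per-function opt-out
would look like the hypothetical sketch below instead of per-file flags;
this does not build with any released GCC/Clang at the time of this patch:

	/* Requires a GCC/Clang new enough to know no_stack_protector. */
	__attribute__((no_stack_protector))
	void entry_path(void)
	{
		/* alloca()/VLA use here would otherwise force a canary. */
	}
)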
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210401232347.2791257-6-keescook@chromium.org
conflict:
	arch/arm64/Kconfig
	arch/arm64/kernel/syscall.c
Signed-off-by: Yi Yang <yiyang13@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Reviewed-by: GONG Ruiqi <gongruiqi1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/Kconfig          |  1 +
 arch/arm64/kernel/Makefile  |  5 +++++
 arch/arm64/kernel/syscall.c | 16 ++++++++++++++++
 3 files changed, 22 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df28741e49c9..969f0889fbea 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -146,6 +146,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_PREL32_RELOCATIONS
+	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_STACKLEAK
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 4cf75b247461..312c164db2ed 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,6 +9,11 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o	 = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o	+= -fno-stack-protector
+
 # Object file lists.
 obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-common.o entry-fpsimd.o process.o ptrace.o	\
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 66ca9534bd69..2a106e67a4cb 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
+#include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 
 #include <asm/daifflags.h>
@@ -42,6 +43,8 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 {
 	long ret;
 
+	add_random_kstack_offset();
+
 	if (scno < sc_nr) {
 		syscall_fn_t syscall_fn;
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
@@ -51,6 +54,19 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 	}
 
 	syscall_set_return_value(current, regs, 0, ret);
+
+	/*
+	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+	 * but not enough for arm64 stack utilization comfort. To keep
+	 * reasonable stack head room, reduce the maximum offset to 9 bits.
+	 *
+	 * The actual entropy will be further reduced by the compiler when
+	 * applying stack alignment constraints: the AAPCS mandates a
+	 * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+	 *
+	 * The resulting 5 bits of entropy is seen in SP[8:4].
+	 */
+	choose_random_kstack_offset(get_random_int() & 0x1FF);
 }
 
 static inline bool has_syscall_work(unsigned long flags)
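(To make the entropy arithmetic in the comment above concrete, here is a
small stand-alone check, again purely illustrative and not part of the
patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t raw_max = 0x1FF;	/* 9 bits, as masked above */

		/* AAPCS 16-byte SP alignment zeroes the low 4 bits. */
		uint32_t aligned_max = raw_max & ~0xFU;		/* 496 */
		uint32_t slots = aligned_max / 16 + 1;		/* 32 offsets */

		/* log2(32) = 5 bits of effective entropy, in SP[8:4]. */
		printf("%u distinct offsets = %d bits\n",
		       slots, 31 - __builtin_clz(slots));
		return 0;
	}
)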