From: Wang ShaoBo <bobo.shaobowang@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QPLR
CVE: NA
Reference: https://patchwork.kernel.org/project/linux-arm-kernel/cover/20211210184133.3...
-------------------------------
Reserve space for arm64 SME in thread_struct.
According to the upstream SME implementation, we need to reserve space in thread_struct for the following fields:
+enum vec_type {
+	ARM64_VEC_SVE = 0,
+	ARM64_VEC_SME,
+	ARM64_VEC_MAX,
+};
+
 struct thread_struct {
 	struct cpu_context	cpu_context;	/* cpu context */
@@ -149,8 +155,6 @@ struct thread_struct {
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
-	unsigned int		sve_vl;		/* SVE vector length */
-	unsigned int		sve_vl_onexec;	/* SVE vl after next exec */
 	unsigned long		fault_address;	/* fault info */
 	unsigned long		fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;		/* debugging */
@@ -162,6 +166,11 @@ struct thread_struct {
 	u64			sctlr_tcf0;
 	u64			gcr_user_incl;
 #endif
+	void			*za_state;	/* ZA register, if any */
+	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
+	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
+	u64			svcr;
+	u64			tpidr2_el0;
 };
These fields need at least five 64-bit slots; the remaining three reserved slots are left for future expansion.
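
As a rough size check, a minimal sketch (not part of this patch; it assumes ARM64_VEC_MAX == 2, 64-bit pointers, and that each KABI_RESERVE() slot occupies a single u64 as defined in include/linux/kabi.h):

	#include <linux/build_bug.h>
	#include <linux/types.h>

	/*
	 * Hypothetical compile-time check: the upstream SME fields quoted
	 * above must fit within the eight reserved slots added below.
	 */
	struct sme_fields_sketch {
		void		*za_state;	/* 1 slot */
		unsigned int	vl[2];		/* 1 slot */
		unsigned int	vl_onexec[2];	/* 1 slot */
		u64		svcr;		/* 1 slot */
		u64		tpidr2_el0;	/* 1 slot */
	};

	static_assert(sizeof(struct sme_fields_sketch) <= 8 * sizeof(u64),
		      "SME fields must fit in the reserved KABI space");

This accounts for five of the eight slots, leaving three spare.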
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/include/asm/processor.h | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 44968ffc322b..d94d60d01d9e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -32,6 +32,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kabi.h>
 #include <vdso/processor.h>
@@ -162,6 +163,14 @@ struct thread_struct {
 	u64			sctlr_tcf0;
 	u64			gcr_user_incl;
 #endif
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
+	KABI_RESERVE(5)
+	KABI_RESERVE(6)
+	KABI_RESERVE(7)
+	KABI_RESERVE(8)
 };
 
 static inline void arch_thread_struct_whitelist(unsigned long *offset,