From: Mark Brown <broonie@kernel.org>
Subject: arm64/sve: Make access to FFR optional
mainline inclusion
from mainline-v5.16-rc1
commit 9f5848665788a0f07bc175cb2cdd06d367b7556e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8E73O
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=9f5848665788a0f07bc175cb2cdd06d367b7556e
-------------------------------------------------
SME introduces streaming SVE mode, in which FFR is not present and the instructions for accessing it UNDEF. In preparation for handling this, update the low level SVE state access functions to take a flag specifying whether FFR should be handled. When saving the register state we store a zero for FFR to guard against uninitialized data being read. No behaviour change should be introduced by this patch.
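To illustrate what the new flag does (not part of the patch itself): when the caller asks to skip FFR, the save path still stores a zero in the FFR slot so a later restore can never read stale data. Below is a minimal userspace sketch of that pattern; the struct layout and the helper example_sve_save() are invented for illustration and are not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented register image layout, fixed at VL = 128 bits (VQ - 1 == 0). */
struct sve_image {
	uint8_t  zregs[32][16];		/* Z0-Z31 */
	uint16_t pregs[16];		/* P0-P15 */
	uint16_t ffr;			/* first fault register slot */
};

/* Mirrors the sve_save macro's two branches: store FFR when asked,
 * otherwise store zero so uninitialized data is never read back. */
static void example_sve_save(struct sve_image *out,
			     const struct sve_image *hw, int save_ffr)
{
	memcpy(out->zregs, hw->zregs, sizeof(out->zregs));
	memcpy(out->pregs, hw->pregs, sizeof(out->pregs));
	out->ffr = save_ffr ? hw->ffr : 0;	/* the "str xzr" case */
}

int main(void)
{
	struct sve_image hw = { .ffr = 0xffff }, saved;

	example_sve_save(&saved, &hw, 0);	/* e.g. streaming mode: no FFR */
	printf("ffr saved as %#x\n", (unsigned)saved.ffr);	/* prints 0 */
	return 0;
}

In the real patch the flag travels as a register argument (x2 for sve_save_state()), and the assembly tests it with cbz rather than a C conditional.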
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211019172247.3045838-5-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
---
 arch/arm64/include/asm/fpsimd.h       |  6 +++---
 arch/arm64/include/asm/fpsimdmacros.h | 20 ++++++++++++++------
 arch/arm64/kernel/entry-fpsimd.S      | 15 +++++++++------
 arch/arm64/kernel/fpsimd.c            | 10 ++++++----
 arch/arm64/kvm/hyp/fpsimd.S           |  6 ++++--
 5 files changed, 36 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 5cc9eaf1d52dd..ed2def998377f 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -66,10 +66,10 @@ static inline void *sve_pffr(struct thread_struct *thread)
 	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
 }
 
-extern void sve_save_state(void *state, u32 *pfpsr);
+extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
-			   unsigned long vq_minus_1);
-extern void sve_flush_live(unsigned long vq_minus_1);
+			   int restore_ffr, unsigned long vq_minus_1);
+extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 059204477ce66..dc0f0b4df881e 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -216,28 +216,36 @@
 .macro sve_flush_z
 _for n, 0, 31, _sve_flush_z	\n
 .endm
-.macro sve_flush_p_ffr
+.macro sve_flush_p
 _for n, 0, 15, _sve_pfalse	\n
+.endm
+
+.macro sve_flush_ffr
 	_sve_wrffr	0
 .endm
 
-.macro sve_save nxbase, xpfpsr, nxtmp
+.macro sve_save nxbase, xpfpsr, save_ffr, nxtmp
 _for n, 0, 31,	_sve_str_v	\n, \nxbase, \n - 34
 _for n, 0, 15,	_sve_str_p	\n, \nxbase, \n - 16
+		cbz		\save_ffr, 921f
 		_sve_rdffr	0
 		_sve_str_p	0, \nxbase
 		_sve_ldr_p	0, \nxbase, -16
-
+		b		922f
+921:
+		str		xzr, [x\nxbase]		// Zero out FFR
+922:
 		mrs		x\nxtmp, fpsr
 		str		w\nxtmp, [\xpfpsr]
 		mrs		x\nxtmp, fpcr
 		str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro __sve_load nxbase, xpfpsr, nxtmp
+.macro __sve_load nxbase, xpfpsr, restore_ffr, nxtmp
 _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
+		cbz		\restore_ffr, 921f
 		_sve_ldr_p	0, \nxbase
 		_sve_wrffr	0
+921:
 _for n, 0, 15,	_sve_ldr_p	\n, \nxbase, \n - 16
 
 		ldr		w\nxtmp, [\xpfpsr]
@@ -246,7 +254,7 @@
 		msr		fpcr, x\nxtmp
 .endm
 
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
+.macro sve_load nxbase, xpfpsr, restore_ffr, xvqminus1, nxtmp, xtmp2
 		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
-		__sve_load	\nxbase, \xpfpsr, \nxtmp
+		__sve_load	\nxbase, \xpfpsr, \restore_ffr, \nxtmp
 .endm
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index e40cb29fdfd8a..5060d91b73e26 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -34,12 +34,12 @@ SYM_FUNC_END(fpsimd_load_state)
 
 #ifdef CONFIG_ARM64_SVE
 
 SYM_FUNC_START(sve_save_state)
-	sve_save 0, x1, 2
+	sve_save 0, x1, x2, 3
 	ret
 SYM_FUNC_END(sve_save_state)
 
 SYM_FUNC_START(sve_load_state)
-	sve_load 0, x1, x2, 3, x4
+	sve_load 0, x1, x2, x3, 4, x5
 	ret
 SYM_FUNC_END(sve_load_state)
@@ -59,13 +59,16 @@ SYM_FUNC_END(sve_set_vq)
  * VQ must already be configured by caller, any further updates of VQ
  * will need to ensure that the register state remains valid.
  *
- * x0 = VQ - 1
+ * x0 = include FFR?
+ * x1 = VQ - 1
  */
 SYM_FUNC_START(sve_flush_live)
-	cbz		x0, 1f	// A VQ-1 of 0 is 128 bits so no extra Z state
+	cbz		x1, 1f	// A VQ-1 of 0 is 128 bits so no extra Z state
 	sve_flush_z
-1:	sve_flush_p_ffr
-	ret
+1:	sve_flush_p
+	tbz		x0, #0, 2f
+	sve_flush_ffr
+2:	ret
 SYM_FUNC_END(sve_flush_live)
 
 #endif /* CONFIG_ARM64_SVE */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 8ad7429398597..a365521493231 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -287,7 +287,7 @@ static void task_fpsimd_load(void)
 
 	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
-			       &current->thread.uw.fpsimd_state.fpsr,
+			       &current->thread.uw.fpsimd_state.fpsr, true,
 			       sve_vq_from_vl(current->thread.sve_vl) - 1);
 	else
 		fpsimd_load_state(&current->thread.uw.fpsimd_state);
@@ -323,7 +323,7 @@ static void fpsimd_save(void)
 
 		sve_save_state((char *)last->sve_state +
 					sve_ffr_offset(last->sve_vl),
-			       &last->st->fpsr);
+			       &last->st->fpsr, true);
 	} else {
 		fpsimd_save_state(last->st);
 	}
@@ -960,7 +960,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 		unsigned long vq_minus_one =
 			sve_vq_from_vl(current->thread.sve_vl) - 1;
 		sve_set_vq(vq_minus_one);
-		sve_flush_live(vq_minus_one);
+		sve_flush_live(true, vq_minus_one);
 		fpsimd_bind_task_to_cpu();
 	} else {
 		fpsimd_to_sve(current);
@@ -1354,7 +1354,8 @@ void __efi_fpsimd_begin(void)
 		__this_cpu_write(efi_sve_state_used, true);
 
 		sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
-			       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
+			       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
+			       true);
 	} else {
 		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
 	}
@@ -1380,6 +1381,7 @@ void __efi_fpsimd_end(void)
 
 		sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
 			       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
+			       true,
 			       sve_vq_from_vl(sve_get_vl()) - 1);
 
 		__this_cpu_write(efi_sve_state_used, false);
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 3c635929771a8..1bb3b04b84e6a 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -21,11 +21,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
 SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
-	__sve_load 0, x1, 2
+	mov	x2, #1
+	__sve_load 0, x1, x2, 3
 	ret
 SYM_FUNC_END(__sve_restore_state)
 
 SYM_FUNC_START(__sve_save_state)
-	sve_save 0, x1, 2
+	mov	x2, #1
+	sve_save 0, x1, x2, 3
 	ret
 SYM_FUNC_END(__sve_save_state)
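
A closing note on the two hyp stubs above: the macros consume \save_ffr / \restore_ffr with cbz, which takes a register operand, so the KVM entry points materialize the constant with mov x2, #1 rather than passing an immediate. The flag handling in the updated sve_flush_live() can likewise be modeled in C (again an illustrative sketch with invented types, not kernel code): predicates are always invalidated, while FFR is cleared only when bit 0 of the flag is set, matching the tbz x0, #0 test:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical model of the live predicate/FFR state being flushed. */
struct live_regs {
	uint16_t pregs[16];
	uint16_t ffr;
};

static void example_sve_flush_live(struct live_regs *regs, bool flush_ffr,
				   unsigned long vq_minus_1)
{
	(void)vq_minus_1;	/* Z state above 128 bits would be flushed here */
	memset(regs->pregs, 0, sizeof(regs->pregs));	/* sve_flush_p */
	if (flush_ffr)					/* tbz x0, #0, 2f */
		regs->ffr = 0;				/* sve_flush_ffr */
}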