From: Mark Brown <broonie@kernel.org>
mainline inclusion
from mainline-v5.16-rc1
commit ddc806b5c4752d35bdaa4dfa2aaa72785711a3da
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=ddc806b5c4752d35bdaa4dfa2aaa72785711a3da
-------------------------------------------------
Currently when restoring the SVE state we supply the SVE vector length as an argument to sve_load_state() and the underlying macros. This becomes inconvenient with the addition of SME since we may need to restore any combination of SVE and SME vector lengths, and we already separately restore the vector length in the KVM code. We don't need to know the vector length during the actual register load since the SME load instructions can index into the data array for us.
Refactor the interface so we explicitly set the vector length separately from restoring the SVE registers, in preparation for adding SME support. No functional change should be involved.
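
To make the new contract concrete, here is a minimal userspace sketch of the calling convention (illustration only, not part of the patch: both functions are mocked in plain C, and current_vq, the buffer size, and the printf are inventions of the mock; the real sve_set_vq()/sve_load_state() are the assembly routines touched below). Callers now program the vector length once with sve_set_vq() and then call sve_load_state() without a vq_minus_1 argument:

  #include <stdio.h>

  static unsigned long current_vq;	/* models the CPU's programmed VQ */

  /* mock of sve_set_vq(): the vector length arrives encoded as VQ - 1 */
  static void sve_set_vq(unsigned long vq_minus_1)
  {
  	current_vq = vq_minus_1 + 1;
  }

  /* mock of sve_load_state(): note the vq_minus_1 argument is gone */
  static void sve_load_state(const void *state, const unsigned int *pfpsr,
  			     int restore_ffr)
  {
  	(void)state; (void)pfpsr;
  	printf("load SVE registers at VQ %lu, restore_ffr=%d\n",
  	       current_vq, restore_ffr);
  }

  int main(void)
  {
  	unsigned int fpsr = 0;
  	unsigned char state[4096] = { 0 };	/* dummy register image */

  	sve_set_vq(3);				/* VQ 4, i.e. a 512-bit VL */
  	sve_load_state(state, &fpsr, 1);	/* FFR restore requested */
  	return 0;
  }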
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211019172247.3045838-9-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
---
 arch/arm64/include/asm/fpsimd.h       |  2 +-
 arch/arm64/include/asm/fpsimdmacros.h |  7 +------
 arch/arm64/kernel/entry-fpsimd.S      |  2 +-
 arch/arm64/kernel/fpsimd.c            | 13 +++++++------
 arch/arm64/kvm/hyp/fpsimd.S           |  2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index c84370e15fed..785bcf91c021 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -68,7 +68,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
 
 extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
-			   int restore_ffr, unsigned long vq_minus_1);
+			   int restore_ffr);
 extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index dc0f0b4df881..ab4290653a11 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -240,7 +240,7 @@
 		str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro __sve_load nxbase, xpfpsr, restore_ffr, nxtmp
+.macro sve_load nxbase, xpfpsr, restore_ffr, nxtmp
 	_for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
 	cbz		\restore_ffr, 921f
 	_sve_ldr_p	0, \nxbase
@@ -253,8 +253,3 @@
 		ldr		w\nxtmp, [\xpfpsr, #4]
 		msr		fpcr, x\nxtmp
 .endm
-
-.macro sve_load nxbase, xpfpsr, restore_ffr, xvqminus1, nxtmp, xtmp2
-	sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
-	__sve_load	\nxbase, \xpfpsr, \restore_ffr, \nxtmp
-.endm
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 5060d91b73e2..a5f9475ce6eb 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -39,7 +39,7 @@ SYM_FUNC_START(sve_save_state)
 SYM_FUNC_END(sve_save_state)
 
 SYM_FUNC_START(sve_load_state)
-	sve_load 0, x1, x2, x3, 4, x5
+	sve_load 0, x1, x2, 4
 	ret
 SYM_FUNC_END(sve_load_state)
 
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index cb024befaccd..d5c582625793 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -316,12 +316,13 @@ static void task_fpsimd_load(void)
 	WARN_ON(!system_supports_fpsimd());
 	WARN_ON(!have_cpu_fpsimd_context());
 
-	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
+	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
+		sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
 		sve_load_state(sve_pffr(&current->thread),
-			       &current->thread.uw.fpsimd_state.fpsr, true,
-			       sve_vq_from_vl(task_get_sve_vl(current)) - 1);
-	else
+			       &current->thread.uw.fpsimd_state.fpsr, true);
+	} else {
 		fpsimd_load_state(&current->thread.uw.fpsimd_state);
+	}
 }
 
 /*
@@ -1421,10 +1422,10 @@ void __efi_fpsimd_end(void)
 		    likely(__this_cpu_read(efi_sve_state_used))) {
 			char const *sve_state = this_cpu_ptr(efi_sve_state);
 
+			sve_set_vq(sve_vq_from_vl(sve_get_vl()) - 1);
 			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
 				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
-				       true,
-				       sve_vq_from_vl(sve_get_vl()) - 1);
+				       true);
 
 			__this_cpu_write(efi_sve_state_used, false);
 		} else {
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 1bb3b04b84e6..e950875e31ce 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -22,7 +22,7 @@ SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
 	mov	x2, #1
-	__sve_load 0, x1, x2, 3
+	sve_load 0, x1, x2, 3
 	ret
 SYM_FUNC_END(__sve_restore_state)