From: Mark Brown <broonie@kernel.org>
mainline inclusion
from mainline-v5.14-rc1
commit ad4711f962e08eff8d6e9b03f9670b1af6ea9395
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/II8E73O
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
-------------------------------------------------
When the SVE vector length is 128 bits there are no bits in the Z registers which are not shared with the V registers, so we can skip them when zeroing state not shared with FPSIMD. This results in a minor performance improvement.
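For reference, a minimal C sketch of the reasoning behind the new check (SVE_VQ_BYTES matches the kernel's 16 bytes per quadword; the helper names below are illustrative only and not part of this patch):

#include <stdbool.h>

/* One SVE vector quadword is 128 bits (16 bytes), as in the kernel's SVE_VQ_BYTES. */
#define SVE_VQ_BYTES	16

/* Mirror of sve_vq_from_vl(): vector length in bytes -> number of quadwords. */
static unsigned int vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / SVE_VQ_BYTES;
}

/*
 * When VQ - 1 == 0 the vector length is exactly 128 bits, so every Z
 * register bit is aliased by the corresponding V register and there is
 * no extra Z state left for sve_flush_live() to zero.
 */
static bool has_extra_z_state(unsigned int vl_bytes)
{
	return (vq_from_vl(vl_bytes) - 1) != 0;
}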
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Dave Martin <Dave.Martin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210512151131.27877-4-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
---
 arch/arm64/include/asm/fpsimd.h  |  2 +-
 arch/arm64/kernel/entry-fpsimd.S | 12 ++++++++++--
 arch/arm64/kernel/fpsimd.c       |  6 ++++--
 3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 7de0b34476f3d..8dcc31d0db0d3 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,7 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread)
 extern void sve_save_state(void *state, u32 *pfpsr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
-extern void sve_flush_live(void);
+extern void sve_flush_live(unsigned long vq_minus_1);
 extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
				       unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 7921d58427c27..9bc201ef5559b 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -70,10 +70,18 @@ SYM_FUNC_START(sve_load_from_fpsimd_state)
	ret
 SYM_FUNC_END(sve_load_from_fpsimd_state)

-/* Zero all SVE registers but the first 128-bits of each vector */
+/*
+ * Zero all SVE registers but the first 128-bits of each vector
+ *
+ * VQ must already be configured by caller, any further updates of VQ
+ * will need to ensure that the register state remains valid.
+ *
+ * x0 = VQ - 1
+ */
 SYM_FUNC_START(sve_flush_live)
+	cbz		x0, 1f	// A VQ-1 of 0 is 128 bits so no extra Z state
	sve_flush_z
-	sve_flush_p_ffr
+1:	sve_flush_p_ffr
	ret
 SYM_FUNC_END(sve_flush_live)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 9c3edb0899aa6..3599b9a2f1dff 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -957,8 +957,10 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
	 * disabling the trap, otherwise update our in-memory copy.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
-		sve_flush_live();
+		unsigned long vq_minus_one =
+			sve_vq_from_vl(current->thread.sve_vl) - 1;
+		sve_set_vq(vq_minus_one);
+		sve_flush_live(vq_minus_one);
		fpsimd_bind_task_to_cpu();
	} else {
		fpsimd_to_sve(current);