From: Marc Zyngier <maz@kernel.org>
mainline inclusion
from mainline-v5.13-rc1~76^2
commit 8c8010d69c1322734a272eb95dbbf42b5190e565
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5ITJT
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
-------------------------------------------------
Implement the SVE save/restore for nVHE, following a similar logic to that of the VHE implementation:
- the SVE state is switched on trap from EL1 to EL2
- no further changes to ZCR_EL2 occur as long as the guest isn't preempted and doesn't exit to userspace
- ZCR_EL2 is reset to its default value on the first SVE access from the host EL1, and ZCR_EL1 restored to the default guest value in vcpu_put()
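For illustration only (not part of the patch): a minimal userspace sketch of the nVHE trap handling described above. The bit positions and RES1 value are assumed to mirror arch/arm64/include/asm/kvm_arm.h, and the kernel's sysreg_clear_set() is stood in for by plain bit operations on a local variable.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed to match arch/arm64/include/asm/kvm_arm.h */
	#define CPTR_EL2_TAM		(1u << 30)	/* trap Activity Monitor regs */
	#define CPTR_EL2_TTA		(1u << 20)	/* trap trace regs */
	#define CPTR_EL2_TFP		(1u << 10)	/* trap FP/SIMD */
	#define CPTR_EL2_TZ		(1u << 8)	/* trap SVE */
	#define CPTR_EL2_DEFAULT	0x000032ffu	/* RES1 bits */

	/* Roughly __activate_traps(): FP and SVE start off trapped when the
	 * guest's FP state is not resident. */
	static uint32_t activate_traps(bool fp_enabled)
	{
		uint32_t val = CPTR_EL2_DEFAULT | CPTR_EL2_TTA | CPTR_EL2_TAM;

		if (!fp_enabled)
			val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		return val;
	}

	/* Roughly the nVHE leg of __hyp_handle_fpsimd(): on the first FP/SVE
	 * trap, clear TFP, and also TZ for an SVE guest, i.e. the
	 * sysreg_clear_set(cptr_el2, reg, 0) call in the patch. */
	static uint32_t handle_fpsimd_trap(uint32_t cptr, bool sve_guest)
	{
		uint32_t clr = CPTR_EL2_TFP;

		if (sve_guest)
			clr |= CPTR_EL2_TZ;
		return cptr & ~clr;
	}

	int main(void)
	{
		uint32_t cptr = activate_traps(false);

		printf("after __activate_traps:  %#010x\n", (unsigned)cptr);
		printf("after first FP/SVE trap: %#010x\n",
		       (unsigned)handle_fpsimd_trap(cptr, true));
		return 0;
	}

This makes the point of the last hunk below visible: CPTR_EL2_TZ moves from being set unconditionally to being set only alongside CPTR_EL2_TFP, so a single trap re-enables both FP/SIMD and SVE for an SVE guest.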
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
---
 arch/arm64/kvm/fpsimd.c                 | 10 +++++--
 arch/arm64/kvm/hyp/include/hyp/switch.h | 35 +++++++++----------------
 arch/arm64/kvm/hyp/nvhe/switch.c        |  4 +--
 3 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 14ea05c5134a..5621020b28de 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -121,11 +121,17 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	local_irq_save(flags);
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		if (guest_has_sve)
+		if (guest_has_sve) {
 			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
 
+			/* Restore the VL that was saved when bound to the CPU */
+			if (!has_vhe())
+				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+						       SYS_ZCR_EL1);
+		}
+
 		fpsimd_save_and_flush_cpu_state();
-	} else if (host_has_sve) {
+	} else if (has_vhe() && host_has_sve) {
 		/*
 		 * The FPSIMD/SVE state in the CPU has not been touched, and we
 		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 37a115f90372..9b173f5102d3 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -215,25 +215,19 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
-	bool vhe, sve_guest, sve_host;
+	bool sve_guest, sve_host;
 	u8 esr_ec;
+	u64 reg;
 
 	if (!system_supports_fpsimd())
 		return false;
 
-	/*
-	 * Currently system_supports_sve() currently implies has_vhe(),
-	 * so the check is redundant. However, has_vhe() can be determined
-	 * statically and helps the compiler remove dead code.
-	 */
-	if (has_vhe() && system_supports_sve()) {
+	if (system_supports_sve()) {
 		sve_guest = vcpu_has_sve(vcpu);
 		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
-		vhe = true;
 	} else {
 		sve_guest = false;
 		sve_host = false;
-		vhe = has_vhe();
 	}
 
 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
@@ -243,31 +237,28 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.fp_asimd_exit_stat++;
 	/* Don't handle SVE traps for non-SVE vcpus here: */
-	if (!sve_guest)
-		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
-			return false;
+	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+		return false;
 
 	/* Valid trap. Switch the context: */
 
-	if (vhe) {
-		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
-
+	if (has_vhe()) {
+		reg = CPACR_EL1_FPEN;
 		if (sve_guest)
 			reg |= CPACR_EL1_ZEN;
 
-		write_sysreg(reg, cpacr_el1);
+		sysreg_clear_set(cpacr_el1, 0, reg);
 	} else {
-		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
-			     cptr_el2);
+		reg = CPTR_EL2_TFP;
+		if (sve_guest)
+			reg |= CPTR_EL2_TZ;
+
+		sysreg_clear_set(cptr_el2, reg, 0);
 	}
 
 	isb();
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
-		/*
-		 * In the SVE case, VHE is assumed: it is enforced by
-		 * Kconfig and kvm_arch_init().
-		 */
 		if (sve_host)
 			__hyp_sve_save_host(vcpu);
 		else
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6624596846d3..c6d7451a6daf 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -40,9 +40,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	__activate_traps_common(vcpu);
 
 	val = CPTR_EL2_DEFAULT;
-	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
 	if (!update_fp_enabled(vcpu)) {
-		val |= CPTR_EL2_TFP;
+		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 		__activate_traps_fpsimd32(vcpu);
 	}
 