
From: eillon <yezhenyu2@huawei.com> hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IB103D CVE: NA ---------------------------------------------------------------------- The HDBSS feature introduces new assembly registers (HDBSSBR_EL2 and HDBSSPROD_EL2), which depends on the armv9.5-a compilation support. So add ARM64_HDBSS config to control whether to enable the HDBSS feature. Signed-off-by: eillon <yezhenyu2@huawei.com> --- arch/arm64/Kconfig | 12 ++++++++++++ arch/arm64/configs/openeuler_defconfig | 6 ++++++ arch/arm64/include/asm/cpufeature.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/include/asm/kvm_mmu.h | 2 ++ arch/arm64/include/asm/sysreg.h | 2 ++ arch/arm64/kvm/arm.c | 8 ++++++++ arch/arm64/kvm/handle_exit.c | 5 ++++- arch/arm64/kvm/hyp/pgtable.c | 4 ++++ arch/arm64/kvm/hyp/vhe/switch.c | 2 ++ arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 2 ++ arch/arm64/kvm/mmu.c | 4 ++++ arch/arm64/kvm/reset.c | 2 ++ include/linux/kvm_host.h | 2 ++ 14 files changed, 55 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9da9d58f1c02..7cbdd0b6259e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2376,6 +2376,18 @@ config ARM64_HAFT endmenu # "ARMv8.8 architectural features" +menu "ARMv9.5 architectural features" + +config ARM64_HDBSS + bool "Enable support for Hardware Dirty state tracking Structure (HDBSS)" + default y + help + Hardware Dirty state tracking Structure (HDBSS) enhances tracking + translation table descriptors' dirty state to reduce the cost of + surveying for dirtied granules. 
+ +endmenu # "ARMv9.5 architectural features" + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 3205dc763d99..8764ab324aa5 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -567,6 +567,12 @@ CONFIG_ARM64_EPAN=y CONFIG_ARM64_NMI=y # end of ARMv8.8 architectural features +# +# ARMv9.5 architectural features +# +CONFIG_ARM64_HDBSS=y +# end of ARMv9.5 architectural features + CONFIG_ARM64_SVE=y CONFIG_ARM64_SME=y CONFIG_ARM64_PSEUDO_NMI=y diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index d2b9771b4a82..a58c8b332b21 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -752,6 +752,7 @@ static __always_inline bool system_supports_fpsimd(void) return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } +#ifdef CONFIG_ARM64_HDBSS static inline bool system_supports_hdbss(void) { u64 mmfr1; @@ -763,6 +764,7 @@ static inline bool system_supports_hdbss(void) return val == ID_AA64MMFR1_EL1_HAFDBS_HDBSS; } +#endif static inline bool system_uses_hw_pan(void) { diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 51f99f3d824a..7c7e994cf12c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -641,11 +641,14 @@ struct kvm_vcpu_arch { #ifdef CONFIG_HISI_VIRTCCA_HOST struct virtcca_cvm_tec tec; #endif + +#ifdef CONFIG_ARM64_HDBSS /* HDBSS registers info */ struct { u64 br_el2; u64 prod_el2; } hdbss; +#endif }; /* diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index a76bc71010e7..822d958b55e6 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -310,6 +310,7 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); } +#ifdef 
CONFIG_ARM64_HDBSS static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu) { if (!vcpu->kvm->enable_hdbss) @@ -321,6 +322,7 @@ static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu) dsb(sy); isb(); } +#endif static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu) { diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 8494aac11824..0d36f20ecd33 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1031,6 +1031,7 @@ #define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4)) +#ifdef CONFIG_ARM64_HDBSS /* * Definitions for the HDBSS feature */ @@ -1042,6 +1043,7 @@ #define HDBSSBR_SZ(br) (((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT) #define HDBSSPROD_IDX(prod) (((prod) & HDBSSPROD_EL2_INDEX_MASK) >> HDBSSPROD_EL2_INDEX_SHIFT) +#endif #define ARM64_FEATURE_FIELD_BITS 4 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f7206ab91c9c..d0d4e6bdc06b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -129,6 +129,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; } +#ifdef CONFIG_ARM64_HDBSS static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -192,6 +193,7 @@ static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, return 0; } +#endif int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) @@ -247,9 +249,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = kvm_cvm_enable_cap(kvm, cap); break; #endif +#ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = kvm_cap_arm_enable_hdbss(kvm, cap); break; +#endif default: r = -EINVAL; break; @@ -503,9 +507,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = static_key_enabled(&virtcca_cvm_is_available); break; #endif +#ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = system_supports_hdbss(); break; +#endif default: r = 0; } @@ -1837,6 +1843,7 @@ 
long kvm_arch_vcpu_ioctl(struct file *filp, void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { +#ifdef CONFIG_ARM64_HDBSS /* * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called * before reporting dirty_bitmap to userspace. KVM flushes the buffers @@ -1848,6 +1855,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); +#endif } static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index c9b3ce381018..6919c3858193 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -292,6 +292,7 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) return arm_exit_handlers[esr_ec]; } +#ifdef CONFIG_ARM64_HDBSS #define HDBSS_ENTRY_VALID_SHIFT 0 #define HDBSS_ENTRY_VALID_MASK (1UL << HDBSS_ENTRY_VALID_SHIFT) #define HDBSS_ENTRY_IPA_SHIFT 12 @@ -335,6 +336,7 @@ static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu) dsb(sy); isb(); } +#endif /* * We may be single-stepping an emulated instruction. If the emulation @@ -371,9 +373,10 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index) { struct kvm_run *run = vcpu->run; +#ifdef CONFIG_ARM64_HDBSS if (vcpu->kvm->enable_hdbss) kvm_flush_hdbss_buffer(vcpu); - +#endif if (ARM_SERROR_PENDING(exception_index)) { /* * The SError is handled by handle_exit_early(). 
If the guest diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 056b21a1ca92..874244df723e 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -713,8 +713,10 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p if (prot & KVM_PGTABLE_PROT_W) attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W; +#ifdef CONFIG_ARM64_HDBSS if (prot & KVM_PGTABLE_PROT_DBM) attr |= KVM_PTE_LEAF_ATTR_HI_S2_DBM; +#endif attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh); attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF; @@ -1320,8 +1322,10 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, if (prot & KVM_PGTABLE_PROT_W) set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W; +#ifdef CONFIG_ARM64_HDBSS if (prot & KVM_PGTABLE_PROT_DBM) set |= KVM_PTE_LEAF_ATTR_HI_S2_DBM; +#endif if (prot & KVM_PGTABLE_PROT_X) clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 3d6f5891010e..9d315bd54e00 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -225,7 +225,9 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) * __activate_traps clear HCR_EL2.TGE (among other things). 
*/ __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch); +#ifdef CONFIG_ARM64_HDBSS __load_hdbss(vcpu); +#endif __activate_traps(vcpu); __kvm_adjust_pc(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index 236d07c1b0b8..283e19127591 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -92,7 +92,9 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) __sysreg_restore_el1_state(guest_ctxt); __mpam_guest_load(); +#ifdef CONFIG_ARM64_HDBSS __load_hdbss(vcpu); +#endif vcpu_set_flag(vcpu, SYSREGS_ON_CPU); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 0347ab14cf11..3830aa0b07a0 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1586,8 +1586,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (writable) prot |= KVM_PGTABLE_PROT_W; +#ifdef CONFIG_ARM64_HDBSS if (kvm->enable_hdbss && logging_active) prot |= KVM_PGTABLE_PROT_DBM; +#endif if (exec_fault) prot |= KVM_PGTABLE_PROT_X; @@ -1670,6 +1672,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); is_iabt = kvm_vcpu_trap_is_iabt(vcpu); +#ifdef CONFIG_ARM64_HDBSS /* * HDBSS buffer already flushed when enter handle_trap_exceptions(). * Nothing to do here. 
@@ -1677,6 +1680,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) iss2 = ESR_ELx_ISS2(kvm_vcpu_get_esr(vcpu)); if (fault_status == ESR_ELx_FSC_PERM && (iss2 & ESR_ELx_HDBSSF)) return 1; +#endif if (fault_status == ESR_ELx_FSC_FAULT) { /* Beyond sanitised PARange (which is the IPA limit) */ diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 806080553bc1..0de1094d4e19 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -175,11 +175,13 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_destroy_tec(vcpu); #endif +#ifdef CONFIG_ARM64_HDBSS if (vcpu->arch.hdbss.br_el2) { hdbss_pg = phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)); if (hdbss_pg) __free_pages(hdbss_pg, HDBSSBR_SZ(vcpu->arch.hdbss.br_el2)); } +#endif } static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7235e88c726f..560ff9dd2b27 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -842,7 +842,9 @@ struct kvm { struct notifier_block pm_notifier; #endif char stats_id[KVM_STATS_NAME_SIZE]; +#ifdef CONFIG_ARM64_HDBSS bool enable_hdbss; +#endif }; #define kvm_err(fmt, ...) \ -- 2.39.3