From: Reiji Watanabe <reijiw@google.com>
mainline inclusion
from mainline-v6.7-rc1
commit 57fc267f1b5caa56dfb46f60e20e673cbc4cc4a8
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/I9Q7QP
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=57fc267f1b5caa56dfb46f60e20e673cbc4cc4a8
--------------------------------
Add a helper to read a vCPU's PMCR_EL0, and use it whenever KVM reads a vCPU's PMCR_EL0.
Currently, the PMCR_EL0 value is tracked per vCPU. The following patches will make (only) PMCR_EL0.N tracked per guest. The new helper will make it easy to combine the PMCR_EL0.N field (tracked per guest) with the remaining fields (tracked per vCPU) when providing the value of PMCR_EL0.
No functional change intended.
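
For context, the shape the helper is expected to take once PMCR_EL0.N becomes per guest can be sketched as follows. This is only an illustration of the direction described above, not part of this patch; the per-guest field (here vcpu->kvm->arch.pmcr_n) is hypothetical at this point in the series and is introduced by later patches:

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	/* Per-vCPU bits, with the N field masked out */
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
		   ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);

	/* Splice in the per-guest N value (hypothetical field here) */
	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
}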
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231020214053.2144305-4-rananta@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Junhao He <hejunhao3@huawei.com>
---
 arch/arm64/kvm/arm.c      |  3 +--
 arch/arm64/kvm/pmu-emul.c | 21 +++++++++++++++------
 arch/arm64/kvm/sys_regs.c |  6 +++---
 include/kvm/arm_pmu.h     |  6 ++++++
 4 files changed, 25 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 333c65ced849..a84c992126d7 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -874,8 +874,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		}
 
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-			kvm_pmu_handle_pmcr(vcpu,
-					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+			kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
 
 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
 			kvm_vcpu_pmu_restore_guest(vcpu);
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e6be14e38cee..b7e094d1dcd7 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
 	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
 	int i;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
 		return;
 
 	/* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 
-	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
@@ -1090,3 +1090,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
 					     ID_AA64DFR0_EL1_PMUVer_V3P5);
 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return __vcpu_sys_reg(vcpu, PMCR_EL0);
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 02e21394e211..513731782667 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -843,7 +843,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Only update writeable bits of PMCR (continuing into
 		 * kvm_pmu_handle_pmcr() as well)
 		 */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+		val = kvm_vcpu_read_pmcr(vcpu);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 		if (!kvm_supports_32bit_el0())
@@ -851,7 +851,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_handle_pmcr(vcpu, val);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+		val = kvm_vcpu_read_pmcr(vcpu)
 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 		p->regval = val;
 	}
@@ -900,7 +900,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
 	u64 pmcr, val;
 
-	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+	pmcr = kvm_vcpu_read_pmcr(vcpu);
 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 10ec84a45df0..467d60c53153 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -103,6 +103,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -179,6 +180,11 @@ static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
 }
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 #endif
 
 #endif