mainline inclusion
from mainline-v6.10-rc1
commit b782e8d07baac95a5ce3f8773cc61f4ed7d0ccbc
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9J01A
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
----------------------------------------------------------------------
Currently we use "sbfx" to extract the PMUVer field from ID_AA64DFR0_EL1 and skip the init/reset when the extracted value is negative or zero, i.e. when no PMU is present. However, for PMUv3p8 the PMUVer field is 0b1000, so the value extracted by "sbfx" is always negative and we unexpectedly skip the init/reset in __init_el2_debug/reset_pmuserenr_el0.
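To illustrate the failure mode, here is a small userspace sketch (not part of the patch); sbfx_pmuver()/ubfx_pmuver() are made-up helper names that only model the two instructions acting on the 4-bit PMUVer field at bits [11:8]:

  #include <stdint.h>
  #include <stdio.h>

  /* Model of "sbfx reg, reg, #8, #4": extract PMUVer and sign-extend bit 3. */
  static int64_t sbfx_pmuver(uint64_t id_aa64dfr0)
  {
      int64_t field = (id_aa64dfr0 >> 8) & 0xf;

      return (field ^ 0x8) - 0x8;
  }

  /* Model of "ubfx reg, reg, #8, #4": extract PMUVer and zero-extend. */
  static uint64_t ubfx_pmuver(uint64_t id_aa64dfr0)
  {
      return (id_aa64dfr0 >> 8) & 0xf;
  }

  int main(void)
  {
      /* Pretend ID_AA64DFR0_EL1.PMUVer (bits [11:8]) reads 0b1000, i.e. PMUv3p8. */
      uint64_t dfr0 = 0x8ULL << 8;
      int64_t s = sbfx_pmuver(dfr0);

      /* Old check: "cmp #1; b.lt" treats anything below 1 as "no PMU". */
      printf("sbfx -> %lld (%s)\n", (long long)s,
             s < 1 ? "init skipped, wrong for PMUv3p8" : "init runs");
      printf("ubfx -> %llu (init runs)\n", (unsigned long long)ubfx_pmuver(dfr0));
      return 0;
  }

For PMUVer 0b1000 the sign-extended value is -8, so the old "cmp #1; b.lt" check wrongly concludes there is no PMU.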
So this patch uses "ubfx" instead of "sbfx" to extract the PMUVer, and skips the reset/init only if the PMU is IMPLEMENTATION DEFINED (0b1111) or not implemented (0b0000). Previously we would also skip the init/reset if the PMUVer was higher than the newest version we know of (currently PMUv3p9); with this patch we skip only when the PMU is not implemented or IMPLEMENTATION DEFINED. This is consistent with how the driver probes the PMU via pmuv3_implemented().
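For reference, the new skip condition expressed in C; this is only an illustrative sketch of what the cmp/ccmp pair below and the driver's pmuv3_implemented() probe boil down to (the constant values follow the 0b0000/0b1111 encodings named above; skip_pmu_init() is a made-up name):

  #include <stdbool.h>
  #include <stdint.h>

  #define ID_AA64DFR0_PMUVER_NI       0x0  /* PMU not implemented */
  #define ID_AA64DFR0_PMUVER_IMP_DEF  0xf  /* IMPLEMENTATION DEFINED PMU */

  /* Skip the PMU init/reset only for "not implemented" or IMP DEF. */
  static inline bool skip_pmu_init(uint64_t pmuver)
  {
      return pmuver == ID_AA64DFR0_PMUVER_NI ||
             pmuver == ID_AA64DFR0_PMUVER_IMP_DEF;
  }

In the assembly, the same condition is built without an extra branch: the ccmp immediate #4 forces the Z flag when the first cmp already matched NI, otherwise Z reflects the IMP_DEF comparison, so the following b.eq/csel see "equal" exactly when the PMU is absent or IMPLEMENTATION DEFINED.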
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Link: https://lore.kernel.org/r/20240411123030.7201-1-yangyicong@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Junhao He <hejunhao3@huawei.com>
---
 arch/arm64/include/asm/assembler.h | 7 ++++---
 arch/arm64/include/asm/sysreg.h    | 1 +
 arch/arm64/kernel/head.S           | 9 +++++----
 3 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5e6bacda05d8..63db8e5ec9f8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -466,9 +466,10 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  */
 	.macro	reset_pmuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64dfr0_el1
-	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
-	cmp	\tmpreg, #1			// Skip if no PMU present
-	b.lt	9000f
+	ubfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	cmp	\tmpreg, #ID_AA64DFR0_PMUVER_NI
+	ccmp	\tmpreg, #ID_AA64DFR0_PMUVER_IMP_DEF, #4, ne
+	b.eq	9000f
 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
 9000:
 	.endm
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8bcc9ac9963e..9705f7abd428 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1488,6 +1488,7 @@
 #define ID_AA64DFR0_EL1_BRBE_NI		0x0
 #define ID_AA64DFR0_EL1_BRBE_IMP	0x1
 #define ID_AA64DFR0_EL1_BRBE_BRBE_V1P1	0x2
+#define ID_AA64DFR0_PMUVER_NI		0x0
 #define ID_AA64DFR0_PMUVER_8_0		0x1
 #define ID_AA64DFR0_PMUVER_8_1		0x4
 #define ID_AA64DFR0_PMUVER_8_4		0x5
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 91890de6cefa..b44213388bab 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -623,13 +623,14 @@ set_hcr:
 
 	/* EL2 debug */
 	mrs	x1, id_aa64dfr0_el1
-	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
-	cmp	x0, #1
-	b.lt	4f				// Skip if no PMU present
+	ubfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	cmp	x0, #ID_AA64DFR0_PMUVER_NI
+	ccmp	x0, #ID_AA64DFR0_PMUVER_IMP_DEF, #4, ne
+	b.eq	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
 4:
-	csel	x3, xzr, x0, lt			// all PMU counters from EL1
+	csel	x3, xzr, x0, eq			// all PMU counters from EL1
 
 	/* Statistical profiling */
 	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4