From: Marc Zyngier <maz@kernel.org>
mainline inclusion
from mainline-v5.16-rc1
commit 9ee840a96042
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QCBG
CVE: NA
----------------------
CNTPCTSS_EL0 and CNTVCTSS_EL0 are alternatives to the usual CNTPCT_EL0 and CNTVCT_EL0 that do not require a previous ISB to be synchronised (SS stands for Self-Synchronising).
Use the ARM64_HAS_ECV capability to control alternative sequences that switch to these low(er)-cost primitives. Note that the counter access in the VDSO is for now left alone until we decide whether we want to allow this.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211017124225.3018098-16-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/include/asm/arch_timer.h | 32 +++++++++++++++++++++--------
 arch/arm64/include/asm/sysreg.h     |  3 +++
 2 files changed, 27 insertions(+), 8 deletions(-)
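A note for reviewers (not part of the upstream change): ALTERNATIVE()
requires both instruction sequences to be the same length, which is why
the ECV path pads with a nop so that "isb; mrs" and "nop; mrs" each span
two instructions, and the access goes through __mrs_s() so the raw
S-encoding is emitted even on assemblers that do not know the
CNTVCTSS_EL0 name. As a rough freestanding illustration of the ECV path
alone (hypothetical helper name, no ALTERNATIVE patching, assumes
FEAT_ECV hardware):

#include <linux/types.h>

/*
 * Illustration only, not part of this patch: read the self-synchronising
 * virtual counter via its raw S-encoding (op0=3, op1=3, CRn=14, CRm=0,
 * op2=6), which is what __mrs_s("%0", SYS_CNTVCTSS_EL0) expands to.
 * No preceding ISB is needed, unlike a plain CNTVCT_EL0 read.
 */
static inline u64 read_cntvctss_raw(void)
{
	u64 cnt;

	asm volatile("mrs %0, s3_3_c14_c0_6" : "=r" (cnt));
	return cnt;
}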
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 637bb985a330..fe829b641650 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -64,14 +64,26 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
 
 static inline notrace u64 arch_timer_read_cntpct_el0(void)
 {
-	isb();
-	return read_sysreg(cntpct_el0);
+	u64 cnt;
+
+	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
+				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
+				 ARM64_HAS_ECV)
+		     : "=r" (cnt));
+
+	return cnt;
 }
 
 static inline notrace u64 arch_timer_read_cntvct_el0(void)
 {
-	isb();
-	return read_sysreg(cntvct_el0);
+	u64 cnt;
+
+	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
+				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
+				 ARM64_HAS_ECV)
+		     : "=r" (cnt));
+
+	return cnt;
 }
 
 #define arch_timer_reg_read_stable(reg)					\
@@ -174,8 +186,10 @@ static __always_inline u64 __arch_counter_get_cntpct(void)
 {
 	u64 cnt;
 
-	isb();
-	cnt = read_sysreg(cntpct_el0);
+	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
+				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
+				 ARM64_HAS_ECV)
+		     : "=r" (cnt));
 	arch_counter_enforce_ordering(cnt);
 	return cnt;
 }
@@ -193,8 +207,10 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
 {
 	u64 cnt;
 
-	isb();
-	cnt = read_sysreg(cntvct_el0);
+	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
+				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
+				 ARM64_HAS_ECV)
+		     : "=r" (cnt));
 	arch_counter_enforce_ordering(cnt);
 	return cnt;
 }
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5a9ff0d216dc..bc5547727b02 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -450,6 +450,9 @@
 
 #define SYS_CNTFRQ_EL0			sys_reg(3, 3, 14, 0, 0)
 
+#define SYS_CNTPCTSS_EL0		sys_reg(3, 3, 14, 0, 5)
+#define SYS_CNTVCTSS_EL0		sys_reg(3, 3, 14, 0, 6)
+
 #define SYS_CNTP_TVAL_EL0		sys_reg(3, 3, 14, 2, 0)
 #define SYS_CNTP_CTL_EL0		sys_reg(3, 3, 14, 2, 1)
 #define SYS_CNTP_CVAL_EL0		sys_reg(3, 3, 14, 2, 2)
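Post-diff note (sketch, not part of the change): with the new sysreg.h
encodings in place, kernel code could also read the self-synchronising
counters through the existing S-encoded accessor rather than open-coded
asm. A minimal sketch, assuming a kernel translation unit running on
FEAT_ECV hardware; real users should prefer the arch_timer accessors
patched above, which fall back to ISB + CNTVCT_EL0 on non-ECV CPUs:

#include <linux/types.h>
#include <asm/sysreg.h>

/*
 * Sketch only: read_sysreg_s() is the kernel's accessor for registers
 * given by their sys_reg() encoding, so the new SYS_CNTVCTSS_EL0
 * definition can be consumed directly, with no preceding ISB.
 */
static inline u64 read_cntvctss(void)
{
	return read_sysreg_s(SYS_CNTVCTSS_EL0);
}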