virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TN8N
CVE: NA
--------------------------------
Support the PV qspinlock feature after the removal of pv_ops: wire the
qspinlock paravirt hooks (queued_spin_lock_slowpath, queued_spin_unlock,
wait, kick) through static calls instead of the pv_ops.sched indirection.
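
For context on the mechanism: DECLARE_STATIC_CALL()/DEFINE_STATIC_CALL()
set up a call site that static_call_update() patches in place, so the
fast path is a direct branch instead of a load through a pv_ops function
pointer. A minimal sketch of the pattern, with illustrative demo_* names
that are not part of this patch:

    #include <linux/init.h>
    #include <linux/static_call.h>

    static void demo_native_kick(int cpu) { }
    static void demo_pv_kick(int cpu) { /* e.g. wake the vCPU via hypercall */ }

    /* The call site defaults to the native implementation. */
    DEFINE_STATIC_CALL(demo_kick, demo_native_kick);

    static void demo_kick_cpu(int cpu)
    {
            /* Compiles to a direct call to the current target. */
            static_call(demo_kick)(cpu);
    }

    static int __init demo_init(void)
    {
            /* Retarget the call site once, during boot. */
            static_call_update(demo_kick, demo_pv_kick);
            return 0;
    }
    early_initcall(demo_init);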
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/paravirt.h |  9 ++++++---
 arch/arm64/kernel/paravirt.c      | 10 ++++++----
 2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index f10552839663..8e604a3f3062 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -33,7 +33,8 @@ static inline bool pv_vcpu_is_preempted(int cpu)
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 void __init pv_qspinlock_init(void);
 bool pv_is_native_spin_unlock(void);
-DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath,
+		    native_queued_spin_lock_slowpath);
 static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	return static_call(pv_qspinlock_queued_spin_lock_slowpath)(lock, val);
@@ -45,14 +46,16 @@ static inline void pv_queued_spin_unlock(struct qspinlock *lock)
 	return static_call(pv_qspinlock_queued_spin_unlock)(lock);
 }
 
+DECLARE_STATIC_CALL(pv_qspinlock_wait, kvm_wait);
 static inline void pv_wait(u8 *ptr, u8 val)
 {
-	return;
+	return static_call(pv_qspinlock_wait)(ptr, val);
 }
 
+DECLARE_STATIC_CALL(pv_qspinlock_kick, kvm_kick_cpu);
 static inline void pv_kick(int cpu)
 {
-	return;
+	return static_call(pv_qspinlock_kick)(cpu);
 }
 
 #else
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 106f18654cea..0bfce7dbd229 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -309,10 +309,12 @@ void __init pv_qspinlock_init(void)
 	pr_info("PV qspinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
-	pv_ops.sched.wait = kvm_wait;
-	pv_ops.sched.kick = kvm_kick_cpu;
+	static_call_update(pv_qspinlock_queued_spin_lock_slowpath,
+			   __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_qspinlock_queued_spin_unlock,
+			   __pv_queued_spin_unlock);
+	static_call_update(pv_qspinlock_wait, kvm_wait);
+	static_call_update(pv_qspinlock_kick, kvm_kick_cpu);
 }
 
 static __init int arm_parse_pvspin(char *arg)
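
For reference, pv_wait() and pv_kick() are the park/wake primitives the
generic __pv_queued_spin_lock_slowpath() builds on: a waiter blocks while
*ptr still reads val, and the lock holder kicks the parked vCPU after
release. A hedged sketch of that contract (example_* names are
illustrative only, not part of this patch):

    /* Waiter side: park this vCPU for as long as *ptr == val. */
    static void example_park(u8 *ptr, u8 val)
    {
            pv_wait(ptr, val);   /* now static_call(pv_qspinlock_wait) */
    }

    /* Holder side: wake the vCPU parked on the lock word. */
    static void example_unpark(int waiter_cpu)
    {
            pv_kick(waiter_cpu); /* now static_call(pv_qspinlock_kick) */
    }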