virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WMFU
CVE: NA
Support the PV qspinlock feature after the removal of pv_ops: replace the
pv_ops.sched qspinlock hooks (queued_spin_lock_slowpath, queued_spin_unlock,
wait, kick) with static calls. The static calls default to the native and KVM
implementations and are repointed to the PV variants in pv_qspinlock_init(),
so pv_wait() and pv_kick() are no longer no-ops.
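In short, the pattern is as follows (condensed illustration using the kick
hook; the other three hooks follow the same shape, see the diff below for the
exact file contents):

	/* Header: declare the call; the dummy prototype only fixes the
	 * call signature, the real target is chosen at runtime. */
	void dummy_kick(int cpu);
	DECLARE_STATIC_CALL(pv_qspinlock_kick, dummy_kick);

	static inline void pv_kick(int cpu)
	{
		return static_call(pv_qspinlock_kick)(cpu);
	}

	/* C file: define the call with a default target and export it. */
	DEFINE_STATIC_CALL(pv_qspinlock_kick, kvm_kick_cpu);
	EXPORT_STATIC_CALL(pv_qspinlock_kick);

	/* Boot-time rebinding in pv_qspinlock_init(), without any
	 * pv_ops indirection. */
	static_call_update(pv_qspinlock_kick, kvm_kick_cpu);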
Signed-off-by: lishusen <lishusen2@huawei.com>
---
 arch/arm64/include/asm/paravirt.h | 16 ++++++++++++----
 arch/arm64/kernel/paravirt.c      | 22 ++++++++++++++++++----
 2 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index f10552839663..a29eeffa49aa 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -33,26 +33,34 @@ static inline bool pv_vcpu_is_preempted(int cpu)
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 void __init pv_qspinlock_init(void);
 bool pv_is_native_spin_unlock(void);
-DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath,
+		    dummy_queued_spin_lock_slowpath);
 static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	return static_call(pv_qspinlock_queued_spin_lock_slowpath)(lock, val);
 }
 
-DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_unlock, native_queued_spin_unlock);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+DECLARE_STATIC_CALL(pv_qspinlock_queued_spin_unlock, dummy_queued_spin_unlock);
 static inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	return static_call(pv_qspinlock_queued_spin_unlock)(lock);
 }
 
+void dummy_wait(u8 *ptr, u8 val);
+DECLARE_STATIC_CALL(pv_qspinlock_wait, dummy_wait);
 static inline void pv_wait(u8 *ptr, u8 val)
 {
-	return;
+	return static_call(pv_qspinlock_wait)(ptr, val);
 }
 
+void dummy_kick(int cpu);
+DECLARE_STATIC_CALL(pv_qspinlock_kick, dummy_kick);
 static inline void pv_kick(int cpu)
 {
-	return;
+	return static_call(pv_qspinlock_kick)(cpu);
 }
 
 #else
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index c5fc82372e47..73a8b9886775 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -296,6 +296,17 @@ static void kvm_wait(u8 *ptr, u8 val)
 	local_irq_restore(flags);
 }
 
+DEFINE_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath,
+		   native_queued_spin_lock_slowpath);
+DEFINE_STATIC_CALL(pv_qspinlock_queued_spin_unlock, native_queued_spin_unlock);
+DEFINE_STATIC_CALL(pv_qspinlock_wait, kvm_wait);
+DEFINE_STATIC_CALL(pv_qspinlock_kick, kvm_kick_cpu);
+
+EXPORT_STATIC_CALL(pv_qspinlock_queued_spin_lock_slowpath);
+EXPORT_STATIC_CALL(pv_qspinlock_queued_spin_unlock);
+EXPORT_STATIC_CALL(pv_qspinlock_wait);
+EXPORT_STATIC_CALL(pv_qspinlock_kick);
+
 void __init pv_qspinlock_init(void)
 {
 	/* Don't use the PV qspinlock code if there is only 1 vCPU. */
@@ -309,10 +320,13 @@ void __init pv_qspinlock_init(void)
 	pr_info("PV qspinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
-	pv_ops.sched.wait = kvm_wait;
-	pv_ops.sched.kick = kvm_kick_cpu;
+
+	static_call_update(pv_qspinlock_queued_spin_lock_slowpath,
+			   __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_qspinlock_queued_spin_unlock,
+			   __pv_queued_spin_unlock);
+	static_call_update(pv_qspinlock_wait, kvm_wait);
+	static_call_update(pv_qspinlock_kick, kvm_kick_cpu);
 }
 
 static __init int arm_parse_pvspin(char *arg)