From: Zengruan Ye <yezengruan@huawei.com>
euleros inclusion
category: feature
bugzilla: NA
CVE: NA
--------------------------------
The kernel already uses this interface, so let's support it on arm64.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/Kconfig                          | 14 ++++++++++
 arch/arm64/include/asm/paravirt.h           | 29 +++++++++++++++++++++
 arch/arm64/include/asm/qspinlock.h          | 15 ++++++++---
 arch/arm64/include/asm/qspinlock_paravirt.h | 12 +++++++++
 arch/arm64/include/asm/spinlock.h           |  3 +++
 arch/arm64/kernel/Makefile                  |  1 +
 arch/arm64/kernel/alternative.c             |  5 ++--
 arch/arm64/kernel/paravirt-spinlocks.c      |  5 ++++
 arch/arm64/kernel/paravirt.c                |  4 +++
 9 files changed, 82 insertions(+), 6 deletions(-)
 create mode 100644 arch/arm64/include/asm/qspinlock_paravirt.h
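
Note (editor's sketch, not part of the commit): the hunks below only add the
hook points and keep the native behaviour as the default. After this patch the
lock fast path stays inline and only the contended and unlock paths go through
pv_ops.sched. The sketch below illustrates that call flow;
example_queued_spin_lock() is a hypothetical name and the fast path shown is
the generic asm-generic/qspinlock.h one, not code added by this patch.

	#include <linux/atomic.h>
	#include <asm/qspinlock.h>
	#include <asm/paravirt.h>

	static inline void example_queued_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		/* uncontended fast path, unchanged by this patch */
		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;

		/*
		 * Contended: indirect call through the new hook; this is
		 * native_queued_spin_lock_slowpath() by default and a
		 * paravirt slow path once a backend replaces it.
		 */
		pv_queued_spin_lock_slowpath(lock, val);
	}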
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8842402e302d..0e71b6819334 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -817,6 +817,7 @@ config NODES_SHIFT
 config NUMA_AWARE_SPINLOCKS
 	bool "Numa-aware spinlocks"
 	depends on NUMA && QUEUED_SPINLOCKS
+	depends on PARAVIRT_SPINLOCKS
 	default n
 	help
 	  Introduce NUMA (Non Uniform Memory Access) awareness into
@@ -901,6 +902,19 @@ config PARAVIRT
 	  under a hypervisor, potentially improving performance significantly
 	  over full virtualization.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on PARAVIRT && SMP
+	help
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  It has a minimal impact on native kernels and gives a nice performance
+	  benefit on paravirtualized KVM kernels.
+
+	  If you are unsure how to answer this question, answer Y.
+
 config PARAVIRT_TIME_ACCOUNTING
 	bool "Paravirtual steal time accounting"
 	select PARAVIRT
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 62e9ba70f4e6..256e3f9df184 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -12,6 +12,12 @@ struct pv_time_ops {
 };
 
 struct pv_sched_ops {
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	void (*queued_spin_unlock)(struct qspinlock *lock);
+
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+
 	bool (*vcpu_is_preempted)(int cpu);
 };
 
@@ -35,6 +41,29 @@ static inline bool pv_vcpu_is_preempted(int cpu)
 	return pv_ops.sched.vcpu_is_preempted(cpu);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+bool pv_is_native_spin_unlock(void);
+static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	return pv_ops.sched.queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	return pv_ops.sched.queued_spin_unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+	return pv_ops.sched.wait(ptr, val);
+}
+
+static inline void pv_kick(int cpu)
+{
+	return pv_ops.sched.kick(cpu);
+}
+#endif /* SMP && PARAVIRT_SPINLOCKS */
+
 #else
 
 #define pv_sched_init() do {} while (0)
diff --git a/arch/arm64/include/asm/qspinlock.h b/arch/arm64/include/asm/qspinlock.h
index fbe176fd4b3f..0022d446aa64 100644
--- a/arch/arm64/include/asm/qspinlock.h
+++ b/arch/arm64/include/asm/qspinlock.h
@@ -2,12 +2,19 @@
 #ifndef _ASM_ARM64_QSPINLOCK_H
 #define _ASM_ARM64_QSPINLOCK_H
 
-#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS (1 << 9)
 
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
 extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 
 #define queued_spin_unlock queued_spin_unlock
 /**
@@ -23,12 +30,12 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 
 static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
-	cna_queued_spin_lock_slowpath(lock, val);
+	pv_queued_spin_lock_slowpath(lock, val);
 }
 
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
-	native_queued_spin_unlock(lock);
+	pv_queued_spin_unlock(lock);
 }
 #endif
 
diff --git a/arch/arm64/include/asm/qspinlock_paravirt.h b/arch/arm64/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..eba4be28fbb9
--- /dev/null
+++ b/arch/arm64/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <yezengruan@huawei.com>
+ */
+
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 4a668995014c..17778794080a 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -20,6 +20,9 @@
 #include <asm/qspinlock.h>
 #include <asm/paravirt.h>
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock() smp_mb()
 
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 73bc1db45839..ac110f1b6c27 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -49,6 +49,7 @@ arm64-obj-$(CONFIG_ARM64_ERR_RECOV) += ras.o
 arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
 arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o
+arm64-obj-$(CONFIG_PARAVIRT_SPINLOCKS) += paravirt.o paravirt-spinlocks.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
 arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o	\
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 8511fc3b94bb..af5585389550 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -26,6 +26,7 @@
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
+#include <asm/paravirt.h>
 #include <linux/stop_machine.h>
 
 #define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
@@ -291,9 +292,9 @@ void __init apply_boot_alternatives(void)
	 */
	if ((numa_spinlock_flag == 1) ||
	    (numa_spinlock_flag == 0 && nr_node_ids > 1 &&
-	     cna_queued_spin_lock_slowpath ==
+	     pv_ops.sched.queued_spin_lock_slowpath ==
	     native_queued_spin_lock_slowpath)) {
-		cna_queued_spin_lock_slowpath =
+		pv_ops.sched.queued_spin_lock_slowpath =
			__cna_queued_spin_lock_slowpath;
	}
 #endif
diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c
index fd733eb02d42..3cb43f9e6a1c 100644
--- a/arch/arm64/kernel/paravirt-spinlocks.c
+++ b/arch/arm64/kernel/paravirt-spinlocks.c
@@ -11,3 +11,8 @@ __visible bool __native_vcpu_is_preempted(int cpu)
 {
	return false;
 }
+
+bool pv_is_native_spin_unlock(void)
+{
+	return false;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 23709083a5c2..3edbcf89009e 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -30,6 +30,10 @@ struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
 struct paravirt_patch_template pv_ops = {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	.sched.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.sched.queued_spin_unlock = native_queued_spin_unlock,
+#endif
	.sched.vcpu_is_preempted = __native_vcpu_is_preempted,
 };
 EXPORT_SYMBOL_GPL(pv_ops);
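
Note (editor's sketch, not part of this patch): guest-side enablement is left
to a later patch; here the hooks keep their native defaults. A hypervisor
backend would be expected to switch them over to the paravirt qspinlock
implementation at init time roughly as below. example_hyp_wait(),
example_hyp_kick() and example_enable_pv_spinlocks() are hypothetical names;
everything else is declared by the hunks above.

	#include <linux/init.h>
	#include <linux/compiler.h>
	#include <asm/processor.h>
	#include <asm/paravirt.h>
	#include <asm/qspinlock.h>
	#include <asm/qspinlock_paravirt.h>

	/* Placeholder helpers -- a real backend would trap to the hypervisor. */
	static void example_hyp_wait(u8 *ptr, u8 val)
	{
		/* Block until *ptr != val or we are kicked; here: just spin. */
		while (READ_ONCE(*ptr) == val)
			cpu_relax();
	}

	static void example_hyp_kick(int cpu)
	{
		/* Ask the hypervisor to wake the vCPU identified by @cpu. */
	}

	static void __init example_enable_pv_spinlocks(void)
	{
		__pv_init_lock_hash();
		pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
		pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
		pv_ops.sched.wait = example_hyp_wait;
		pv_ops.sched.kick = example_hyp_kick;
	}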