hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8T8XV
CVE: NA

-------------------------------------------------

The CNA spinlock is enabled through the 'pv_ops' hooks of pvspinlock, so it
is currently supported only on x86_64. Add support for arm64, which does not
use pvspinlock.
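
The slow path is now reached through a plain function pointer rather than
through pv_ops, roughly as sketched below (condensed from the hunks in this
patch):

    /* arch/arm64/include/asm/qspinlock.h: arm64 dispatches via a pointer */
    static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
    {
            cna_queued_spin_lock_slowpath(lock, val);
    }

    /* kernel/locking/qspinlock_cna.h: the pointer defaults to the native
     * slow path, or stays an alias of the pv_ops hook when pvspinlock is
     * available
     */
    #ifdef CONFIG_PARAVIRT_SPINLOCKS
    #define cna_queued_spin_lock_slowpath pv_ops.lock.queued_spin_lock_slowpath
    #else
    void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val) =
            native_queued_spin_lock_slowpath;
    #endif

    /* cna_configure_spin_lock_slowpath(), now called from start_kernel()
     * instead of alternative_instructions(), retargets the pointer when CNA
     * is enabled
     */
    cna_queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath;
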
Signed-off-by: Wei Li <liwei391@huawei.com>
---
 arch/arm64/Kconfig                 | 15 ++++++++++++
 arch/arm64/include/asm/Kbuild      |  1 -
 arch/arm64/include/asm/qspinlock.h | 38 ++++++++++++++++++++++++++++++
 arch/x86/kernel/alternative.c      |  4 ----
 init/main.c                        |  4 +++-
 kernel/locking/qspinlock.c         |  2 +-
 kernel/locking/qspinlock_cna.h     | 12 ++++++++--
 7 files changed, 67 insertions(+), 9 deletions(-)
 create mode 100644 arch/arm64/include/asm/qspinlock.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b6088df74edd..efa1ac167033 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1527,6 +1527,21 @@ config NODES_SHIFT
           Specify the maximum number of NUMA Nodes available on the target
           system.  Increases memory reserved to accommodate various tables.
 
+config NUMA_AWARE_SPINLOCKS
+        bool "Numa-aware spinlocks"
+        depends on NUMA
+        depends on QUEUED_SPINLOCKS
+        default n
+        help
+          Introduce NUMA (Non Uniform Memory Access) awareness into
+          the slow path of spinlocks.
+
+          In this variant of qspinlock, the kernel will try to keep the lock
+          on the same node, thus reducing the number of remote cache misses,
+          while trading some of the short term fairness for better performance.
+
+          Say N if you want absolute first come first serve fairness.
+
 source "kernel/Kconfig.hz"
 
 config ARCH_SPARSEMEM_ENABLE
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 5c8ee5a541d2..d16ee8095366 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -2,7 +2,6 @@
 generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
-generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
diff --git a/arch/arm64/include/asm/qspinlock.h b/arch/arm64/include/asm/qspinlock.h
new file mode 100644
index 000000000000..8cc7d00b8c67
--- /dev/null
+++ b/arch/arm64/include/asm/qspinlock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_QSPINLOCK_H
+#define _ASM_ARM64_QSPINLOCK_H
+
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+
+extern void cna_configure_spin_lock_slowpath(void);
+
+extern void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+        smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+        cna_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+        native_queued_spin_unlock(lock);
+}
+#endif
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_ARM64_QSPINLOCK_H */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 552a07e11b39..73be3931e4f0 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1602,10 +1602,6 @@ void __init alternative_instructions(void)
          */
         paravirt_set_cap();
 
-#if defined(CONFIG_NUMA_AWARE_SPINLOCKS)
-        cna_configure_spin_lock_slowpath();
-#endif
-
         /*
          * First patch paravirt functions, such that we overwrite the indirect
          * call with the direct call.
diff --git a/init/main.c b/init/main.c
index ba7da8fe83ea..4a04a1c1d69e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1013,7 +1013,9 @@ void start_kernel(void)
                       panic_param);
 
         lockdep_init();
-
+#if defined(CONFIG_NUMA_AWARE_SPINLOCKS)
+        cna_configure_spin_lock_slowpath();
+#endif
         /*
          * Need to run this when irqs are enabled, because it wants
          * to self-test [hard/soft]-irqs on/off lock inversion bugs
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index d3f99060b60f..c1818fec34f2 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define pv_kick_node            __pv_kick_node
 #define pv_wait_head_or_lock    __pv_wait_head_or_lock
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) || defined(CONFIG_NUMA_AWARE_SPINLOCKS)
 #define queued_spin_lock_slowpath       native_queued_spin_lock_slowpath
 #endif
 
diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index 3983505c1118..b4951b7a5930 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -376,6 +376,14 @@ static inline void cna_lock_handoff(struct mcs_spinlock *node,
         arch_mcs_lock_handoff(&next->locked, val);
 }
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define cna_queued_spin_lock_slowpath pv_ops.lock.queued_spin_lock_slowpath
+#else
+void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val) =
+        native_queued_spin_lock_slowpath;
+EXPORT_SYMBOL(cna_queued_spin_lock_slowpath);
+#endif
+
 /*
  * Constant (boot-param configurable) flag selecting the NUMA-aware variant
  * of spinlock. Possible values: -1 (off, default) / 0 (auto) / 1 (on).
@@ -413,13 +421,13 @@ void __init cna_configure_spin_lock_slowpath(void)
                 return;
 
         if (numa_spinlock_flag == 0 && (nr_node_ids < 2 ||
-                    pv_ops.lock.queued_spin_lock_slowpath !=
+                    cna_queued_spin_lock_slowpath !=
                         native_queued_spin_lock_slowpath))
                 return;
 
         cna_init_nodes();
 
-        pv_ops.lock.queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath;
+        cna_queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath;
 
pr_info("Enabling CNA spinlock\n"); }