From: Linus Torvalds <torvalds@linux-foundation.org>
stable inclusion
from stable-v5.10.210
commit db896bbe4a9c67cee377e5f6a743350d3ae4acf6
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I93SNV
CVE: CVE-2024-26602
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.
On some systems, sys_membarrier can be very expensive, causing overall slowdowns for everything. So put a lock on the path in order to serialize the accesses to prevent the ability for this to be called at too high of a frequency and saturate the machine.
Reviewed-and-tested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Borislav Petkov <bp@alien8.de>
Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[ converted to explicit mutex_*() calls - cleanup.h is not in this stable branch - gregkh ]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Zhao Wenhui <zhaowenhui8@huawei.com>
---
 kernel/sched/membarrier.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 1757074be994..3ceb582ef9d7 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -34,6 +34,8 @@
 	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED		\
 	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
 
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+
 static void ipi_mb(void *info)
 {
 	smp_mb();	/* IPIs should be serializing but paranoid. */
@@ -89,6 +91,7 @@ static int membarrier_global_expedited(void)
 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
 		return -ENOMEM;
 
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 	rcu_read_lock();
 	for_each_online_cpu(cpu) {
@@ -135,6 +138,8 @@ static int membarrier_global_expedited(void)
 	 * rq->curr modification in scheduler.
 	 */
 	smp_mb();	/* exit from system call is not a mb */
+	mutex_unlock(&membarrier_ipi_mutex);
+
 	return 0;
 }
 
@@ -168,6 +173,7 @@ static int membarrier_private_expedited(int flags)
 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
 		return -ENOMEM;
 
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 	rcu_read_lock();
 	for_each_online_cpu(cpu) {
@@ -202,6 +208,7 @@ static int membarrier_private_expedited(int flags)
 	 * rq->curr modification in scheduler.
 	 */
 	smp_mb();	/* exit from system call is not a mb */
+	mutex_unlock(&membarrier_ipi_mutex);
 
 	return 0;
 }
@@ -243,6 +250,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 	 * between threads which are users of @mm has its membarrier state
 	 * updated.
 	 */
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 	rcu_read_lock();
 	for_each_online_cpu(cpu) {
@@ -259,6 +267,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 
 	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
+	mutex_unlock(&membarrier_ipi_mutex);
 
 	return 0;
 }
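
For readers unfamiliar with the syscall path being throttled, below is a minimal userspace sketch (not part of the patch) showing how a process registers for and then issues an expedited private membarrier. Each such call enters the kernel code that now runs under membarrier_ipi_mutex; the wrapper function, error handling, and program structure here are purely illustrative.

/*
 * Illustrative only: a userspace caller of sys_membarrier. Issuing the
 * expedited command in a tight loop from many threads is the kind of
 * hammering the commit message describes; with this patch applied, those
 * calls are serialized inside the kernel by membarrier_ipi_mutex.
 */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int membarrier(int cmd, unsigned int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* A process must register before using MEMBARRIER_CMD_PRIVATE_EXPEDITED. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0)) {
		perror("membarrier register");
		return 1;
	}

	/* Sends IPIs to every CPU currently running a thread of this process. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0)) {
		perror("membarrier");
		return 1;
	}

	return 0;
}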