hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9FACB
--------------------------------
Add an early boot parameter, sched_mm_cid, that flips the new
sched_mm_cid_enable static key. With the key set, the per-mm concurrency
id machinery is bypassed: mm_init_cid()/mm_alloc_cid()/mm_destroy_cid()
become no-ops, the mm_cid hooks on fork, execve, signal exit, migration,
task tick and context switch return early, and task_mm_cid() falls back
to raw_smp_processor_id().

Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
---
 include/linux/mm.h       |  3 +++
 include/linux/mm_types.h | 18 ++++++++++++++++++
 kernel/sched/core.c      | 32 ++++++++++++++++++++++++++++++++
 kernel/sched/sched.h     |  9 +++++++++
 4 files changed, 62 insertions(+)
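A usage sketch (not part of the changelog): sched_mm_cid_setup() ignores
its argument, so the bare token on the kernel command line is enough to
flip the static key at early boot, e.g.

    linux /boot/vmlinuz root=... sched_mm_cid

Once the key is set, task_mm_cid() reports the raw CPU number instead of
a compact per-mm concurrency id, so userspace that indexes per-mm data
by this value must size its arrays by possible CPUs rather than by
thread count.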
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46c7b073824c..a2b8b9da8c64 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2333,6 +2333,9 @@ void sched_mm_cid_fork(struct task_struct *t);
 void sched_mm_cid_exit_signals(struct task_struct *t);
 static inline int task_mm_cid(struct task_struct *t)
 {
+	if (sched_mm_cid_enabled)
+		return raw_smp_processor_id();
+
 	return t->mm_cid;
 }
 #else
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index aa17e8c500ce..d77c423ddcd2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1091,6 +1091,12 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 #ifdef CONFIG_SCHED_MM_CID
+DECLARE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
+#ifndef sched_mm_cid_enabled
+#define sched_mm_cid_enabled static_branch_unlikely(&sched_mm_cid_enable)
+#endif
+
 enum mm_cid_state {
 	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
 	MM_CID_LAZY_PUT = (1U << 31),
@@ -1136,6 +1142,9 @@ static inline void mm_init_cid(struct mm_struct *mm)
 {
 	int i;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	for_each_possible_cpu(i) {
 		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
@@ -1147,6 +1156,9 @@ static inline void mm_init_cid(struct mm_struct *mm)
 static inline int mm_alloc_cid(struct mm_struct *mm)
 {
+	if (sched_mm_cid_enabled)
+		return 0;
+
 	mm->pcpu_cid = alloc_percpu(struct mm_cid);
 	if (!mm->pcpu_cid)
 		return -ENOMEM;
@@ -1156,12 +1168,18 @@ static inline int mm_alloc_cid(struct mm_struct *mm)
 static inline void mm_destroy_cid(struct mm_struct *mm)
 {
+	if (sched_mm_cid_enabled)
+		return;
+
 	free_percpu(mm->pcpu_cid);
 	mm->pcpu_cid = NULL;
 }
 
 static inline unsigned int mm_cid_size(void)
 {
+	if (sched_mm_cid_enabled)
+		return 0;
+
 	return cpumask_size();
 }
 #else /* CONFIG_SCHED_MM_CID */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eff79d6e1e81..532231d062e8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -12188,6 +12188,16 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 #ifdef CONFIG_SCHED_MM_CID
+DEFINE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
+static int __init sched_mm_cid_setup(char *buf)
+{
+	static_branch_enable(&sched_mm_cid_enable);
+
+	return 0;
+}
+early_param("sched_mm_cid", sched_mm_cid_setup);
+
 /*
  * @cid_lock: Guarantee forward-progress of cid allocation.
  *
@@ -12416,8 +12426,12 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
 	lockdep_assert_rq_held(dst_rq);
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (!mm)
 		return;
+
 	src_cpu = t->migrate_from_cpu;
 	if (src_cpu == -1) {
 		t->last_mm_cid = -1;
@@ -12614,6 +12628,9 @@ void init_sched_mm_cid(struct task_struct *t)
 	struct mm_struct *mm = t->mm;
 	int mm_users = 0;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (mm) {
 		mm_users = atomic_read(&mm->mm_users);
 		if (mm_users == 1)
@@ -12628,6 +12645,9 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 	struct callback_head *work = &curr->cid_work;
 	unsigned long now = jiffies;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
 	    work->next != work)
 		return;
@@ -12642,6 +12662,9 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 	struct rq_flags rf;
 	struct rq *rq;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (!mm)
 		return;
@@ -12666,6 +12689,9 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	struct rq_flags rf;
 	struct rq *rq;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (!mm)
 		return;
@@ -12690,6 +12716,9 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 	struct rq_flags rf;
 	struct rq *rq;
 
+	if (sched_mm_cid_enabled)
+		return;
+
 	if (!mm)
 		return;
@@ -12710,6 +12739,9 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 void sched_mm_cid_fork(struct task_struct *t)
 {
+	if (sched_mm_cid_enabled)
+		return;
+
 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
 	t->mm_cid_active = 1;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f643777adbe4..b39b7cf2583f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3479,6 +3479,12 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
 #ifdef CONFIG_SCHED_MM_CID
+DECLARE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
+#ifndef sched_mm_cid_enabled
+#define sched_mm_cid_enabled static_branch_unlikely(&sched_mm_cid_enable)
+#endif
+
 #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
 #define MM_CID_SCAN_DELAY	100			/* 100ms */
@@ -3663,6 +3669,9 @@ static inline void switch_mm_cid(struct rq *rq,
 				 struct task_struct *prev,
 				 struct task_struct *next)
 {
+	if (sched_mm_cid_enabled)
+		return;
+
 	/*
 	 * Provide a memory barrier between rq->curr store and load of
 	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
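
As a closing illustration (not part of the patch): the effect is visible
from userspace through rseq, which is where the kernel publishes mm_cid.
The probe below is a hypothetical sketch assuming glibc 2.35+ (which
registers rseq per thread and exports __rseq_offset/__rseq_size), a
compiler that provides __builtin_thread_pointer(), and a kernel whose
rseq ABI already carries the mm_cid field; the struct layout is copied
locally from the kernel uapi so the sketch does not depend on the
installed header version.

/* rseq_mm_cid_probe.c - hypothetical probe, see the note above. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/rseq.h>	/* __rseq_offset, __rseq_size (glibc 2.35+) */

/* Local copy of the kernel uapi struct rseq layout, up to mm_cid. */
struct rseq_abi {
	uint32_t cpu_id_start;
	uint32_t cpu_id;
	uint64_t rseq_cs;
	uint32_t flags;
	uint32_t node_id;
	uint32_t mm_cid;	/* offset 20; written by 6.3+ kernels */
} __attribute__((aligned(32)));

int main(void)
{
	struct rseq_abi *rs;

	/* __rseq_size is 0 when glibc could not register rseq. */
	if (__rseq_size < 24) {
		fprintf(stderr, "rseq area too small for mm_cid\n");
		return 1;
	}
	rs = (struct rseq_abi *)((char *)__builtin_thread_pointer() +
				 __rseq_offset);
	printf("cpu_id=%d mm_cid=%u\n", sched_getcpu(), rs->mm_cid);
	return 0;
}

Booted with sched_mm_cid, the printed mm_cid should track cpu_id (pin
the task with taskset to see a stable value); on a default boot, a
single-threaded process prints a compact per-mm id, typically 0. On
kernels without the mm_cid rseq extension the field simply stays 0.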