hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9FACB
--------------------------------
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
---
 include/linux/mm_types.h | 11 +++++++++++
 kernel/sched/core.c      | 33 +++++++++++++++++++++++++++++++++
 kernel/sched/sched.h     |  5 +++++
 3 files changed, 49 insertions(+)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index aa17e8c500ce..c70e0ddb5fd6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1091,6 +1091,8 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 
 #ifdef CONFIG_SCHED_MM_CID
 
+DECLARE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
 enum mm_cid_state {
 	MM_CID_UNSET = -1U,		/* Unset state has lazy_put flag set. */
 	MM_CID_LAZY_PUT = (1U << 31),
@@ -1136,6 +1138,9 @@ static inline void mm_init_cid(struct mm_struct *mm)
 {
 	int i;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	for_each_possible_cpu(i) {
 		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
 
@@ -1147,6 +1152,9 @@ static inline void mm_init_cid(struct mm_struct *mm)
 
 static inline int mm_alloc_cid(struct mm_struct *mm)
 {
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return 0;
+
 	mm->pcpu_cid = alloc_percpu(struct mm_cid);
 	if (!mm->pcpu_cid)
 		return -ENOMEM;
@@ -1156,6 +1164,9 @@ static inline int mm_alloc_cid(struct mm_struct *mm)
 
 static inline void mm_destroy_cid(struct mm_struct *mm)
 {
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	free_percpu(mm->pcpu_cid);
 	mm->pcpu_cid = NULL;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eff79d6e1e81..5eaa3c0581e3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -12188,6 +12188,17 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 
 #ifdef CONFIG_SCHED_MM_CID
 
+DEFINE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
+static int __init sched_mm_cid_setup(char *buf)
+{
+	static_branch_enable(&sched_mm_cid_enable);
+	pr_debug("sched mm cid enabled\n");
+
+	return 0;
+}
+early_param("sched_mm_cid", sched_mm_cid_setup);
+
 /*
  * @cid_lock: Guarantee forward-progress of cid allocation.
  *
@@ -12418,6 +12429,10 @@ void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
 
 	if (!mm)
 		return;
+
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	src_cpu = t->migrate_from_cpu;
 	if (src_cpu == -1) {
 		t->last_mm_cid = -1;
@@ -12614,6 +12629,9 @@ void init_sched_mm_cid(struct task_struct *t)
 	struct mm_struct *mm = t->mm;
 	int mm_users = 0;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	if (mm) {
 		mm_users = atomic_read(&mm->mm_users);
 		if (mm_users == 1)
@@ -12628,6 +12646,9 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 	struct callback_head *work = &curr->cid_work;
 	unsigned long now = jiffies;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
 	    work->next != work)
 		return;
@@ -12645,6 +12666,9 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 	if (!mm)
 		return;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	preempt_disable();
 	rq = this_rq();
 	rq_lock_irqsave(rq, &rf);
@@ -12669,6 +12693,9 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	if (!mm)
 		return;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	preempt_disable();
 	rq = this_rq();
 	rq_lock_irqsave(rq, &rf);
@@ -12693,6 +12720,9 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 	if (!mm)
 		return;
 
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	preempt_disable();
 	rq = this_rq();
 	rq_lock_irqsave(rq, &rf);
@@ -12710,6 +12740,9 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 
 void sched_mm_cid_fork(struct task_struct *t)
 {
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
 	t->mm_cid_active = 1;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f643777adbe4..e0813db80092 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3479,6 +3479,8 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
 
 #ifdef CONFIG_SCHED_MM_CID
 
+DECLARE_STATIC_KEY_FALSE(sched_mm_cid_enable);
+
 #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
 #define MM_CID_SCAN_DELAY	100			/* 100ms */
 
@@ -3663,6 +3665,9 @@ static inline void switch_mm_cid(struct rq *rq,
 				 struct task_struct *prev,
 				 struct task_struct *next)
 {
+	if (static_branch_unlikely(&sched_mm_cid_enable))
+		return;
+
 	/*
 	 * Provide a memory barrier between rq->curr store and load of
 	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
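
Note for reviewers (illustration only, not part of the patch): the gate added above follows the usual static-key plus early_param idiom. A minimal standalone sketch of that pattern, using a hypothetical key name example_key and boot parameter "example_feature", would look roughly like this:

#include <linux/init.h>
#include <linux/jump_label.h>

/* The key defaults to false, so static_branch_unlikely() compiles to a no-op here. */
DEFINE_STATIC_KEY_FALSE(example_key);

/* Runs early during boot when "example_feature" appears on the kernel command line. */
static int __init example_feature_setup(char *buf)
{
	static_branch_enable(&example_key);
	return 0;
}
early_param("example_feature", example_feature_setup);

void example_hook(void)
{
	/* Once the key is enabled, this site is patched to branch out early. */
	if (static_branch_unlikely(&example_key))
		return;

	/* ... normal work, skipped after the key is flipped ... */
}

Because the check is a patched jump rather than a load and compare, leaving it in hot paths such as switch_mm_cid() costs essentially nothing when the boot parameter is not given.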
Feedback: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/6045
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...