hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDF44?from=project-issue

----------------------------------------

Replace cfs_rq->min_vruntime with cfs_rq->zero_vruntime. Instead of
advancing a monotonic minimum from update_curr(), dequeue_entity() and
reweight_entity(), recompute the zero point from avg_vruntime() in the
new update_zero_vruntime() helper, called from __enqueue_entity() and
__dequeue_entity(). This drops the qzc_fixed_switch vruntime clamp in
reweight_entity() and switches the debug interfaces to report
zero_vruntime.

Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
Signed-off-by: wulibin163 <wulibin163@126.com>
---
 kernel/sched/debug.c |  8 ++--
 kernel/sched/fair.c  | 93 +++++++++-----------------------------------
 kernel/sched/sched.h |  2 +-
 3 files changed, 24 insertions(+), 79 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 7a9e6896c699..af3cb2bb6a9a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -628,7 +628,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
-	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
+	s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
 	struct sched_entity *last, *first, *root;
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -653,15 +653,15 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	last = __pick_last_entity(cfs_rq);
 	if (last)
 		right_vruntime = last->vruntime;
-	min_vruntime = cfs_rq->min_vruntime;
+	zero_vruntime = cfs_rq->zero_vruntime;
 	raw_spin_rq_unlock_irqrestore(rq, flags);
 
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
 			SPLIT_NS(left_deadline));
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
 			SPLIT_NS(left_vruntime));
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
-			SPLIT_NS(min_vruntime));
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "zero_vruntime",
+			SPLIT_NS(zero_vruntime));
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
 			SPLIT_NS(avg_vruntime(cfs_rq)));
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ebfbdc1a4ce4..533ee220b4b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -740,7 +740,7 @@ static inline bool entity_before(const struct sched_entity *a,
 
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return (s64)(se->vruntime - cfs_rq->min_vruntime);
+	return (s64)(se->vruntime - cfs_rq->zero_vruntime);
 }
 
 #define __node_2_se(node) \
@@ -792,13 +792,13 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
  *
  * Which we track using:
  *
- *                    v0 := cfs_rq->min_vruntime
+ *                    v0 := cfs_rq->zero_vruntime
  * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
  *              \Sum w_i := cfs_rq->avg_load
  *
- * Since min_vruntime is a monotonic increasing variable that closely tracks
- * the per-task service, these deltas: (v_i - v), will be in the order of the
- * maximal (virtual) lag induced in the system due to quantisation.
+ * Since zero_vruntime closely tracks the per-task service, these
+ * deltas: (v_i - v), will be in the order of the maximal (virtual) lag
+ * induced in the system due to quantisation.
 *
 * Also, we use scale_load_down() to reduce the size.
 *
@@ -857,7 +857,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
 		avg = div_s64(avg, load);
 	}
 
-	return cfs_rq->min_vruntime + avg;
+	return cfs_rq->zero_vruntime + avg;
 }
 
 /*
@@ -923,7 +923,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
 		load += weight;
 	}
 
-	return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
+	return avg >= (s64)(vruntime - cfs_rq->zero_vruntime) * load;
 }
 
 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -938,43 +938,13 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return vruntime_eligible(cfs_rq, se->vruntime);
 }
 
-static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
+static void update_zero_vruntime(struct cfs_rq *cfs_rq)
 {
-	u64 min_vruntime = cfs_rq->min_vruntime;
-	/*
-	 * open coded max_vruntime() to allow updating avg_vruntime
-	 */
-	s64 delta = (s64)(vruntime - min_vruntime);
-	if (delta > 0) {
-		avg_vruntime_update(cfs_rq, delta);
-		min_vruntime = vruntime;
-	}
-	return min_vruntime;
-}
-
-static void update_min_vruntime(struct cfs_rq *cfs_rq)
-{
-	struct sched_entity *se = __pick_root_entity(cfs_rq);
-	struct sched_entity *curr = cfs_rq->curr;
-	u64 vruntime = cfs_rq->min_vruntime;
-
-	if (curr) {
-		if (curr->on_rq)
-			vruntime = curr->vruntime;
-		else
-			curr = NULL;
-	}
+	u64 vruntime = avg_vruntime(cfs_rq);
+	s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
 
-	if (se) {
-		if (!curr)
-			vruntime = se->min_vruntime;
-		else
-			vruntime = min_vruntime(vruntime, se->min_vruntime);
-	}
-
-	/* ensure we never gain time by being placed backwards. */
-	u64_u32_store(cfs_rq->min_vruntime,
-		      __update_min_vruntime(cfs_rq, vruntime));
+	avg_vruntime_update(cfs_rq, delta);
+	cfs_rq->zero_vruntime = vruntime;
 }
 
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
@@ -1017,6 +987,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	avg_vruntime_add(cfs_rq, se);
+	update_zero_vruntime(cfs_rq);
 	se->min_vruntime = se->vruntime;
 	rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
 				__entity_less, &min_vruntime_cb);
@@ -1027,6 +998,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
 				  &min_vruntime_cb);
 	avg_vruntime_sub(cfs_rq, se);
+	update_zero_vruntime(cfs_rq);
 }
 
 struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
@@ -1343,7 +1315,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_deadline(cfs_rq, curr);
-	update_min_vruntime(cfs_rq);
 
 	if (entity_is_task(curr)) {
 		struct task_struct *curtask = task_of(curr);
@@ -3941,14 +3912,6 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	bool curr = cfs_rq->curr == se;
 	u64 avruntime;
 
-	if (qzc_fixed_switch && curr && se->on_rq && cfs_rq->nr_running == 1 &&
-	    se->vruntime < cfs_rq->min_vruntime) {
-		s64 rel_deadline = se->deadline - se->vruntime;
-
-		se->vruntime = cfs_rq->min_vruntime;
-		se->deadline = se->vruntime + rel_deadline;
-	}
-
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		update_curr(cfs_rq);
@@ -3984,15 +3947,6 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		update_load_add(&cfs_rq->load, se->load.weight);
 		if (!curr)
 			__enqueue_entity(cfs_rq, se);
-
-		/*
-		 * The entity's vruntime has been adjusted, so let's check
-		 * whether the rq-wide min_vruntime needs updated too. Since
-		 * the calculations above require stable min_vruntime rather
-		 * than up-to-date one, we do the update at the end of the
-		 * reweight process.
-		 */
-		update_min_vruntime(cfs_rq);
 	}
 }
 
@@ -5628,15 +5582,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	update_cfs_group(se);
 
-	/*
-	 * Now advance min_vruntime if @se was the entity holding it back,
-	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-	 * put back on, and if we advance min_vruntime, we'll be placed back
-	 * further than we started -- ie. we'll be penalized.
-	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
-		update_min_vruntime(cfs_rq);
-
 	if (cfs_rq->nr_running == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
 }
@@ -14876,7 +14821,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
-	u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
+	u64_u32_store(cfs_rq->zero_vruntime, (u64)(-(1LL << 20)));
 	/*
 	 * We suppose the original intention of (u64)(-(1LL << 20)) was likely to
 	 * force cfs_rq->min_vruntime to overflow as quickly as possible,
@@ -14885,7 +14830,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 	 * To accelerate the reproduction of these issues,
 	 * we have temporarily modified the initial value of cfs_rq->min_vruntime.
 	 */
-	cfs_rq->min_vruntime = (u64)(4596393947272479);
+	cfs_rq->zero_vruntime = (u64)(4596393947272479);
 #ifdef CONFIG_SMP
 	raw_spin_lock_init(&cfs_rq->removed.lock);
 #endif
@@ -15338,7 +15283,7 @@ __init void init_sched_fair_class(void)
 }
 
 u64 sched_debug_min_vruntime(struct cfs_rq *cfs_rq)
 {
-	return cfs_rq->min_vruntime;
+	return cfs_rq->zero_vruntime;
 }
 EXPORT_SYMBOL(sched_debug_min_vruntime);
@@ -15346,9 +15291,9 @@ void sched_debug_cfs_rq_info(struct cfs_rq *cfs_rq)
 {
 	u64 qzc_avruntime = avg_vruntime(cfs_rq);
 
-	printk("%s:%s:%d, cfs_rq=[%p]\tcfs_rq->nr_running=[%d]\tcfs_rq->avg_vruntime=[%lld]\tcfs_rq->min_vruntime=[%llu]\tcfs_rq->avg_load=[%llu]\tavg_vruntime(cfs_rq)=[%llu]\n",
+	printk("%s:%s:%d, cfs_rq=[%p]\tcfs_rq->nr_running=[%d]\tcfs_rq->avg_vruntime=[%lld]\tcfs_rq->zero_vruntime=[%llu]\tcfs_rq->avg_load=[%llu]\tavg_vruntime(cfs_rq)=[%llu]\n",
 		__FILENAME__,__FUNCTION__, __LINE__,
-		cfs_rq, cfs_rq->nr_running, cfs_rq->avg_vruntime, cfs_rq->min_vruntime, cfs_rq->avg_load, qzc_avruntime);
+		cfs_rq, cfs_rq->nr_running, cfs_rq->avg_vruntime, cfs_rq->zero_vruntime, cfs_rq->avg_load, qzc_avruntime);
 
 	if (cfs_rq->curr) {
 		printk("%s:%s:%d, curr=[%p]\tpid=[%d]\ttgid=[%d]\tcurr->vruntime=[%llu]\tcurr->load.weight=[%lu]\tcurr->vlag=[%lld]\tcurr->slice=[%llu]\tcurr->deadline=[%llu]\tcurr->my_q=[%p]\treal_vlag=[%lld]\tvruntime_eligible=[%d]\n",
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f6a3f93d1f75..0f4d4445657e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -663,7 +663,7 @@ struct cfs_rq {
 	u64			avg_load;
 
 	u64			exec_clock;
-	u64			min_vruntime;
+	u64			zero_vruntime;
 #ifdef CONFIG_SCHED_CORE
 	unsigned int		forceidle_seq;
 	u64			min_vruntime_fi;
-- 
2.34.1
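
For reference (not part of the patch): a minimal user-space sketch of the
bookkeeping the patch relies on. The names mirror kernel/sched/fair.c but
the signatures are simplified, weights are unscaled (no scale_load_down())
and the cfs_rq->curr contribution to avg_vruntime() is ignored; toy_cfs_rq
and the main() driver are purely illustrative. It shows how rebasing
zero_vruntime onto avg_vruntime() keeps the entity keys (v_i - v0) small
while leaving \Sum (v_i - v0) * w_i consistent.

/* Standalone sketch of the zero_vruntime bookkeeping; simplified from
 * kernel/sched/fair.c (no scale_load_down(), no cfs_rq->curr term). */
#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq {
	int64_t  avg_vruntime;   /* \Sum (v_i - zero_vruntime) * w_i */
	uint64_t avg_load;       /* \Sum w_i */
	uint64_t zero_vruntime;  /* v0, the reference point */
};

static int64_t entity_key(struct toy_cfs_rq *rq, uint64_t v)
{
	return (int64_t)(v - rq->zero_vruntime);
}

static void avg_vruntime_add(struct toy_cfs_rq *rq, uint64_t v, uint64_t w)
{
	rq->avg_vruntime += entity_key(rq, v) * (int64_t)w;
	rq->avg_load     += w;
}

static uint64_t avg_vruntime(struct toy_cfs_rq *rq)
{
	int64_t avg = rq->avg_vruntime;

	if (rq->avg_load)
		avg /= (int64_t)rq->avg_load;   /* div_s64() in the kernel */
	return rq->zero_vruntime + avg;
}

/* Mirrors update_zero_vruntime(): rebase v0 onto the current weighted
 * average; the -delta * avg_load term is what avg_vruntime_update()
 * applies so the accumulated sum stays relative to the new v0. */
static void update_zero_vruntime(struct toy_cfs_rq *rq)
{
	uint64_t v = avg_vruntime(rq);
	int64_t delta = (int64_t)(v - rq->zero_vruntime);

	rq->avg_vruntime -= (int64_t)rq->avg_load * delta;
	rq->zero_vruntime = v;
}

int main(void)
{
	struct toy_cfs_rq rq = { .zero_vruntime = 1000 };

	avg_vruntime_add(&rq, 1000, 1024);	/* entity A: v=1000, w=1024 */
	update_zero_vruntime(&rq);
	avg_vruntime_add(&rq, 7000, 2048);	/* entity B: v=7000, w=2048 */
	update_zero_vruntime(&rq);

	/* zero_vruntime now equals the weighted average (5000), so the
	 * per-entity keys stay small: key(A) = -4000, key(B) = 2000. */
	printf("zero_vruntime=%llu avg=%llu key(A)=%lld key(B)=%lld\n",
	       (unsigned long long)rq.zero_vruntime,
	       (unsigned long long)avg_vruntime(&rq),
	       (long long)entity_key(&rq, 1000),
	       (long long)entity_key(&rq, 7000));
	return 0;
}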