From: Steve Sistare <steven.sistare@oracle.com>
hulk inclusion
category: feature
bugzilla: 38261
CVE: NA
---------------------------
Move the update of idle_stamp from idle_balance() to the call site in pick_next_task_fair(), to prepare for a future patch that adds work to pick_next_task_fair(), work which must be included in the idle_stamp interval. No functional change.
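For context, a simplified sketch of the resulting idle path in pick_next_task_fair() is shown below (condensed from the hunks that follow; the surrounding pick logic and rq lock handling are omitted):

idle:
	/*
	 * Stamp the rq before idle_balance() so that the time spent
	 * balancing is accounted as idle time.
	 */
	rq_idle_stamp_update(rq);

	new_tasks = idle_balance(rq, rf);

	/* A successful pull means this CPU does not go idle after all. */
	if (new_tasks)
		rq_idle_stamp_clear(rq);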
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/sched/fair.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8d88f8ee5625..2119467a60c5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3811,6 +3811,16 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	WRITE_ONCE(p->se.avg.util_est, ue);
 }
 
+static inline void rq_idle_stamp_update(struct rq *rq)
+{
+	rq->idle_stamp = rq_clock(rq);
+}
+
+static inline void rq_idle_stamp_clear(struct rq *rq)
+{
+	rq->idle_stamp = 0;
+}
+
 static void overload_clear(struct rq *rq)
 {
 	struct sparsemask *overload_cpus;
@@ -3856,6 +3866,8 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
 	return 0;
 }
 
+static inline void rq_idle_stamp_update(struct rq *rq) {}
+static inline void rq_idle_stamp_clear(struct rq *rq) {}
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
 
@@ -6907,8 +6919,18 @@ done: __maybe_unused;
 	return p;
 
 idle:
+
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	rq_idle_stamp_update(rq);
+
 	new_tasks = idle_balance(rq, rf);
 
+	if (new_tasks)
+		rq_idle_stamp_clear(rq);
+
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -9680,12 +9702,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 
-	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	this_rq->idle_stamp = rq_clock(this_rq);
-
 	/*
 	 * Do not pull tasks towards !active CPUs...
 	 */
@@ -9777,9 +9793,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
-	if (pulled_task)
-		this_rq->idle_stamp = 0;
-
 	rq_repin_lock(this_rq, rf);
 
 	return pulled_task;