
From: Dmitriy Alekseev <alekseev.dmitry@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Use the more universal cfs.nr_running counter instead of the has_active
flag to select an active runqueue.

Signed-off-by: Dmitriy Alekseev <alekseev.dmitry@huawei.com>
Signed-off-by: Liu Kai <liukai284@huawei.com>
---
 include/linux/xsched.h |  6 ++----
 kernel/xsched/cfs.c    | 35 ++++++++++++++++++++---------------
 kernel/xsched/core.c   |  9 ++++-----
 kernel/xsched/rt.c     |  4 ++--
 4 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 7c8a02c862d1..b862beb6c582 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -132,6 +132,7 @@ extern struct xsched_group *root_xcg;
 
 /* Manages xsched CFS-like class rbtree based runqueue. */
 struct xsched_rq_cfs {
+	unsigned int nr_running;
 	unsigned int load;
 	u64 min_xruntime;
 	struct rb_root_cached ctx_timeline;
@@ -145,7 +146,7 @@ struct xsched_rq_cfs {
  */
struct xsched_rq_rt {
 	struct list_head rq[NR_XSE_PRIO];
-
+	unsigned int nr_running;
 	int prio_nr_running[NR_XSE_PRIO];
 	atomic_t prio_nr_kicks[NR_XSE_PRIO];
 	DECLARE_BITMAP(curr_prios, NR_XSE_PRIO);
@@ -159,7 +160,6 @@ struct xsched_rq {
 	const struct xsched_class *class;
 	int state;
 
-	int nr_running;
 
 	/* RT class run queue.*/
 	struct xsched_rq_rt rt;
@@ -209,8 +209,6 @@ struct xsched_cu {
 
 	struct mutex xcu_lock;
 
-	atomic_t has_active;
-
 	wait_queue_head_t wq_xcu_idle;
 	wait_queue_head_t wq_xcu_running;
 	wait_queue_head_t wq_xcore_running;
diff --git a/kernel/xsched/cfs.c b/kernel/xsched/cfs.c
index ea0f49488fb7..867d53f8511b 100644
--- a/kernel/xsched/cfs.c
+++ b/kernel/xsched/cfs.c
@@ -104,12 +104,13 @@ static void xs_update(struct xsched_entity_cfs *xse_cfs, u64 delta)
  * No locks required to access xsched_group_xcu_priv members,
  * because only one worker thread works for one XCU.
  */
-static void xg_update(struct xsched_group_xcu_priv *xg)
+static void xg_update(struct xsched_group_xcu_priv *xg, int task_delta)
 {
 	u64 new_xrt;
 	struct xsched_entity_cfs *entry;
 
 	for (; xg; xg = &xcg_parent_grp_xcu(xg)) {
+		xg->rq->nr_running += task_delta;
 		entry = xs_pick_first(xg->rq);
 		if (entry)
 			new_xrt = entry->xruntime * xg->xse.cfs.weight;
@@ -117,6 +118,10 @@ static void xg_update(struct xsched_group_xcu_priv *xg)
 			new_xrt = XSCHED_TIME_INF;
 
 		xg->rq->min_xruntime = new_xrt;
+		xg->xse.cfs.xruntime = new_xrt;
+
+		if (!xg->xse.on_rq)
+			break;
 
 		if (xg->self->parent)
 			xs_cfs_rq_update(&xg->xse.cfs, new_xrt);
@@ -132,20 +137,19 @@ static void xg_update(struct xsched_group_xcu_priv *xg)
  */
 static void dequeue_ctx_fair(struct xsched_entity *xse)
 {
+	int task_delta;
 	struct xsched_cu *xcu = xse->xcu;
 	struct xsched_entity_cfs *first;
 	struct xsched_entity_cfs *xse_cfs = &xse->cfs;
 
+	task_delta =
+		(xse->is_group) ? -(xse_this_grp_xcu(xse_cfs)->rq->nr_running) : -1;
+
 	xs_rq_remove(xse_cfs);
-	xg_update(xse_parent_grp_xcu(xse_cfs));
+	xg_update(xse_parent_grp_xcu(xse_cfs), task_delta);
 
 	first = xs_pick_first(&xcu->xrq.cfs);
 	xcu->xrq.cfs.min_xruntime = (first) ? first->xruntime : XSCHED_TIME_INF;
-
-	if (xcu->xrq.cfs.min_xruntime == XSCHED_TIME_INF) {
-		atomic_set(&xcu->has_active, 0);
-		XSCHED_INFO("%s: set has_active to 0\n", __func__);
-	}
 }
 
 /**
@@ -159,6 +163,7 @@ static void dequeue_ctx_fair(struct xsched_entity *xse)
  */
 static void enqueue_ctx_fair(struct xsched_entity *xse, struct xsched_cu *xcu)
 {
+	int task_delta;
 	struct xsched_entity_cfs *first;
 	struct xsched_rq_cfs *rq;
 	struct xsched_entity_cfs *xse_cfs = &xse->cfs;
@@ -166,22 +171,21 @@ static void enqueue_ctx_fair(struct xsched_entity *xse, struct xsched_cu *xcu)
 	xse_cfs->weight = XSCHED_CFS_ENTITY_WEIGHT_DFLT;
 	rq = xse_cfs->cfs_rq = xse_parent_grp_xcu(xse_cfs)->rq;
 
-	/* If no XSE of only empty groups */
+	task_delta =
+		(xse->is_group) ? xse_this_grp_xcu(xse_cfs)->rq->nr_running : 1;
+
+	/* If no XSE or only empty groups */
 	if (xs_pick_first(rq) == NULL || rq->min_xruntime == XSCHED_TIME_INF)
 		rq->min_xruntime = xse_cfs->xruntime;
 	else
 		xse_cfs->xruntime = max(xse_cfs->xruntime, rq->min_xruntime);
 
 	xs_rq_add(xse_cfs);
-	xg_update(xse_parent_grp_xcu(xse_cfs));
+
+	xg_update(xse_parent_grp_xcu(xse_cfs), task_delta);
 
 	first = xs_pick_first(&xcu->xrq.cfs);
 	xcu->xrq.cfs.min_xruntime = (first) ? first->xruntime : XSCHED_TIME_INF;
-
-	if (xcu->xrq.cfs.min_xruntime != XSCHED_TIME_INF) {
-		atomic_set(&xcu->has_active, 1);
-		XSCHED_INFO("%s: set has_active to 1\n", __func__);
-	}
 }
 
 static struct xsched_entity *pick_next_ctx_fair(struct xsched_cu *xcu)
@@ -201,7 +205,8 @@ static struct xsched_entity *pick_next_ctx_fair(struct xsched_cu *xcu)
 	return container_of(xse, struct xsched_entity, cfs);
 }
 
-static inline bool xs_should_preempt_fair(struct xsched_entity *xse)
+static inline bool
+xs_should_preempt_fair(struct xsched_entity *xse)
 {
 	bool ret = (xse->last_process_time >= XSCHED_CFS_MIN_TIMESLICE);
 	return ret;
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index 288cad6a5652..a2992cf1cf17 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -627,10 +627,10 @@ static int xsched_schedule(void *input_xcu)
 		mutex_unlock(&xcu->xcu_lock);
 
 		wait_event_interruptible(xcu->wq_xcu_idle,
-			atomic_read(&xcu->has_active) || xcu->xrq.nr_running);
+			xcu->xrq.cfs.nr_running || xcu->xrq.rt.nr_running);
 
-		XSCHED_INFO("%s: rt_nr_running = %d, has_active = %d\n",
-			__func__, xcu->xrq.nr_running, atomic_read(&xcu->has_active));
+		XSCHED_INFO("%s: rt nr_running = %u, cfs nr_running = %u\n",
+			__func__, xcu->xrq.rt.nr_running, xcu->xrq.cfs.nr_running);
 
 		mutex_lock(&xcu->xcu_lock);
 		XSCHED_INFO("%s: Xcu lock taken\n", __func__);
@@ -692,6 +692,7 @@ static int xsched_schedule(void *input_xcu)
 static inline void xsched_rt_rq_init(struct xsched_cu *xcu)
 {
 	int prio = 0;
+	xcu->xrq.rt.nr_running = 0;
 
 	for_each_xse_prio(prio) {
 		INIT_LIST_HEAD(&xcu->xrq.rt.rq[prio]);
@@ -711,7 +712,6 @@ static inline void xsched_cfs_rq_init(struct xsched_cu *xcu)
 /* Initialize xsched classes' runqueues. */
 static inline void xsched_rq_init(struct xsched_cu *xcu)
 {
-	xcu->xrq.nr_running = 0;
 	xcu->xrq.curr_xse = NULL;
 	xcu->xrq.class = &rt_xsched_class;
 	xcu->xrq.state = XRQ_STATE_IDLE;
@@ -734,7 +734,6 @@ static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
 
 	atomic_set(&xcu->pending_kicks_rt, 0);
 	atomic_set(&xcu->pending_kicks_cfs, 0);
-	atomic_set(&xcu->has_active, 0);
 
 	INIT_LIST_HEAD(&xcu->vsm_list);
 
diff --git a/kernel/xsched/rt.c b/kernel/xsched/rt.c
index 60845981114c..74232c35f2b5 100644
--- a/kernel/xsched/rt.c
+++ b/kernel/xsched/rt.c
@@ -57,7 +57,7 @@ static inline void xse_rt_move_tail(struct xsched_entity *xse)
 static inline void xrq_inc_nr_running(struct xsched_entity *xse,
 				      struct xsched_cu *xcu)
 {
-	xcu->xrq.nr_running++;
+	xcu->xrq.rt.nr_running++;
 	xcu->xrq.rt.prio_nr_running[xse->rt.prio]++;
 	set_bit(xse->rt.prio, xcu->xrq.rt.curr_prios);
 }
@@ -69,7 +69,7 @@ static inline void xrq_dec_nr_running(struct xsched_entity *xse)
 {
 	struct xsched_cu *xcu = xse->xcu;
 
-	xcu->xrq.nr_running--;
+	xcu->xrq.rt.nr_running--;
 	xcu->xrq.rt.prio_nr_running[xse->rt.prio]--;
 
 	if (!xcu->xrq.rt.prio_nr_running[xse->rt.prio])
-- 
2.34.1
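An aside for reviewers: the reason a per-class counter composes better
than the boolean has_active is visible in xg_update() above. A group can
enter or leave the runqueue together with all of its children, so
enqueue/dequeue pass a task_delta equal to the whole group's nr_running
instead of toggling a flag. Below is a minimal userspace sketch of the
resulting wait/wake pattern that xsched_schedule() relies on; it is
illustrative only, every name in it is hypothetical, and none of it is
kernel API.

/* Build with: cc sketch.c -o sketch -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_idle = PTHREAD_COND_INITIALIZER;
/* Two per-class counters stand in for the single has_active boolean. */
static unsigned int cfs_nr_running;
static unsigned int rt_nr_running;

/* Analogue of the worker loop's
 * wait_event_interruptible(wq_xcu_idle, cfs || rt). */
static void *sched_worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!cfs_nr_running && !rt_nr_running)
		pthread_cond_wait(&wq_idle, &lock);
	printf("woken: cfs=%u rt=%u\n", cfs_nr_running, rt_nr_running);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* A group contributes its whole task count at once, mirroring the
 * task_delta argument added to xg_update() in this patch. */
static void enqueue(unsigned int *nr_running, int task_delta)
{
	pthread_mutex_lock(&lock);
	*nr_running += task_delta;
	pthread_cond_signal(&wq_idle);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, sched_worker, NULL);
	enqueue(&cfs_nr_running, 3); /* e.g. a group holding 3 entities */
	pthread_join(worker, NULL);
	return 0;
}

Because the worker rechecks both counters under the lock before it
sleeps, a wakeup cannot be lost even if enqueue() runs first; that is
the same guarantee wait_event_interruptible() gives wq_xcu_idle.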