hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ID8IIO

-----------------------------------------

Rename `kicks_pending_ctx_cnt` to `kicks_pending_cnt`. Directly use
atomic operations to modify counters during scheduling, submission,
and enqueueing. Remove old counter-related functions.

Fixes: 76c15076abcb ("xsched: Add basic scheduler core support")
Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
---
 include/linux/xsched.h  | 36 +------------------------
 kernel/xsched/core.c    | 60 ++++++++++++++++++++++-------------------
 kernel/xsched/vstream.c |  5 ----
 3 files changed, 33 insertions(+), 68 deletions(-)

diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index 0402dc24cc8b..38db18c0d570 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -192,7 +192,7 @@ struct xsched_entity {
 	pid_t tgid;
 
 	/* Amount of pending kicks currently sitting on this context. */
-	atomic_t kicks_pending_ctx_cnt;
+	atomic_t kicks_pending_cnt;
 
 	/* Amount of submitted kicks context, used for resched decision. */
 	atomic_t submitted_one_kick;
@@ -338,40 +338,6 @@ xse_this_grp(struct xsched_entity_cfs *xse_cfs)
 }
 #endif /* CONFIG_CGROUP_XCU */
 
-/* Increments pending kicks counter for an XCU that the given
- * xsched entity is attached to and for xsched entity's xsched
- * class.
- */
-static inline int xsched_inc_pending_kicks_xse(struct xsched_entity *xse)
-{
-	atomic_inc(&xse->xcu->pending_kicks);
-	/* Icrement pending kicks for current XSE. */
-	atomic_inc(&xse->kicks_pending_ctx_cnt);
-
-	return 0;
-}
-
-/* Decrements pending kicks counter for an XCU that the given
- * xsched entity is attached to and for XSched entity's sched
- * class.
- */
-static inline int xsched_dec_pending_kicks_xse(struct xsched_entity *xse)
-{
-	atomic_dec(&xse->xcu->pending_kicks);
-	/* Decrementing pending kicks for current XSE. */
-	atomic_dec(&xse->kicks_pending_ctx_cnt);
-
-	return 0;
-}
-
-/* Checks if there are pending kicks left on a given XCU for all
- * xsched classes.
- */
-static inline bool xsched_check_pending_kicks_xcu(struct xsched_cu *xcu)
-{
-	return atomic_read(&xcu->pending_kicks);
-}
-
 static inline int xse_integrity_check(const struct xsched_entity *xse)
 {
 	if (!xse) {
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
index b920a7923999..182743767afa 100644
--- a/kernel/xsched/core.c
+++ b/kernel/xsched/core.c
@@ -39,17 +39,13 @@ static void put_prev_ctx(struct xsched_entity *xse)
 
 static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse)
 {
-	int kick_count, scheduled = 0, not_empty;
+	int scheduled = 0, not_empty;
 	struct vstream_info *vs;
 	struct xcu_op_handler_params params;
 	struct vstream_metadata *vsm;
 	size_t kick_slice = xse->class->kick_slice;
 
-	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
-	XSCHED_DEBUG("Before decrement XSE kick_count=%d @ %s\n",
-		     kick_count, __func__);
-
-	if (kick_count == 0) {
+	if (atomic_read(&xse->kicks_pending_cnt) == 0) {
 		XSCHED_WARN("Try to select xse that has 0 kicks @ %s\n",
 			    __func__);
 		return 0;
@@ -58,18 +54,23 @@ static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse)
 	do {
 		not_empty = 0;
 		for_each_vstream_in_ctx(vs, xse->ctx) {
-			spin_lock(&vs->stream_lock);
 			vsm = xsched_vsm_fetch_first(vs);
-			spin_unlock(&vs->stream_lock);
+
 			if (!vsm)
 				continue;
+
 			list_add_tail(&vsm->node, &xcu->vsm_list);
 			scheduled++;
-			xsched_dec_pending_kicks_xse(xse);
 			not_empty++;
 		}
 	} while ((scheduled < kick_slice) && (not_empty));
 
+	if (scheduled == 0)
+		return 0;
+
+	atomic_sub(scheduled, &xse->kicks_pending_cnt);
+	atomic_add(scheduled, &xcu->pending_kicks);
+
 	/*
 	 * Iterate over all vstreams in context:
 	 * Set wr_cqe bit in last computing task in vsm_list
@@ -86,10 +87,6 @@ static size_t select_work_def(struct xsched_cu *xcu, struct xsched_entity *xse)
 		}
 	}
 
-	kick_count = atomic_read(&xse->kicks_pending_ctx_cnt);
-	XSCHED_DEBUG("After decrement XSE kick_count=%d @ %s\n",
-		     kick_count, __func__);
-
 	xse->total_scheduled += scheduled;
 	return scheduled;
 }
@@ -107,6 +104,11 @@ static struct xsched_entity *__raw_pick_next_ctx(struct xsched_cu *xcu)
 		scheduled = class->select_work ?
 			class->select_work(xcu, next) : select_work_def(xcu, next);
 
+		if (scheduled == 0) {
+			dequeue_ctx(next, xcu);
+			return NULL;
+		}
+
 		XSCHED_DEBUG("xse %d scheduled=%zu total=%zu @ %s\n",
 			     next->tgid, scheduled, next->total_scheduled, __func__);
 		break;
@@ -166,12 +168,8 @@ int delete_ctx(struct xsched_context *ctx)
 	}
 	/* Wait till context has been submitted.
 	 */
-	while (atomic_read(&xse->kicks_pending_ctx_cnt)) {
-		XSCHED_DEBUG("Deleting ctx %d, xse->kicks_pending_ctx_cnt=%d @ %s\n",
-			     xse->tgid, atomic_read(&xse->kicks_pending_ctx_cnt),
-			     __func__);
+	while (atomic_read(&xse->kicks_pending_cnt))
 		usleep_range(100, 200);
-	}
 
 	mutex_lock(&xcu->xcu_lock);
 	if (curr_xse == xse)
@@ -179,8 +177,6 @@
 	dequeue_ctx(xse, xcu);
 	--xcu->nr_ctx;
 	mutex_unlock(&xcu->xcu_lock);
-	XSCHED_DEBUG("Deleting ctx %d, pending kicks left=%d @ %s\n", xse->tgid,
-		     atomic_read(&xse->kicks_pending_ctx_cnt), __func__);
 
 	xse->class->xse_deinit(xse);
 
@@ -288,8 +284,12 @@ static int __xsched_submit(struct xsched_cu *xcu, struct xsched_entity *xse)
 		kfree(vsm);
 	}
 
+	if (submitted == 0)
+		return 0;
+
 	xse->last_exec_runtime += submit_exec_time;
 	xse->total_submitted += submitted;
+	atomic_sub(submitted, &xcu->pending_kicks);
 	atomic_add(submitted, &xse->submitted_one_kick);
 	INIT_LIST_HEAD(&xcu->vsm_list);
 	XSCHED_DEBUG("Xse %d submitted=%d total=%zu, exec_time=%ld @ %s\n",
@@ -325,6 +325,11 @@ int xsched_vsm_add_tail(struct vstream_info *vs, vstream_args_t *arg)
 	new_vsm->add_time = ktime_get();
 	vs->kicks_count += 1;
 
+	/* Increasing a total amount of kicks on an CU to which this
+	 * context is attached to based on sched_class.
+	 */
+	atomic_inc(&vs->ctx->xse.kicks_pending_cnt);
+
 	return 0;
 }
 
@@ -342,11 +347,12 @@ struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs)
 		return NULL;
 	}
 
+	spin_lock(&vs->stream_lock);
 	vsm = list_first_entry(&vs->metadata_list, struct vstream_metadata, node);
 	if (!vsm) {
 		XSCHED_ERR("Corrupted metadata list in vs %u @ %s\n",
 			   vs->id, __func__);
-		return NULL;
+		goto out_unlock;
 	}
 
 	list_del(&vsm->node);
@@ -356,6 +362,9 @@ struct vstream_metadata *xsched_vsm_fetch_first(struct vstream_info *vs)
 	else
 		vs->kicks_count -= 1;
 
+out_unlock:
+	spin_unlock(&vs->stream_lock);
+
 	return vsm;
 }
 
@@ -376,11 +385,6 @@ int xsched_schedule(void *input_xcu)
 			break;
 		}
 
-		if (!xsched_check_pending_kicks_xcu(xcu)) {
-			XSCHED_WARN("%s: No pending kicks on xcu %u\n", __func__, xcu->id);
-			continue;
-		}
-
 		next_xse = __raw_pick_next_ctx(xcu);
 		if (!next_xse) {
 			XSCHED_WARN("%s: Couldn't find next xse on xcu %u\n", __func__, xcu->id);
@@ -397,7 +401,7 @@
 
 			/* if not deleted yet */
 			put_prev_ctx(curr_xse);
-			if (!atomic_read(&curr_xse->kicks_pending_ctx_cnt))
+			if (!atomic_read(&curr_xse->kicks_pending_cnt))
 				dequeue_ctx(curr_xse, xcu);
 
 #ifdef CONFIG_CGROUP_XCU
@@ -460,7 +464,7 @@ int xsched_init_entity(struct xsched_context *ctx, struct vstream_info *vs)
 	int err = 0;
 	struct xsched_entity *xse = &ctx->xse;
 
-	atomic_set(&xse->kicks_pending_ctx_cnt, 0);
+	atomic_set(&xse->kicks_pending_cnt, 0);
 	atomic_set(&xse->submitted_one_kick, 0);
 
 	xse->total_scheduled = 0;
diff --git a/kernel/xsched/vstream.c b/kernel/xsched/vstream.c
index 5073c9bf6af5..d01151c65dcf 100644
--- a/kernel/xsched/vstream.c
+++ b/kernel/xsched/vstream.c
@@ -617,11 +617,6 @@ int vstream_kick(struct vstream_args *arg)
 		}
 
 		enqueue_ctx(xse, xcu);
-
-		/* Increasing a total amount of kicks on an CU to which this
-		 * context is attached to based on sched_class.
-		 */
-		xsched_inc_pending_kicks_xse(&vstream->ctx->xse);
 	} while (err == -EBUSY);
 
 	spin_unlock(&vstream->stream_lock);
-- 
2.34.1
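For reference, a minimal userspace sketch of the pending-kick accounting this
change settles on: enqueueing a kick raises the per-context counter,
select_work_def() moves the selected kicks onto the XCU counter, and
__xsched_submit() drops them once dispatched. The types and helpers below are
simplified stand-ins (C11 atomics instead of the kernel's atomic_t), not code
from this series.

/* Illustrative model only; names mirror the patched fields. */
#include <stdatomic.h>
#include <stdio.h>

struct xse_model { atomic_int kicks_pending_cnt; };	/* per context */
struct xcu_model { atomic_int pending_kicks; };		/* per compute unit */

/* xsched_vsm_add_tail(): one new kick lands on the context. */
static void enqueue_kick(struct xse_model *xse)
{
	atomic_fetch_add(&xse->kicks_pending_cnt, 1);
}

/* select_work_def(): move 'scheduled' kicks from the context to the XCU. */
static int select_work(struct xse_model *xse, struct xcu_model *xcu, int scheduled)
{
	if (scheduled == 0)
		return 0;
	atomic_fetch_sub(&xse->kicks_pending_cnt, scheduled);
	atomic_fetch_add(&xcu->pending_kicks, scheduled);
	return scheduled;
}

/* __xsched_submit(): drop 'submitted' kicks from the XCU once dispatched. */
static void submit(struct xcu_model *xcu, int submitted)
{
	if (submitted)
		atomic_fetch_sub(&xcu->pending_kicks, submitted);
}

int main(void)
{
	struct xse_model xse = { 0 };
	struct xcu_model xcu = { 0 };

	for (int i = 0; i < 3; i++)
		enqueue_kick(&xse);

	int n = select_work(&xse, &xcu, 3);
	submit(&xcu, n);

	printf("ctx pending %d, xcu pending %d\n",
	       atomic_load(&xse.kicks_pending_cnt),
	       atomic_load(&xcu.pending_kicks));
	return 0;
}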