From: Ren Zhijie <renzhijie2@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7X7WW
--------------------------------
Add helper functions bpf_sched_set_tg_tag() and bpf_sched_set_task_tag() to set the tag of a task group or a task.
They must not be called while rq->lock is held.
The use case is that other kernel subsystems, such as the network stack, can use them to mark key tasks.
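For illustration only, a minimal sketch of a BPF program that tags the task currently sending on a TCP socket as a key task. The attach point (fentry/tcp_sendmsg), the tag value, and the availability of the helper declaration through the generated bpf_helper_defs.h are assumptions for the example, not part of this patch; a network-side tracing hook is chosen so rq->lock is not held when the helper runs:

  /* Sketch only: hook point and tag value are hypothetical. */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  #define KEY_TASK_TAG	1	/* example tag value, not defined by this patch */

  SEC("fentry/tcp_sendmsg")
  int BPF_PROG(mark_key_task, struct sock *sk)
  {
  	/* The current task is the one sending on the socket; tag it. */
  	struct task_struct *tsk = bpf_get_current_task_btf();
  	int err = bpf_sched_set_task_tag(tsk, KEY_TASK_TAG);

  	if (err)
  		bpf_printk("bpf_sched_set_task_tag failed: %d", err);
  	return 0;
  }

  char _license[] SEC("license") = "GPL";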
Signed-off-by: Ren Zhijie <renzhijie2@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Guan Jing <guanjing6@huawei.com>
---
 include/uapi/linux/bpf.h       | 14 +++++++++++
 kernel/bpf/helpers.c           |  6 +++++
 kernel/sched/bpf_sched.c       | 45 ++++++++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h | 14 +++++++++++
 4 files changed, 79 insertions(+)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index aa071e9bde72..8ca15a8d2ff8 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5589,6 +5589,18 @@ union bpf_attr {
  *		different workloads.
  *	Return
  *		Task tag, if used, 0 as default tag, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_tg_tag(struct task_group *tg, s64 tag)
+ *	Description
+ *		Set tag to *tg* and its descendants.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_task_tag(struct task_struct *tsk, s64 tag)
+ *	Description
+ *		Set tag to *tsk*.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define ___BPF_FUNC_MAPPER(FN, ctx...)	\
 	FN(unspec, 0, ##ctx)	\
@@ -5807,6 +5819,8 @@ union bpf_attr {
 	FN(sk_original_addr, 213, ##ctx)	\
 	FN(sched_tg_tag_of, 214, ##ctx)	\
 	FN(sched_task_tag_of, 215, ##ctx)	\
+	FN(sched_set_tg_tag, 216, ##ctx)	\
+	FN(sched_set_task_tag, 217, ##ctx)	\
 	/* */
 
 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 3424eba97132..0a00d21a6f47 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1656,6 +1656,8 @@ const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
 const struct bpf_func_proto bpf_sched_tg_tag_of_proto __weak;
 const struct bpf_func_proto bpf_sched_task_tag_of_proto __weak;
+const struct bpf_func_proto bpf_sched_set_tg_tag_proto __weak;
+const struct bpf_func_proto bpf_sched_set_task_tag_proto __weak;
 
 const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
@@ -1709,6 +1711,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_sched_tg_tag_of_proto;
 	case BPF_FUNC_sched_task_tag_of:
 		return &bpf_sched_task_tag_of_proto;
+	case BPF_FUNC_sched_set_tg_tag:
+		return &bpf_sched_set_tg_tag_proto;
+	case BPF_FUNC_sched_set_task_tag:
+		return &bpf_sched_set_task_tag_proto;
 	default:
 		break;
 	}
diff --git a/kernel/sched/bpf_sched.c b/kernel/sched/bpf_sched.c
index 0601156336b2..662aa0b34216 100644
--- a/kernel/sched/bpf_sched.c
+++ b/kernel/sched/bpf_sched.c
@@ -104,3 +104,48 @@ const struct bpf_func_proto bpf_sched_task_tag_of_proto = {
 	.arg1_type = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
 	.arg1_btf_id = &btf_sched_task_ids[0],
 };
+
+BPF_CALL_2(bpf_sched_set_tg_tag, struct task_group *, tg, s64, tag)
+{
+#if CONFIG_CGROUP_SCHED
+	if (tg == NULL || tg == &root_task_group)
+		return -EINVAL;
+
+	if (tg->tag == tag)
+		return 0;
+
+	rcu_read_lock();
+	walk_tg_tree_from(tg, tg_change_tag, tg_nop, (void *)(&tag));
+	rcu_read_unlock();
+
+	return 0;
+#endif
+	return -EPERM;
+}
+
+const struct bpf_func_proto bpf_sched_set_tg_tag_proto = {
+	.func = bpf_sched_set_tg_tag,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id = &btf_sched_tg_ids[0],
+	.arg2_type = ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_sched_set_task_tag, struct task_struct *, tsk, s64, tag)
+{
+	if (tsk == NULL)
+		return -EINVAL;
+
+	sched_settag(tsk, tag);
+	return 0;
+}
+
+const struct bpf_func_proto bpf_sched_set_task_tag_proto = {
+	.func = bpf_sched_set_task_tag,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id = &btf_sched_task_ids[0],
+	.arg2_type = ARG_ANYTHING,
+};
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index aa071e9bde72..8ca15a8d2ff8 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5589,6 +5589,18 @@ union bpf_attr {
  *		different workloads.
  *	Return
  *		Task tag, if used, 0 as default tag, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_tg_tag(struct task_group *tg, s64 tag)
+ *	Description
+ *		Set tag to *tg* and its descendants.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_task_tag(struct task_struct *tsk, s64 tag)
+ *	Description
+ *		Set tag to *tsk*.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define ___BPF_FUNC_MAPPER(FN, ctx...)	\
 	FN(unspec, 0, ##ctx)	\
@@ -5807,6 +5819,8 @@ union bpf_attr {
 	FN(sk_original_addr, 213, ##ctx)	\
 	FN(sched_tg_tag_of, 214, ##ctx)	\
 	FN(sched_task_tag_of, 215, ##ctx)	\
+	FN(sched_set_tg_tag, 216, ##ctx)	\
+	FN(sched_set_task_tag, 217, ##ctx)	\
 	/* */
 
 /* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't