
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICFKYV

--------------------------------

Add the libbpf_sched_set_task_prefer_cpumask() helper, which allows a
BPF scheduler program to set the preferred cpumask for a task.

Signed-off-by: Cheng Yu <serein.chengyu@huawei.com>
---
 include/uapi/linux/bpf.h       |  7 +++++++
 kernel/bpf/helpers.c           |  3 +++
 kernel/sched/bpf_sched.c       | 37 ++++++++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  7 +++++++
 tools/lib/bpf/libbpf_sched.h   |  6 ++++++
 5 files changed, 60 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f4ebb476cf03..ab746f19f624 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3934,6 +3934,12 @@ union bpf_attr {
  *		update network's relationship to sched subsystem.
  *	Return
  *		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_task_prefer_cpumask(struct task_struct *tsk, struct cpumask *mask, int len)
+ *	Description
+ *		Set the preferred cpumask for the task.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -4111,6 +4117,7 @@ union bpf_attr {
 	FN(sched_set_curr_preferred_node),\
 	FN(get_node_stats),		\
 	FN(sched_net_rship_submit),	\
+	FN(sched_set_task_prefer_cpumask),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1fbda5c87a29..72be3af042cb 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -664,6 +664,7 @@ const struct bpf_func_proto bpf_sched_tg_tag_of_proto __weak;
 const struct bpf_func_proto bpf_sched_task_tag_of_proto __weak;
 const struct bpf_func_proto bpf_sched_set_tg_tag_proto __weak;
 const struct bpf_func_proto bpf_sched_set_task_tag_proto __weak;
+const struct bpf_func_proto bpf_sched_set_task_prefer_cpumask_proto __weak;

 const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
@@ -711,6 +712,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_sched_tg_tag_of_proto;
 	case BPF_FUNC_sched_task_tag_of:
 		return &bpf_sched_task_tag_of_proto;
+	case BPF_FUNC_sched_set_task_prefer_cpumask:
+		return &bpf_sched_set_task_prefer_cpumask_proto;
 	default:
 		break;
 	}
diff --git a/kernel/sched/bpf_sched.c b/kernel/sched/bpf_sched.c
index 3e14d1fa911e..cdb83e261eb9 100644
--- a/kernel/sched/bpf_sched.c
+++ b/kernel/sched/bpf_sched.c
@@ -414,6 +414,43 @@ const struct bpf_func_proto bpf_sched_set_curr_preferred_node_proto = {
 };
 #endif

+#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+static inline int
+set_task_prefer_cpumask(struct task_struct *tsk, struct cpumask *mask, int len)
+{
+	if (!tsk || !mask || sizeof(*mask) != len)
+		return -EINVAL;
+
+	if (set_prefer_cpus_ptr(tsk, mask))
+		return -EINVAL;
+
+	return 0;
+}
+#else
+static inline int
+set_task_prefer_cpumask(struct task_struct *tsk, struct cpumask *mask, int len)
+{
+	return -EINVAL;
+}
+#endif
+
+BPF_CALL_3(bpf_sched_set_task_prefer_cpumask,
+	   struct task_struct *, tsk, struct cpumask *, mask, int, len)
+{
+	return set_task_prefer_cpumask(tsk, mask, len);
+}
+
+const struct bpf_func_proto bpf_sched_set_task_prefer_cpumask_proto = {
+	.func		= bpf_sched_set_task_prefer_cpumask,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+	.arg1_btf_id	= &btf_sched_task_ids[0],
+	.arg2_type	= PTR_MAYBE_NULL | ARG_PTR_TO_MEM | MEM_RDONLY,
+	.arg3_type	= ARG_CONST_SIZE,
+};
+
+
 static const struct bpf_func_proto *
 bpf_sched_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 120d4b1a3323..77317ffb64e2 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3934,6 +3934,12 @@ union bpf_attr {
  *		update network's relationship to sched subsystem.
  *	Return
  *		0 on success, or a negative error in case of failure.
+ *
+ * int bpf_sched_set_task_prefer_cpumask(struct task_struct *tsk, struct cpumask *mask, int len)
+ *	Description
+ *		Set the preferred cpumask for the task.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -4111,6 +4117,7 @@ union bpf_attr {
 	FN(sched_set_curr_preferred_node),\
 	FN(get_node_stats),		\
 	FN(sched_net_rship_submit),	\
+	FN(sched_set_task_prefer_cpumask),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/tools/lib/bpf/libbpf_sched.h b/tools/lib/bpf/libbpf_sched.h
index 3e9b41788637..e84268399d10 100644
--- a/tools/lib/bpf/libbpf_sched.h
+++ b/tools/lib/bpf/libbpf_sched.h
@@ -665,4 +665,10 @@ libbpf_mem_preferred_nid(struct task_struct *tsk, nodemask_t *preferred_node)
 	*preferred_node = getVal(stats->mm.comm.preferred_node);
 	return 0;
 }
+
+static __always_inline int
+libbpf_sched_set_task_prefer_cpumask(struct task_struct *tsk, struct cpumask *mask, int len)
+{
+	return bpf_sched_set_task_prefer_cpumask(tsk, mask, len);
+}
 #endif
-- 
2.25.1
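
A minimal usage sketch (illustrative only, not part of the patch): it shows the
calling convention of the libbpf_sched_set_task_prefer_cpumask() wrapper added
above. The kernel side accepts the mask only when len == sizeof(struct cpumask)
and returns -EINVAL otherwise. The surrounding program structure, the hook the
function would be called from, and the way the cpumask is built here (raw bit
manipulation preferring CPUs 0-3) are assumptions made for the example.

    /* Sketch of a sched BPF program calling the new wrapper. */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "libbpf_sched.h"

    /* Called from whichever sched BPF hook the program attaches to. */
    static __always_inline int prefer_low_cpus(struct task_struct *p)
    {
    	struct cpumask mask = {};

    	/* Example policy: prefer CPUs 0-3. */
    	mask.bits[0] = 0xfUL;

    	/* len must equal sizeof(struct cpumask), or the kernel helper
    	 * rejects the call with -EINVAL.
    	 */
    	return libbpf_sched_set_task_prefer_cpumask(p, &mask, sizeof(mask));
    }

    char LICENSE[] SEC("license") = "GPL";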