From: Chen Hui judy.chenhui@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I7X7WW
--------------------------------
Add bpf helper function bpf_init_cpu_topology(), which obtains cpu topology info through the topology_* macros defined by include/linux/topology.h, and saves it in a BPF map.
The cpu topology info is useful for selecting a core from userspace.
Signed-off-by: Chen Hui judy.chenhui@huawei.com Signed-off-by: Ren Zhijie renzhijie2@huawei.com Signed-off-by: Hui Tang tanghui20@huawei.com Signed-off-by: Guan Jing guanjing6@huawei.com --- include/linux/bpf_topology.h | 46 ++++++++++++++++++ include/uapi/linux/bpf.h | 14 ++++++ kernel/sched/bpf_sched.c | 8 ++++ kernel/sched/bpf_topology.c | 87 ++++++++++++++++++++++++++++++++++ kernel/sched/build_utility.c | 1 + scripts/bpf_doc.py | 4 ++ tools/include/uapi/linux/bpf.h | 14 ++++++ 7 files changed, 174 insertions(+) create mode 100644 include/linux/bpf_topology.h create mode 100644 kernel/sched/bpf_topology.c
diff --git a/include/linux/bpf_topology.h b/include/linux/bpf_topology.h new file mode 100644 index 000000000000..0c7ee492edde --- /dev/null +++ b/include/linux/bpf_topology.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_BPF_TOPOLOGY_H +#define _LINUX_BPF_TOPOLOGY_H + +#include <linux/cpumask.h> + +struct bpf_cpu_topology { + int cpu; + int core_id; + int cluster_id; + int die_id; + int physical_package_id; + int numa_node; + struct cpumask thread_siblings; + struct cpumask core_siblings; + struct cpumask cluster_cpus; + struct cpumask die_cpus; + struct cpumask package_cpus; + struct cpumask node_cpu_lists; +}; + +struct bpf_cpumask_info { + unsigned int nums_possible_cpus; + unsigned int nums_active_cpus; + unsigned int nums_isolate_cpus; + unsigned int nr_cpu_ids; + unsigned int bpf_nr_cpumask_bits; + struct cpumask cpu_possible_cpumask; + struct cpumask cpu_active_cpumask; + struct cpumask cpu_isolate_cpumask; +}; + +#endif /* _LINUX_BPF_TOPOLOGY_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 34641b24e699..cb37b50acccd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5607,6 +5607,18 @@ union bpf_attr { * Get multiple types of *cpu* statistics and store in *ctx*. * Return * 0 on success, or a negative error in case of failure. 
+ * + * long bpf_init_cpu_topology(struct bpf_map *map) + * Description + * Initialize the cpu topology used by bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_get_cpumask_info(struct bpf_map *map, struct bpf_cpumask_info *cpus) + * Description + * Get system cpu information, returned in *cpus*. + * Return + * 0 on success, or a negative error in case of failure. */ #define ___BPF_FUNC_MAPPER(FN, ctx...) \ FN(unspec, 0, ##ctx) \ @@ -5828,6 +5840,8 @@ union bpf_attr { FN(sched_set_tg_tag, 216, ##ctx) \ FN(sched_set_task_tag, 217, ##ctx) \ FN(sched_cpu_stats_of, 218, ##ctx) \ + FN(init_cpu_topology, 219, ##ctx) \ + FN(get_cpumask_info, 220, ##ctx) \ /* */
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't diff --git a/kernel/sched/bpf_sched.c b/kernel/sched/bpf_sched.c index 8608d6ec939b..e768b465d684 100644 --- a/kernel/sched/bpf_sched.c +++ b/kernel/sched/bpf_sched.c @@ -5,6 +5,7 @@ #include <linux/bpf_sched.h> #include <linux/btf_ids.h> #include <linux/cpuidle.h> +#include <linux/bpf_topology.h> #include "sched.h"
DEFINE_STATIC_KEY_FALSE(bpf_sched_enabled_key); @@ -29,6 +30,9 @@ BTF_SET_START(bpf_sched_hooks) #undef BPF_SCHED_HOOK BTF_SET_END(bpf_sched_hooks)
+const struct bpf_func_proto bpf_init_cpu_topology_proto __weak; +const struct bpf_func_proto bpf_get_cpumask_info_proto __weak; + int bpf_sched_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { @@ -123,6 +127,10 @@ bpf_sched_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return bpf_get_trace_printk_proto(); case BPF_FUNC_sched_cpu_stats_of: return &bpf_sched_cpu_stats_of_proto; + case BPF_FUNC_init_cpu_topology: + return &bpf_init_cpu_topology_proto; + case BPF_FUNC_get_cpumask_info: + return &bpf_get_cpumask_info_proto; default: return bpf_base_func_proto(func_id); } diff --git a/kernel/sched/bpf_topology.c b/kernel/sched/bpf_topology.c new file mode 100644 index 000000000000..867c4b824366 --- /dev/null +++ b/kernel/sched/bpf_topology.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/bpf.h> +#include <linux/btf_ids.h> +#include <linux/bpf_verifier.h> +#include <linux/topology.h> +#include <linux/cpumask.h> +#include <linux/bpf_topology.h> +#include <linux/sched/isolation.h> + +static void bpf_update_cpu_topology(struct bpf_cpu_topology *bpf_cpu_topology, int cpu) +{ + bpf_cpu_topology->cpu = cpu; + bpf_cpu_topology->core_id = topology_core_id(cpu); + bpf_cpu_topology->cluster_id = topology_cluster_id(cpu); + bpf_cpu_topology->die_id = topology_die_id(cpu); + bpf_cpu_topology->physical_package_id = topology_physical_package_id(cpu); + bpf_cpu_topology->numa_node = cpu_to_node(cpu); + cpumask_copy(&bpf_cpu_topology->thread_siblings, topology_sibling_cpumask(cpu)); + cpumask_copy(&bpf_cpu_topology->core_siblings, topology_core_cpumask(cpu)); + cpumask_copy(&bpf_cpu_topology->cluster_cpus, topology_cluster_cpumask(cpu)); + cpumask_copy(&bpf_cpu_topology->die_cpus, topology_die_cpumask(cpu)); + cpumask_copy(&bpf_cpu_topology->package_cpus, topology_core_cpumask(cpu)); + cpumask_copy(&bpf_cpu_topology->node_cpu_lists, cpumask_of_node(cpu_to_node(cpu))); +} + +BPF_CALL_1(bpf_init_cpu_topology, struct bpf_map *, map) +{ + const struct cpumask *cpu_map = cpu_active_mask; + struct bpf_cpu_topology *topo; + int i = -1; + + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + for_each_cpu(i, cpu_map) { + topo = map->ops->map_lookup_elem(map, &i); + if (!topo) + return -ENOMEM; + + bpf_update_cpu_topology(topo, i); + } + + return 0; +} + +const struct bpf_func_proto bpf_init_cpu_topology_proto = { + .func = bpf_init_cpu_topology, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, +}; + +BPF_CALL_2(bpf_get_cpumask_info, struct bpf_map *, map, struct bpf_cpumask_info *, cpus) +{ + if (!cpus) + return -EINVAL; + + cpumask_copy(&cpus->cpu_possible_cpumask, cpu_possible_mask); + cpumask_copy(&cpus->cpu_active_cpumask, cpu_active_mask); + cpumask_copy(&cpus->cpu_isolate_cpumask, 
housekeeping_cpumask(HK_TYPE_DOMAIN)); + cpus->nums_possible_cpus = num_possible_cpus(); + cpus->nums_active_cpus = num_active_cpus(); + cpus->nums_isolate_cpus = cpumask_weight(&cpus->cpu_isolate_cpumask); + cpus->nr_cpu_ids = nr_cpu_ids; + cpus->bpf_nr_cpumask_bits = nr_cpumask_bits; + + return 0; +} + +const struct bpf_func_proto bpf_get_cpumask_info_proto = { + .func = bpf_get_cpumask_info, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, +}; diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c index d44c584d9bc7..eb4fab8fd18f 100644 --- a/kernel/sched/build_utility.c +++ b/kernel/sched/build_utility.c @@ -111,4 +111,5 @@
#ifdef CONFIG_BPF_SCHED # include "bpf_sched.c" +# include "bpf_topology.c" #endif diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index e8bbfb801645..b23f07438978 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -702,6 +702,8 @@ class PrinterHelpers(Printer): 'struct ipv6hdr', 'struct task_group', 'struct bpf_sched_cpu_stats', + 'struct bpf_cpu_topology', + 'struct bpf_cpumask_info', ] known_types = { '...', @@ -759,6 +761,8 @@ class PrinterHelpers(Printer): 'struct ipv6hdr', 'struct task_group', 'struct bpf_sched_cpu_stats', + 'struct bpf_cpu_topology', + 'struct bpf_cpumask_info', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 34641b24e699..cb37b50acccd 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5607,6 +5607,18 @@ union bpf_attr { * Get multiple types of *cpu* statistics and store in *ctx*. * Return * 0 on success, or a negative error in case of failure. + * + * long bpf_init_cpu_topology(struct bpf_map *map) + * Description + * Initialize the cpu topology used by bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_get_cpumask_info(struct bpf_map *map, struct bpf_cpumask_info *cpus) + * Description + * Get system cpu information, returned in *cpus*. + * Return + * 0 on success, or a negative error in case of failure. */ #define ___BPF_FUNC_MAPPER(FN, ctx...) \ FN(unspec, 0, ##ctx) \ @@ -5828,6 +5840,8 @@ union bpf_attr { FN(sched_set_tg_tag, 216, ##ctx) \ FN(sched_set_task_tag, 217, ##ctx) \ FN(sched_cpu_stats_of, 218, ##ctx) \ + FN(init_cpu_topology, 219, ##ctx) \ + FN(get_cpumask_info, 220, ##ctx) \ /* */
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't