
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICA1GK

--------------------------------

Add kfuncs to get the active pid namespace of a task, and to get the
number of tasks and the last pid of a given pid namespace.

Signed-off-by: GONG Ruiqi <gongruiqi1@huawei.com>
---
 include/linux/pid_namespace.h  | 11 +++++++++++
 kernel/bpf-rvi/common_kfuncs.c | 36 ++++++++++++++++++++++++++++++++++++
 kernel/pid_namespace.c         | 25 +++++++++++++++++++++++++
 3 files changed, 72 insertions(+)

diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 78d8e728e769..68cc51803f14 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -149,6 +149,17 @@ struct pidns_loadavg {
 };
 
 extern struct pidns_loadavg init_pidns_loadavg;
+
+struct pid_iter {
+	unsigned int pid;
+	struct task_struct *task;
+};
+
+struct pid_iter next_pid(struct pid_namespace *ns, struct pid_iter iter);
+
+#define for_each_task_in_pidns(iter, ns) \
+	for (iter = next_pid(ns, iter); iter.task; \
+	     iter.pid += 1, iter = next_pid(ns, iter))
 #endif
 
 #endif /* _LINUX_PID_NS_H */
diff --git a/kernel/bpf-rvi/common_kfuncs.c b/kernel/bpf-rvi/common_kfuncs.c
index c751dccc2a67..22ac2c69de1a 100644
--- a/kernel/bpf-rvi/common_kfuncs.c
+++ b/kernel/bpf-rvi/common_kfuncs.c
@@ -2,6 +2,9 @@
 /* Copyright (c) 2025 Huawei Technologies Co., Ltd */
 
 #include <linux/memcontrol.h>
+#include <linux/idr.h>
+#include <linux/pid_namespace.h>
+#include <linux/sched.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf.h>
 
@@ -14,8 +17,41 @@ __bpf_kfunc struct mem_cgroup *bpf_mem_cgroup_from_task(struct task_struct *p)
 	return mem_cgroup_from_task(p);
 }
 
+/*
+ * Loadavg related kfuncs
+ */
+
+__bpf_kfunc struct pid_namespace *bpf_task_active_pid_ns(struct task_struct *task)
+{
+	return task_active_pid_ns(task);
+}
+
+/* Returns nr_running in the upper 32 bits and nr_threads in the lower 32. */
+__bpf_kfunc u64 bpf_pidns_nr_tasks(struct pid_namespace *ns)
+{
+	/* Must be zero-initialized: next_pid() reads iter.pid as the search
+	 * start and put_task_struct()s a non-NULL iter.task.
+	 */
+	struct pid_iter iter = {};
+	u32 nr_running = 0, nr_threads = 0;
+
+	for_each_task_in_pidns(iter, ns) {
+		nr_threads++;
+		if (task_is_running(iter.task))
+			nr_running++;
+	}
+
+	return (u64)nr_running << 32 | nr_threads;
+}
+
+__bpf_kfunc u32 bpf_pidns_last_pid(struct pid_namespace *ns)
+{
+	return idr_get_cursor(&ns->idr) - 1;
+}
+
 BTF_SET8_START(bpf_common_kfuncs_ids)
 BTF_ID_FLAGS(func, bpf_mem_cgroup_from_task, KF_RET_NULL | KF_RCU)
+BTF_ID_FLAGS(func, bpf_task_active_pid_ns, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_pidns_nr_tasks)
+BTF_ID_FLAGS(func, bpf_pidns_last_pid)
 BTF_SET8_END(bpf_common_kfuncs_ids)
 
 static const struct btf_kfunc_id_set bpf_common_kfuncs_set = {
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index b49a3d7477a6..a7e6aa2788ef 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -598,4 +598,29 @@ static void pidns_calc_loadavg_workfn(struct work_struct *work)
 	pidns_calc_avenrun();
 	schedule_delayed_work(&pidns_calc_loadavg_work, LOAD_FREQ);
 }
+
+/* Reference: next_tgid() in fs/proc/base.c */
+struct pid_iter next_pid(struct pid_namespace *ns, struct pid_iter iter)
+{
+	struct pid *pid;
+
+	if (iter.task)
+		put_task_struct(iter.task);
+	rcu_read_lock();
+retry:
+	iter.task = NULL;
+	pid = find_ge_pid(iter.pid, ns);
+	if (pid) {
+		iter.pid = pid_nr_ns(pid, ns);
+		iter.task = pid_task(pid, PIDTYPE_PID);
+		/* The pid's task may be gone (being reaped); skip to the next */
+		if (!iter.task) {
+			iter.pid += 1;
+			goto retry;
+		}
+		get_task_struct(iter.task);
+	}
+	rcu_read_unlock();
+	return iter;
+}
 #endif /* CONFIG_BPF_RVI */
-- 
2.25.1