hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9GZAQ
CVE: NA
--------------------------------
Add one bpf prog type for network numa relationship.
Signed-off-by: Liu Jian <liujian56@huawei.com>
---
 include/linux/bpf_types.h      |   4 +
 include/linux/filter.h         |  54 ++++++++++++
 include/uapi/linux/bpf.h       |   6 ++
 kernel/bpf/syscall.c           |  16 ++++
 net/Kconfig                    |   6 ++
 net/core/filter.c              | 148 +++++++++++++++++++++++++++++++++
 tools/bpf/bpftool/prog.c       |   2 +
 tools/include/uapi/linux/bpf.h |   6 ++
 tools/lib/bpf/libbpf.c         |   2 +
 tools/lib/bpf/libbpf_probes.c  |   1 +
 10 files changed, 245 insertions(+)
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5732b485c539..57954e35fd36 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -81,6 +81,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED, bpf_sched, void *, void *) #endif /* CONFIG_BPF_SCHED */ +#ifdef CONFIG_BPF_NET_GLOBAL_PROG +BPF_PROG_TYPE(BPF_PROG_TYPE_NET_GLOBAL, bpf_gnet, + struct bpf_gnet_ctx, struct bpf_gnet_ctx) +#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) diff --git a/include/linux/filter.h b/include/linux/filter.h index 4479a49a4f7c..10901c4f5b20 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1474,4 +1474,58 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, } #endif /* IS_ENABLED(CONFIG_IPV6) */
+#ifdef CONFIG_BPF_NET_GLOBAL_PROG +struct bpf_gnet_ctx_kern { + struct sock *sk; +}; + +enum gnet_bpf_attach_type { + GNET_BPF_ATTACH_TYPE_INVALID = -1, + GNET_RESERVE0 = 0, + MAX_GNET_BPF_ATTACH_TYPE +}; + +#define GNET_ATYPE(type) \ + case BPF_##type: return type + +static inline enum gnet_bpf_attach_type +to_gnet_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + GNET_ATYPE(GNET_RESERVE0); + default: + return GNET_BPF_ATTACH_TYPE_INVALID; + } +} + +struct gnet_bpf { + struct bpf_prog __rcu *progs[MAX_GNET_BPF_ATTACH_TYPE]; + u32 flags[MAX_GNET_BPF_ATTACH_TYPE]; +}; + +extern struct static_key_false gnet_bpf_enabled_key[MAX_GNET_BPF_ATTACH_TYPE]; +#define gnet_bpf_enabled(atype) static_branch_unlikely(&gnet_bpf_enabled_key[atype]) +extern struct gnet_bpf gnet_bpf_progs; + +int gnet_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int gnet_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); + +static inline void run_gnet_bpf(enum gnet_bpf_attach_type atype, + struct bpf_gnet_ctx_kern *ctx) +{ + struct bpf_prog *prog; + + rcu_read_lock(); + prog = rcu_dereference(gnet_bpf_progs.progs[atype]); + if (unlikely(!prog)) + goto out; + + bpf_prog_run_pin_on_cpu(prog, ctx); +out: + rcu_read_unlock(); +} + +#endif + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b87934003c40..b4ddcba26377 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -201,6 +201,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_SK_LOOKUP, #ifndef __GENKSYMS__ BPF_PROG_TYPE_SCHED, + BPF_PROG_TYPE_NET_GLOBAL, #endif };
@@ -245,6 +246,7 @@ enum bpf_attach_type { BPF_XDP, #ifndef __GENKSYMS__ BPF_SCHED, + BPF_GNET_RESERVE0, #endif __MAX_BPF_ATTACH_TYPE }; @@ -5250,4 +5252,8 @@ enum { BTF_F_ZERO = (1ULL << 3), };
+struct bpf_gnet_ctx { + __bpf_md_ptr(struct bpf_sock *, sk); +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ba690c210f57..172d4005c940 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2107,6 +2107,9 @@ static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_EXT: /* extends any prog */ +#ifdef CONFIG_BPF_NET_GLOBAL_PROG + case BPF_PROG_TYPE_NET_GLOBAL: +#endif return true; case BPF_PROG_TYPE_CGROUP_SKB: /* always unpriv */ @@ -3017,6 +3020,10 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_SK_LOOKUP; case BPF_XDP: return BPF_PROG_TYPE_XDP; +#ifdef CONFIG_BPF_NET_GLOBAL_PROG + case BPF_GNET_RESERVE0: + return BPF_PROG_TYPE_NET_GLOBAL; +#endif default: return BPF_PROG_TYPE_UNSPEC; } @@ -3072,6 +3079,11 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_PROG_TYPE_SOCK_OPS: ret = cgroup_bpf_prog_attach(attr, ptype, prog); break; +#ifdef CONFIG_BPF_NET_GLOBAL_PROG + case BPF_PROG_TYPE_NET_GLOBAL: + ret = gnet_bpf_prog_attach(attr, ptype, prog); + break; +#endif default: ret = -EINVAL; } @@ -3108,6 +3120,10 @@ static int bpf_prog_detach(const union bpf_attr *attr) case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_SOCK_OPS: return cgroup_bpf_prog_detach(attr, ptype); +#ifdef CONFIG_BPF_NET_GLOBAL_PROG + case BPF_PROG_TYPE_NET_GLOBAL: + return gnet_bpf_prog_detach(attr, ptype); +#endif default: return -EINVAL; } diff --git a/net/Kconfig b/net/Kconfig index 232075ae15e2..6186e9ad88a3 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -470,6 +470,12 @@ config ETHTOOL_NETLINK netlink. It provides better extensibility and some new features, e.g. notification messages.
+config BPF_NET_GLOBAL_PROG + bool "Network global bpf prog type" + depends on NET + depends on BPF_SYSCALL + default n + endif # if NET
# Used by archs to tell that they support BPF JIT compiler plus which flavour. diff --git a/net/core/filter.c b/net/core/filter.c index 16a2774eecd6..a5b497043eda 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10682,3 +10682,151 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
 return func;
 }
+
+#ifdef CONFIG_BPF_NET_GLOBAL_PROG
+static DEFINE_MUTEX(gnet_bpf_mutex);
+struct gnet_bpf gnet_bpf_progs;
+EXPORT_SYMBOL(gnet_bpf_progs);
+DEFINE_STATIC_KEY_ARRAY_FALSE(gnet_bpf_enabled_key, MAX_GNET_BPF_ATTACH_TYPE);
+EXPORT_SYMBOL(gnet_bpf_enabled_key);
+
+int gnet_bpf_prog_attach(const union bpf_attr *attr,
+			 enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+	enum gnet_bpf_attach_type atype;
+	struct bpf_prog *attached;
+	int ret = 0;
+
+	if (attr->attach_flags || attr->replace_bpf_fd)
+		return -EINVAL;
+
+	atype = to_gnet_bpf_attach_type(attr->attach_type);
+	if (atype < 0)
+		return -EINVAL;
+
+	mutex_lock(&gnet_bpf_mutex);
+	attached = rcu_dereference_protected(gnet_bpf_progs.progs[atype], lockdep_is_held(&gnet_bpf_mutex));
+	if (attached == prog) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	rcu_assign_pointer(gnet_bpf_progs.progs[atype], prog);
+	gnet_bpf_progs.flags[atype] = attr->attach_flags;
+	if (attached)
+		bpf_prog_put(attached);
+	else
+		static_branch_inc(&gnet_bpf_enabled_key[atype]);
+
+out_unlock:
+	mutex_unlock(&gnet_bpf_mutex);
+	return ret;
+}
+
+int gnet_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+	enum gnet_bpf_attach_type atype;
+	struct bpf_prog *attached;
+	int ret = 0;
+
+	atype = to_gnet_bpf_attach_type(attr->attach_type);
+	if (atype < 0)
+		return -EINVAL;
+
+	mutex_lock(&gnet_bpf_mutex);
+	attached = rcu_dereference_protected(gnet_bpf_progs.progs[atype], lockdep_is_held(&gnet_bpf_mutex));
+	if (!attached) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	static_branch_dec(&gnet_bpf_enabled_key[atype]);
+	gnet_bpf_progs.flags[atype] = 0;
+	rcu_assign_pointer(gnet_bpf_progs.progs[atype], NULL);
+	bpf_prog_put(attached);
+out_unlock:
+	mutex_unlock(&gnet_bpf_mutex);
+	return ret;
+}
+
+static int __init gnet_bpf_init(void)
+{
+	return 0;
+}
+late_initcall(gnet_bpf_init);
+
+static const struct bpf_func_proto *
+bpf_gnet_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_skb_event_output_proto;
+	case BPF_FUNC_sk_fullsock:
+		return &bpf_sk_fullsock_proto;
+	default:
+		break;
+	}
+
+	return bpf_sk_base_func_proto(func_id);
+}
+
+static bool bpf_gnet_is_valid_access(int off, int size,
+				     enum bpf_access_type type,
+				     const struct bpf_prog *prog,
+				     struct bpf_insn_access_aux *info)
+{
+	if (off < 0 || off >= sizeof(struct bpf_gnet_ctx))
+		return false;
+
+	/* The verifier guarantees that size > 0. */
+	if (off % size != 0)
+		return false;
+
+	if (type == BPF_WRITE)
+		return false;
+
+	switch (off) {
+	case offsetof(struct bpf_gnet_ctx, sk):
+		if (size != sizeof(__u64))
+			return false;
+		info->reg_type = PTR_TO_SOCKET_OR_NULL;
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+static u32 bpf_gnet_convert_ctx_access(enum bpf_access_type type,
+				       const struct bpf_insn *si,
+				       struct bpf_insn *insn_buf,
+				       struct bpf_prog *prog, u32 *target_size)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	switch (si->off) {
+	case offsetof(struct bpf_gnet_ctx, sk):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_gnet_ctx_kern, sk),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_gnet_ctx_kern, sk));
+		break;
+	}
+	return insn - insn_buf;
+}
+
+static int bpf_gnet_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
+				 const struct bpf_prog *prog)
+{
+	return 0;
+}
+
+const struct bpf_verifier_ops bpf_gnet_verifier_ops = {
+	.get_func_proto		= bpf_gnet_func_proto,
+	.is_valid_access	= bpf_gnet_is_valid_access,
+	.convert_ctx_access	= bpf_gnet_convert_ctx_access,
+	.gen_prologue		= bpf_gnet_gen_prologue,
+};
+
+const struct bpf_prog_ops bpf_gnet_prog_ops = {
+};
+#endif
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2c85586ec224..d9b2fe1c451a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -65,6 +65,7 @@ const char * const prog_type_name[] = {
 	[BPF_PROG_TYPE_LSM]			= "lsm",
 	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
 	[BPF_PROG_TYPE_SCHED]			= "sched",
+	[BPF_PROG_TYPE_NET_GLOBAL]		= "gnet",
 };
const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name); @@ -79,6 +80,7 @@ static const char * const attach_type_strings[] = { [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict", [BPF_SK_MSG_VERDICT] = "msg_verdict", [BPF_FLOW_DISSECTOR] = "flow_dissector", + [BPF_GNET_RESERVE0] = "gnet_reserve0", [__MAX_BPF_ATTACH_TYPE] = NULL, };
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 5a153a1a8f18..dc493193174f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -201,6 +201,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_SK_LOOKUP, #ifndef __GENKSYMS__ BPF_PROG_TYPE_SCHED, + BPF_PROG_TYPE_NET_GLOBAL, #endif };
@@ -245,6 +246,7 @@ enum bpf_attach_type { BPF_XDP, #ifndef __GENKSYMS__ BPF_SCHED, + BPF_GNET_RESERVE0, #endif __MAX_BPF_ATTACH_TYPE }; @@ -5250,4 +5252,8 @@ enum { BTF_F_ZERO = (1ULL << 3), };
+struct bpf_gnet_ctx { + __bpf_md_ptr(struct bpf_sock *, sk); +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 283881242222..1a04ac5395bb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8494,6 +8494,8 @@ static const struct bpf_sec_def section_defs[] = { BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, BPF_SK_LOOKUP), + BPF_EAPROG_SEC("gnet/reserve0", BPF_PROG_TYPE_NET_GLOBAL, + BPF_GNET_RESERVE0), };
#undef BPF_PROG_SEC_IMPL diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 13393f0eab25..73aef4467823 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -111,6 +111,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_STRUCT_OPS: case BPF_PROG_TYPE_EXT: case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_NET_GLOBAL: default: break; }