From: Alexei Starovoitov <ast@kernel.org>
mainline inclusion
from mainline-5.17-rc1
commit 26b367e3663931f2fee5f0786a1eff712e67b0bf
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5EUVD
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
-------------------------------------------------
Add a test where the randmap() function is appended to three different bpf programs. That exercises the struct bpf_core_relo replication logic and the instruction offset adjustment in the gen loader part of libbpf.
The fourth bpf program has 360 CO-RE relocations from vmlinux, bpf_testmod, and a non-existent type. It tests the candidate cache logic.
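For context, each CO-RE relocation record that the gen loader replicates (and whose instruction offset it adjusts when randmap() is appended at a different position in each program) looks roughly like the struct below. This is a sketch of struct bpf_core_relo from include/uapi/linux/bpf.h; the comments are my informal reading, not text from this patch.

	struct bpf_core_relo {
		__u32 insn_off;		/* offset of the instruction to patch; this is
					 * the value gen loader must shift per program
					 */
		__u32 type_id;		/* BTF type ID of the root type being accessed */
		__u32 access_str_off;	/* offset of the accessor string in BTF strings */
		enum bpf_core_relo_kind kind;	/* e.g. field byte offset, type existence */
	};

Once built, the new test can presumably be run on its own via the selftests runner, e.g. ./test_progs -t core_kern.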
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211201181040.23337-16-alexei.starovoitov@gmail...
(cherry picked from commit 26b367e3663931f2fee5f0786a1eff712e67b0bf)
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
Conflicts:
	tools/testing/selftests/bpf/Makefile
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 tools/testing/selftests/bpf/Makefile          |   2 +-
 .../selftests/bpf/prog_tests/core_kern.c      |  14 +++
 tools/testing/selftests/bpf/progs/core_kern.c | 104 ++++++++++++++++++
 3 files changed, 119 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/core_kern.c
 create mode 100644 tools/testing/selftests/bpf/progs/core_kern.c
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index be8b9afbfdb7..73fe4a35b4b2 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -308,7 +308,7 @@ LINKED_SKELS := test_static_linked.skel.h
 
 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c \
 	test_ringbuf.c atomics.c trace_printk.c \
-	map_ptr_kern.c
+	map_ptr_kern.c core_kern.c
 # Generate both light skeleton and libbpf skeleton for these
 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c
 SKEL_BLACKLIST += $$(LSKELS)
diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c
new file mode 100644
index 000000000000..6c56c4176505
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "test_progs.h"
+#include "core_kern.lskel.h"
+
+void test_core_kern(void)
+{
+	struct core_kern_lskel *skel;
+
+	skel = core_kern_lskel__open_and_load();
+	ASSERT_OK_PTR(skel, "open_and_load");
+	core_kern_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c
new file mode 100644
index 000000000000..13499cc15c7d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/core_kern.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, u32);
+	__type(value, u32);
+	__uint(max_entries, 256);
+} array1 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, u32);
+	__type(value, u32);
+	__uint(max_entries, 256);
+} array2 SEC(".maps");
+
+static __noinline int randmap(int v, const struct net_device *dev)
+{
+	struct bpf_map *map = (struct bpf_map *)&array1;
+	int key = bpf_get_prandom_u32() & 0xff;
+	int *val;
+
+	if (bpf_get_prandom_u32() & 1)
+		map = (struct bpf_map *)&array2;
+
+	val = bpf_map_lookup_elem(map, &key);
+	if (val)
+		*val = bpf_get_prandom_u32() + v + dev->mtu;
+
+	return 0;
+}
+
+SEC("tp_btf/xdp_devmap_xmit")
+int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
+	     *from_dev, const struct net_device *to_dev, int sent, int drops,
+	     int err)
+{
+	return randmap(from_dev->ifindex, from_dev);
+}
+
+SEC("fentry/eth_type_trans")
+int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
+	     struct net_device *dev, unsigned short protocol)
+{
+	return randmap(dev->ifindex + skb->len, dev);
+}
+
+SEC("fexit/eth_type_trans")
+int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
+	     struct net_device *dev, unsigned short protocol)
+{
+	return randmap(dev->ifindex + skb->len, dev);
+}
+
+volatile const int never;
+
+struct __sk_bUfF /* it will not exist in vmlinux */ {
+	int len;
+} __attribute__((preserve_access_index));
+
+struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
+	size_t len;
+} __attribute__((preserve_access_index));
+
+SEC("tc")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data = (void *)(long)ctx->data;
+	void *ptr;
+	int ret = 0, nh_off, i = 0;
+
+	nh_off = 14;
+
+	/* pragma unroll doesn't work on large loops */
+#define C do { \
+	ptr = data + i; \
+	if (ptr + nh_off > data_end) \
+		break; \
+	ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+	if (never) { \
+		/* below is a dead code with unresolvable CO-RE relo */ \
+		i += ((struct __sk_bUfF *)ctx)->len; \
+		/* this CO-RE relo may or may not resolve
+		 * depending on whether bpf_testmod is loaded.
+		 */ \
+		i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
+	} \
+	} while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+	C30;C30;C30; /* 90 calls */
+	return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";