hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8UOP9
--------------------------------
Reserve space for bpf-related structures.
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf.h               | 83 +++++++++++++++++++++++++++++++
 include/linux/bpf_local_storage.h | 11 ++++
 include/linux/bpf_verifier.h      | 40 +++++++++++++++
 3 files changed, 134 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 392f581af2ce..d615a1c02415 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -29,6 +29,7 @@ #include <linux/rcupdate_trace.h> #include <linux/static_call.h> #include <linux/memcontrol.h> +#include <linux/kabi.h>
struct bpf_verifier_env; struct bpf_verifier_log; @@ -168,6 +169,11 @@ struct bpf_map_ops {
/* bpf_iter info used to open a seq_file */ const struct bpf_iter_seq_info *iter_seq_info; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
enum { @@ -288,6 +294,11 @@ struct bpf_map { bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ s64 __percpu *elem_count; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
static inline const char *btf_field_type_name(enum btf_field_type type) @@ -807,6 +818,11 @@ struct bpf_func_proto { }; int *ret_btf_id; /* return value btf_id */ bool (*allowed)(const struct bpf_prog *prog); + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is @@ -942,6 +958,11 @@ struct bpf_verifier_ops { int (*btf_struct_access)(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, int off, int size); + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_prog_offload_ops { @@ -1001,6 +1022,9 @@ struct btf_func_model { u8 nr_args; u8 arg_size[MAX_BPF_FUNC_ARGS]; u8 arg_flags[MAX_BPF_FUNC_ARGS]; + + KABI_RESERVE(1) + KABI_RESERVE(2) };
/* Restore arguments before returning from trampoline to let original function @@ -1121,6 +1145,11 @@ struct bpf_tramp_image { struct rcu_head rcu; struct work_struct work; }; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_trampoline { @@ -1149,6 +1178,11 @@ struct bpf_trampoline { /* Executable image of trampoline */ struct bpf_tramp_image *cur_image; struct module *mod; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_attach_target_info { @@ -1180,6 +1214,11 @@ struct bpf_dispatcher { struct static_call_key *sc_key; void *sc_tramp; #endif + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( @@ -1465,6 +1504,15 @@ struct bpf_prog_aux { struct work_struct work; struct rcu_head rcu; }; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
struct bpf_prog { @@ -1499,6 +1547,11 @@ struct bpf_prog { DECLARE_FLEX_ARRAY(struct sock_filter, insns); DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); }; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_array_aux { @@ -1507,6 +1560,11 @@ struct bpf_array_aux { struct bpf_map *map; struct mutex poke_mutex; struct work_struct work; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_link { @@ -1516,6 +1574,11 @@ struct bpf_link { const struct bpf_link_ops *ops; struct bpf_prog *prog; struct work_struct work; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_link_ops { @@ -1529,6 +1592,11 @@ struct bpf_link_ops { struct bpf_link_info *info); int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, struct bpf_map *old_map); + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_tramp_link { @@ -1626,6 +1694,11 @@ struct bpf_struct_ops { struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; u32 type_id; u32 value_id; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) @@ -1727,6 +1800,11 @@ struct bpf_array { DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8); DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8); }; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ @@ -1822,6 +1900,11 @@ struct bpf_prog_array_item { struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; u64 bpf_cookie; }; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_prog_array { diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 173ec7f43ed1..c2ae8788a5cb 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -14,6 +14,7 @@ #include <linux/hash.h> #include <linux/types.h> #include <linux/bpf_mem_alloc.h> +#include <linux/kabi.h> #include <uapi/linux/btf.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16 @@ -59,6 +60,11 @@ struct bpf_local_storage_map { struct bpf_mem_alloc selem_ma; struct bpf_mem_alloc storage_ma; bool bpf_ma; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_local_storage_data { @@ -94,6 +100,11 @@ struct bpf_local_storage { */ struct rcu_head rcu; raw_spinlock_t lock; /* Protect adding/removing from the "list" */ + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
/* U16_MAX is much more than enough for sk local storage diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b6e58dab8e27..d2b59c6d9d60 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -8,6 +8,7 @@ #include <linux/btf.h> /* for struct btf and btf_id() */ #include <linux/filter.h> /* for MAX_BPF_STACK */ #include <linux/tnum.h> +#include <linux/kabi.h>
/* Maximum variable offset umax_value permitted when resolving memory accesses. * In practice this is far bigger than any realistic pointer offset; this limit @@ -225,6 +226,11 @@ struct bpf_reg_state { enum bpf_reg_liveness live; /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ bool precise; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
enum bpf_stack_slot_type { @@ -273,6 +279,11 @@ struct bpf_reference_state { * exiting a callback function. */ int callback_ref; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
/* state of the program: @@ -306,6 +317,11 @@ struct bpf_func_state { struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_idx_pair { @@ -383,6 +399,11 @@ struct bpf_verifier_state { */ struct bpf_idx_pair *jmp_history; u32 jmp_history_cnt; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
#define bpf_get_spilled_reg(slot, frame) \ @@ -490,6 +511,11 @@ struct bpf_insn_aux_data { * this instruction, regardless of any heuristics */ bool force_checkpoint; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -541,6 +567,11 @@ struct bpf_subprog_info { bool tail_call_reachable; bool has_ld_abs; bool is_async_cb; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct bpf_verifier_env; @@ -640,6 +671,15 @@ struct bpf_verifier_env { * e.g., in reg_type_str() to generate reg_type string */ char tmp_str_buf[TMP_STR_BUF_LEN]; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,