From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.2-rc1
commit bffdeaa8a5af7200b0e74c9d5a41167f86626a36
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The BPF verifier marks some instructions as prune points. Currently these prune points serve two purposes.
It's a point where the verifier tries to find a previously verified state and checks the current state's equivalence against it, to short-circuit verification of the current code path.
But currently it's also a point where the jump history, used for precision backtracking, is updated. This is done so that a non-linear flow of execution can be properly backtracked.
Such coupling is coincidental and unnecessary. Some prune points are not part of any non-linear jump path, so they don't need jump history updates. On the other hand, not all instructions that have to be recorded in jump history are necessarily good prune points. A minimal sketch of the pre-patch scheme follows.
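To make the coupling concrete, here is a standalone model of the pre-patch scheme (a hypothetical, simplified sketch, not the verifier's actual code): a single per-instruction flag is consulted by both the state-pruning logic and the jmp-history logic, so the two sets of instructions can never diverge.

#include <stdbool.h>
#include <stdio.h>

#define NINSN 8

/* one flag, consulted by two unrelated consumers (model only) */
struct insn_aux { bool prune_point; };
static struct insn_aux aux[NINSN];

/* consumer 1: look for an equivalent verified state here? */
static bool should_prune(int idx) { return aux[idx].prune_point; }

/* consumer 2: record this insn in jmp history? */
static bool should_record_jmp(int idx) { return aux[idx].prune_point; }

int main(void)
{
	aux[3].prune_point = true; /* e.g. a branch target */
	for (int i = 0; i < NINSN; i++)
		printf("insn %d: prune=%d jmp=%d\n", i,
		       should_prune(i), should_record_jmp(i));
	return 0; /* the two answers can never differ */
}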
This patch splits prune and jump points into independent flags (see the sketch below). For now, all prune points are also marked as jump points, to minimize the amount of changes in this patch, but the next patch will perform some optimization of prune vs jmp point placement.
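As a sketch of the new scheme (again a simplified standalone model rather than the actual kernel code, with helper names mirroring the ones added in the diff below), the two flags live side by side and can be marked and tested independently:

#include <stdbool.h>
#include <stdio.h>

#define NINSN 8

struct insn_aux {
	bool prune_point; /* try state-equivalence pruning here */
	bool jmp_point;   /* record this insn in jmp history */
};
static struct insn_aux aux[NINSN];

static void mark_prune_point(int idx) { aux[idx].prune_point = true; }
static void mark_jmp_point(int idx) { aux[idx].jmp_point = true; }
static bool is_prune_point(int idx) { return aux[idx].prune_point; }
static bool is_jmp_point(int idx) { return aux[idx].jmp_point; }

int main(void)
{
	/* this patch: every prune point is also a jmp point ... */
	mark_prune_point(3);
	mark_jmp_point(3);
	/* ... but nothing forces that anymore */
	mark_jmp_point(5);
	for (int i = 0; i < NINSN; i++)
		printf("insn %d: prune=%d jmp=%d\n", i,
		       is_prune_point(i), is_jmp_point(i));
	return 0;
}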
No functional changes are intended.
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221206233345.438540-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
	kernel/bpf/verifier.c
[The conflicts were due to not merging commit efdb22de7dcd]
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/verifier.c        | 51 +++++++++++++++++++++++++++---------
 2 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f4d5cff3e9cb..322c0917888d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -362,6 +362,7 @@ struct bpf_insn_aux_data {
 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
 	bool prune_point;
+	bool jmp_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1765126cf98..caf284736281 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1825,6 +1825,16 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 	return 0;
 }
 
+static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+{
+	env->insn_aux_data[idx].jmp_point = true;
+}
+
+static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].jmp_point;
+}
+
 /* for any branch, call, exit record the history of jmps in the given state */
 static int push_jmp_history(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *cur)
@@ -1832,6 +1842,9 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 	u32 cnt = cur->jmp_history_cnt;
 	struct bpf_idx_pair *p;
 
+	if (!is_jmp_point(env, env->insn_idx))
+		return 0;
+
 	cnt++;
 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
 	if (!p)
@@ -8835,11 +8848,16 @@ static struct bpf_verifier_state_list **explored_state(
 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
 }
 
-static void init_explored_state(struct bpf_verifier_env *env, int idx)
+static void mark_prune_point(struct bpf_verifier_env *env, int idx)
 {
 	env->insn_aux_data[idx].prune_point = true;
 }
 
+static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].prune_point;
+}
+
 /* t, w, e - match pseudo-code above:
  *  t - index of current instruction
  *  w - next instruction
@@ -8863,9 +8881,11 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	if (e == BRANCH)
+	if (e == BRANCH) {
 		/* mark branch target for state pruning */
-		init_explored_state(env, w);
+		mark_prune_point(env, w);
+		mark_jmp_point(env, w);
+	}
 
 	if (insn_state[w] == 0) {
 		/* tree-edge */
@@ -8934,10 +8954,13 @@ static int check_cfg(struct bpf_verifier_env *env)
 				goto peek_stack;
 			else if (ret < 0)
 				goto err_free;
-			if (t + 1 < insn_cnt)
-				init_explored_state(env, t + 1);
+			if (t + 1 < insn_cnt) {
+				mark_prune_point(env, t + 1);
+				mark_jmp_point(env, t + 1);
+			}
 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
-				init_explored_state(env, t);
+				mark_prune_point(env, t);
+				mark_jmp_point(env, t);
 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
 						env, false);
 				if (ret == 1)
@@ -8961,15 +8984,19 @@ static int check_cfg(struct bpf_verifier_env *env)
 			 * but it's marked, since backtracking needs
 			 * to record jmp history in is_state_visited().
 			 */
-			init_explored_state(env, t + insns[t].off + 1);
+			mark_prune_point(env, t + insns[t].off + 1);
+			mark_jmp_point(env, t + insns[t].off + 1);
 			/* tell verifier to check for equivalent states
 			 * after every call and jump
 			 */
-			if (t + 1 < insn_cnt)
-				init_explored_state(env, t + 1);
+			if (t + 1 < insn_cnt) {
+				mark_prune_point(env, t + 1);
+				mark_jmp_point(env, t + 1);
+			}
 		} else {
 			/* conditional jump with two edges */
-			init_explored_state(env, t);
+			mark_prune_point(env, t);
+			mark_jmp_point(env, t);
 			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
 			if (ret == 1)
 				goto peek_stack;
@@ -9898,11 +9925,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	bool add_new_state = env->test_state_freq ? true : false;
 
 	cur->last_insn_idx = env->prev_insn_idx;
-	if (!env->insn_aux_data[insn_idx].prune_point)
+	if (!is_prune_point(env, insn_idx))
 		/* this 'insn_idx' instruction wasn't marked, so we will not
 		 * be doing state search here
 		 */
-		return 0;
+		return push_jmp_history(env, cur);
 
 	/* bpf progs typically have pruning point every 4 instructions
 	 * http://vger.kernel.org/bpfconf2019.html#session-1