Andrii Nakryiko (5):
  bpf: decouple prune and jump points
  bpf: encapsulate precision backtracking bookkeeping
  bpf: mostly decouple jump history management from is_state_visited()
  bpf: support non-r10 register spill/fill to/from stack in precision tracking
  bpf: handle fake register spill to stack with BPF_ST_MEM instruction
 include/linux/bpf_verifier.h                 |  46 +-
 kernel/bpf/verifier.c                        | 480 ++++++++++++------
 .../testing/selftests/bpf/verifier/precise.c |  29 +-
 3 files changed, 385 insertions(+), 170 deletions(-)
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.2-rc1
commit bffdeaa8a5af7200b0e74c9d5a41167f86626a36
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
BPF verifier marks some instructions as prune points. Currently these prune points serve two purposes.
It's a point where the verifier tries to find a previously verified state and checks the current state's equivalence against it, to short-circuit verification of the current code path.
But currently it's also the point where the jump history, used for precision backtracking, is updated. This is done so that non-linear flow of execution can be properly backtracked.
Such coupling is coincidental and unnecessary. Some prune points are not part of any non-linear jump path, so they don't need a jump history update. On the other hand, not all instructions which have to be recorded in jump history are necessarily good prune points.
This patch splits prune and jump points into independent flags. For now, all prune points are also marked as jump points to minimize the amount of changes in this patch, but the next patch will optimize prune vs jmp point placement.
No functional changes are intended.
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221206233345.438540-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
	kernel/bpf/verifier.c
[The conflicts were due to not merging commit efdb22de7dcd]
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/verifier.c        | 51 +++++++++++++++++++++++++++--------
 2 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f4d5cff3e9cb..322c0917888d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -362,6 +362,7 @@ struct bpf_insn_aux_data {
 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
 	bool prune_point;
+	bool jmp_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1765126cf98..caf284736281 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1825,6 +1825,16 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 	return 0;
 }
 
+static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+{
+	env->insn_aux_data[idx].jmp_point = true;
+}
+
+static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].jmp_point;
+}
+
 /* for any branch, call, exit record the history of jmps in the given state */
 static int push_jmp_history(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *cur)
@@ -1832,6 +1842,9 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 	u32 cnt = cur->jmp_history_cnt;
 	struct bpf_idx_pair *p;
 
+	if (!is_jmp_point(env, env->insn_idx))
+		return 0;
+
 	cnt++;
 	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
 	if (!p)
@@ -8835,11 +8848,16 @@ static struct bpf_verifier_state_list **explored_state(
 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
 }
 
-static void init_explored_state(struct bpf_verifier_env *env, int idx)
+static void mark_prune_point(struct bpf_verifier_env *env, int idx)
 {
 	env->insn_aux_data[idx].prune_point = true;
 }
 
+static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].prune_point;
+}
+
 /* t, w, e - match pseudo-code above:
  * t - index of current instruction
  * w - next instruction
@@ -8863,9 +8881,11 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	if (e == BRANCH)
+	if (e == BRANCH) {
 		/* mark branch target for state pruning */
-		init_explored_state(env, w);
+		mark_prune_point(env, w);
+		mark_jmp_point(env, w);
+	}
 
 	if (insn_state[w] == 0) {
 		/* tree-edge */
@@ -8934,10 +8954,13 @@ static int check_cfg(struct bpf_verifier_env *env)
 				goto peek_stack;
 			else if (ret < 0)
 				goto err_free;
-			if (t + 1 < insn_cnt)
-				init_explored_state(env, t + 1);
+			if (t + 1 < insn_cnt) {
+				mark_prune_point(env, t + 1);
+				mark_jmp_point(env, t + 1);
+			}
 			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
-				init_explored_state(env, t);
+				mark_prune_point(env, t);
+				mark_jmp_point(env, t);
 				ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
 						env, false);
 				if (ret == 1)
@@ -8961,15 +8984,19 @@ static int check_cfg(struct bpf_verifier_env *env)
 			 * but it's marked, since backtracking needs
 			 * to record jmp history in is_state_visited().
 			 */
-			init_explored_state(env, t + insns[t].off + 1);
+			mark_prune_point(env, t + insns[t].off + 1);
+			mark_jmp_point(env, t + insns[t].off + 1);
 			/* tell verifier to check for equivalent states
 			 * after every call and jump
 			 */
-			if (t + 1 < insn_cnt)
-				init_explored_state(env, t + 1);
+			if (t + 1 < insn_cnt) {
+				mark_prune_point(env, t + 1);
+				mark_jmp_point(env, t + 1);
+			}
 		} else {
 			/* conditional jump with two edges */
-			init_explored_state(env, t);
+			mark_prune_point(env, t);
+			mark_jmp_point(env, t);
 			ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
 			if (ret == 1)
 				goto peek_stack;
@@ -9898,11 +9925,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	bool add_new_state = env->test_state_freq ? true : false;
 
 	cur->last_insn_idx = env->prev_insn_idx;
-	if (!env->insn_aux_data[insn_idx].prune_point)
+	if (!is_prune_point(env, insn_idx))
 		/* this 'insn_idx' instruction wasn't marked, so we will not
 		 * be doing state search here
 		 */
-		return 0;
+		return push_jmp_history(env, cur);
 
 	/* bpf progs typically have pruning point every 4 instructions
 	 * http://vger.kernel.org/bpfconf2019.html#session-1
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.5-rc1
commit 407958a0e980b9e1842ab87b5a1040521e1e24e9
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Add struct backtrack_state and a straightforward API around it to keep track of the register and stack masks used and maintained during the precision backtracking process. Keeping this logic separate allows the high-level backtracking algorithm to stay cleaner, but it also sets us up to cleanly keep track of register and stack masks per frame, allowing (with some further logic adjustments) precision backpropagation across multiple frames (i.e., subprog calls).
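As a rough illustration of this bookkeeping, below is a minimal, self-contained userspace sketch (not the kernel code; the names mirror the helpers added in the diff, and the scenario in main() is made up) showing how a per-frame register mask is set and cleared while backtracking an assignment such as "r4 = r6":

#include <stdint.h>
#include <stdio.h>

#define MAX_CALL_FRAMES 8

struct backtrack_state {
	uint32_t frame;                        /* frame we are currently backtracking in */
	uint32_t reg_masks[MAX_CALL_FRAMES];   /* registers that still need precision, per frame */
	uint64_t stack_masks[MAX_CALL_FRAMES]; /* stack slots that still need precision, per frame */
};

static void bt_set_reg(struct backtrack_state *bt, uint32_t reg)
{
	bt->reg_masks[bt->frame] |= 1u << reg;
}

static void bt_clear_reg(struct backtrack_state *bt, uint32_t reg)
{
	bt->reg_masks[bt->frame] &= ~(1u << reg);
}

static int bt_empty(const struct backtrack_state *bt)
{
	uint64_t mask = 0;
	uint32_t i;

	for (i = 0; i <= bt->frame; i++)
		mask |= bt->reg_masks[i] | bt->stack_masks[i];
	return mask == 0;
}

int main(void)
{
	struct backtrack_state bt = { .frame = 0 };

	bt_set_reg(&bt, 4);    /* precision requested for r4 */
	bt_clear_reg(&bt, 4);  /* backtracking "r4 = r6" ... */
	bt_set_reg(&bt, 6);    /* ... transfers the request to r6 */
	printf("frame 0 reg mask: %#x, empty: %d\n",
	       (unsigned int)bt.reg_masks[0], bt_empty(&bt));
	return 0;
}

The kernel version additionally keeps a u64 stack-slot mask per frame and moves bt->frame up and down on subprog entry/exit, which is what enables the cross-frame precision propagation mentioned above.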
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230505043317.3629845-4-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
	kernel/bpf/verifier.c
	include/linux/bpf_verifier.h
[The conflicts were due to some minor issue]
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf_verifier.h |  14 ++
 kernel/bpf/verifier.c        | 249 +++++++++++++++++++++++++----------
 2 files changed, 196 insertions(+), 67 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 322c0917888d..11121b16cc39 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -161,6 +161,10 @@ enum bpf_stack_slot_type {
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \ + (1 << BPF_REG_3) | (1 << BPF_REG_4) | \ + (1 << BPF_REG_5)) + struct bpf_stack_state { struct bpf_reg_state spilled_ptr; u8 slot_type[BPF_REG_SIZE]; @@ -415,6 +419,15 @@ struct bpf_subprog_info { bool has_ld_abs; };
+struct bpf_verifier_env; + +struct backtrack_state { + struct bpf_verifier_env *env; + u32 frame; + u32 reg_masks[MAX_CALL_FRAMES]; + u64 stack_masks[MAX_CALL_FRAMES]; +}; + /* single container for all structs * one verifier_env per bpf_check() call */ @@ -451,6 +464,7 @@ struct bpf_verifier_env { int *insn_stack; int cur_stack; } cfg; + struct backtrack_state bt; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index caf284736281..aa13ab328aa5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -574,6 +574,12 @@ static bool is_spilled_reg(const struct bpf_stack_state *stack) return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; }
+static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack) +{ + return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && + stack->spilled_ptr.type == SCALAR_VALUE; +} + static void scrub_spilled_slot(u8 *stype) { if (*stype != STACK_INVALID) @@ -1873,12 +1879,128 @@ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, return i; }
+static inline void bt_init(struct backtrack_state *bt, u32 frame) +{ + bt->frame = frame; +} + +static inline void bt_reset(struct backtrack_state *bt) +{ + struct bpf_verifier_env *env = bt->env; + + memset(bt, 0, sizeof(*bt)); + bt->env = env; +} + +static inline u32 bt_empty(struct backtrack_state *bt) +{ + u64 mask = 0; + int i; + + for (i = 0; i <= bt->frame; i++) + mask |= bt->reg_masks[i] | bt->stack_masks[i]; + + return mask == 0; +} + +static inline int bt_subprog_enter(struct backtrack_state *bt) +{ + if (bt->frame == MAX_CALL_FRAMES - 1) { + verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); + WARN_ONCE(1, "verifier backtracking bug"); + return -EFAULT; + } + bt->frame++; + return 0; +} + +static inline int bt_subprog_exit(struct backtrack_state *bt) +{ + if (bt->frame == 0) { + verbose(bt->env, "BUG subprog exit from frame 0\n"); + WARN_ONCE(1, "verifier backtracking bug"); + return -EFAULT; + } + bt->frame--; + return 0; +} + +static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) +{ + bt->reg_masks[frame] |= 1 << reg; +} + +static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) +{ + bt->reg_masks[frame] &= ~(1 << reg); +} + +static inline void bt_set_reg(struct backtrack_state *bt, u32 reg) +{ + bt_set_frame_reg(bt, bt->frame, reg); +} + +static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg) +{ + bt_clear_frame_reg(bt, bt->frame, reg); +} + +static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) +{ + bt->stack_masks[frame] |= 1ull << slot; +} + +static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) +{ + bt->stack_masks[frame] &= ~(1ull << slot); +} + +static inline void bt_set_slot(struct backtrack_state *bt, u32 slot) +{ + bt_set_frame_slot(bt, bt->frame, slot); +} + +static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot) +{ + bt_clear_frame_slot(bt, bt->frame, slot); +} + +static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame) +{ + return bt->reg_masks[frame]; +} + +static inline u32 bt_reg_mask(struct backtrack_state *bt) +{ + return bt->reg_masks[bt->frame]; +} + +static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame) +{ + return bt->stack_masks[frame]; +} + +static inline u64 bt_stack_mask(struct backtrack_state *bt) +{ + return bt->stack_masks[bt->frame]; +} + +static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) +{ + return bt->reg_masks[bt->frame] & (1 << reg); +} + +static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) +{ + return bt->stack_masks[bt->frame] & (1ull << slot); +} + /* For given verifier state backtrack_insn() is called from the last insn to * the first insn. Its purpose is to compute a bitmask of registers and * stack slots that needs precision in the parent verifier state. */ static int backtrack_insn(struct bpf_verifier_env *env, int idx, - u32 *reg_mask, u64 *stack_mask) + struct backtrack_state *bt) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, @@ -1888,20 +2010,20 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, u8 class = BPF_CLASS(insn->code); u8 opcode = BPF_OP(insn->code); u8 mode = BPF_MODE(insn->code); - u32 dreg = 1u << insn->dst_reg; - u32 sreg = 1u << insn->src_reg; + u32 dreg = insn->dst_reg; + u32 sreg = insn->src_reg; u32 spi;
if (insn->code == 0) return 0; if (env->log.level & BPF_LOG_LEVEL) { - verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); + verbose(env, "regs=%x stack=%llx before ", bt_reg_mask(bt), bt_stack_mask(bt)); verbose(env, "%d: ", idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); }
if (class == BPF_ALU || class == BPF_ALU64) { - if (!(*reg_mask & dreg)) + if (!bt_is_reg_set(bt, dreg)) return 0; if (opcode == BPF_END || opcode == BPF_NEG) { /* sreg is reserved and unused @@ -1914,8 +2036,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, * dreg needs precision after this insn * sreg needs precision before this insn */ - *reg_mask &= ~dreg; - *reg_mask |= sreg; + bt_clear_reg(bt, dreg); + bt_set_reg(bt, sreg); } else { /* dreg = K * dreg needs precision after this insn. @@ -1923,7 +2045,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, * as precise=true in this verifier state. * No further markings in parent are necessary */ - *reg_mask &= ~dreg; + bt_clear_reg(bt, dreg); } } else { if (BPF_SRC(insn->code) == BPF_X) { @@ -1931,15 +2053,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, * both dreg and sreg need precision * before this insn */ - *reg_mask |= sreg; + bt_set_reg(bt, sreg); } /* else dreg += K * dreg still needs precision before this insn */ } } else if (class == BPF_LDX) { - if (!(*reg_mask & dreg)) + if (!bt_is_reg_set(bt, dreg)) return 0; - *reg_mask &= ~dreg; + bt_clear_reg(bt, dreg);
/* scalars can only be spilled into stack w/o losing precision. * Load from any other memory can be zero extended. @@ -1960,9 +2082,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } - *stack_mask |= 1ull << spi; + bt_set_slot(bt, spi); } else if (class == BPF_STX || class == BPF_ST) { - if (*reg_mask & dreg) + if (bt_is_reg_set(bt, dreg)) /* stx & st shouldn't be using _scalar_ dst_reg * to access memory. It means backtracking * encountered a case of pointer subtraction. @@ -1977,29 +2099,29 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } - if (!(*stack_mask & (1ull << spi))) + if (!bt_is_slot_set(bt, spi)) return 0; - *stack_mask &= ~(1ull << spi); + bt_clear_slot(bt, spi); if (class == BPF_STX) - *reg_mask |= sreg; + bt_set_reg(bt, sreg); } else if (class == BPF_JMP || class == BPF_JMP32) { if (opcode == BPF_CALL) { if (insn->src_reg == BPF_PSEUDO_CALL) return -ENOTSUPP; /* regular helper call sets R0 */ - *reg_mask &= ~1; - if (*reg_mask & 0x3f) { + bt_clear_reg(bt, BPF_REG_0); + if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { /* if backtracing was looking for registers R1-R5 * they should have been found already. */ - verbose(env, "BUG regs %x\n", *reg_mask); + verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } } else if (opcode == BPF_EXIT) { return -ENOTSUPP; } else if (BPF_SRC(insn->code) == BPF_X) { - if (!(*reg_mask & (dreg | sreg))) + if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg)) return 0; /* dreg <cond> sreg * Both dreg and sreg need precision before @@ -2007,7 +2129,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, * before it would be equally necessary to * propagate it to dreg. */ - *reg_mask |= (sreg | dreg); + bt_set_reg(bt, dreg); + bt_set_reg(bt, sreg); /* else dreg <cond> K * Only dreg still needs precision before * this insn, so for the K-based conditional @@ -2015,9 +2138,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ } } else if (class == BPF_LD) { - if (!(*reg_mask & dreg)) + if (!bt_is_reg_set(bt, dreg)) return 0; - *reg_mask &= ~dreg; + bt_clear_reg(bt, dreg); /* It's ld_imm64 or ld_abs or ld_ind. * For ld_imm64 no further tracking of precision * into parent is necessary @@ -2230,20 +2353,21 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { + struct backtrack_state *bt = &env->bt; struct bpf_verifier_state *st = env->cur_state; int first_idx = st->first_insn_idx; int last_idx = env->insn_idx; struct bpf_func_state *func; struct bpf_reg_state *reg; - u32 reg_mask = regno >= 0 ? 1u << regno : 0; - u64 stack_mask = spi >= 0 ? 1ull << spi : 0; bool skip_first = true; - bool new_marks = false; int i, err;
if (!env->bpf_capable) return 0;
+ /* set frame number from which we are starting to backtrack */ + bt_init(bt, frame); + /* Do sanity checks against current state of register and/or stack * slot, but don't set precise flag in current state, as precision * tracking in the current state is unnecessary. @@ -2255,26 +2379,17 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } - new_marks = true; + bt_set_reg(bt, regno); }
while (spi >= 0) { - if (!is_spilled_reg(&func->stack[spi])) { - stack_mask = 0; + if (!is_spilled_scalar_reg(&func->stack[spi])) break; - } - reg = &func->stack[spi].spilled_ptr; - if (reg->type != SCALAR_VALUE) { - stack_mask = 0; - break; - } - new_marks = true; + bt_set_slot(bt, spi); break; }
- if (!new_marks) - return 0; - if (!reg_mask && !stack_mask) + if (bt_empty(bt)) return 0;
for (;;) { @@ -2293,12 +2408,13 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (st->curframe == 0 && st->frame[0]->subprogno > 0 && st->frame[0]->callsite == BPF_MAIN_FUNC && - stack_mask == 0 && (reg_mask & ~0x3e) == 0) { - bitmap_from_u64(mask, reg_mask); + bt_stack_mask(bt) == 0 && + (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) { + bitmap_from_u64(mask, bt_reg_mask(bt)); for_each_set_bit(i, mask, 32) { reg = &st->frame[0]->regs[i]; if (reg->type != SCALAR_VALUE) { - reg_mask &= ~(1u << i); + bt_clear_reg(bt, i); continue; } reg->precise = true; @@ -2306,8 +2422,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r return 0; }
- verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n", - st->frame[0]->subprogno, reg_mask, stack_mask); + verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", + st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } @@ -2317,15 +2433,16 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r err = 0; skip_first = false; } else { - err = backtrack_insn(env, i, ®_mask, &stack_mask); + err = backtrack_insn(env, i, bt); } if (err == -ENOTSUPP) { mark_all_scalars_precise(env, st); + bt_reset(bt); return 0; } else if (err) { return err; } - if (!reg_mask && !stack_mask) + if (bt_empty(bt)) /* Found assignment(s) into tracked register in this state. * Since this state is already marked, just return. * Nothing to be tracked further in the parent state. @@ -2350,21 +2467,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (!st) break;
- new_marks = false; func = st->frame[frame]; - bitmap_from_u64(mask, reg_mask); + bitmap_from_u64(mask, bt_reg_mask(bt)); for_each_set_bit(i, mask, 32) { reg = &func->regs[i]; if (reg->type != SCALAR_VALUE) { - reg_mask &= ~(1u << i); + bt_clear_reg(bt, i); continue; } - if (!reg->precise) - new_marks = true; - reg->precise = true; + if (reg->precise) + bt_clear_reg(bt, i); + else + reg->precise = true; }
- bitmap_from_u64(mask, stack_mask); + bitmap_from_u64(mask, bt_stack_mask(bt)); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { /* the sequence of instructions: @@ -2381,32 +2498,28 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r * In such case fallback to conservative. */ mark_all_scalars_precise(env, st); + bt_reset(bt); return 0; }
- if (!is_spilled_reg(&func->stack[i])) { - stack_mask &= ~(1ull << i); + if (!is_spilled_scalar_reg(&func->stack[i])) { + bt_clear_slot(bt, i); continue; } reg = &func->stack[i].spilled_ptr; - if (reg->type != SCALAR_VALUE) { - stack_mask &= ~(1ull << i); - continue; - } - if (!reg->precise) - new_marks = true; - reg->precise = true; + if (reg->precise) + bt_clear_slot(bt, i); + else + reg->precise = true; } if (env->log.level & BPF_LOG_LEVEL) { print_verifier_state(env, func); verbose(env, "parent %s regs=%x stack=%llx marks\n", - new_marks ? "didn't have" : "already had", - reg_mask, stack_mask); + !bt_empty(bt) ? "didn't have" : "already had", + bt_reg_mask(bt), bt_stack_mask(bt)); }
- if (!reg_mask && !stack_mask) - break; - if (!new_marks) + if (bt_empty(bt)) break;
last_idx = st->last_insn_idx; @@ -12755,6 +12868,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, return -ENOMEM; log = &env->log;
+ env->bt.env = env; + len = (*prog)->len; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.2-rc1
commit a095f421057e22853022cb644b1589d0dd0e756e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Jump history updating and state equivalence checks are conceptually independent, so move push_jmp_history() out of is_state_visited(). Also, make the decision about whether to perform state equivalence checks one layer higher, in do_check(), so that is_state_visited() now unconditionally performs state checks.
push_jmp_history() should be performed after state checks. There is just one small non-uniformity: when is_state_visited() finds an already validated equivalent state, it propagates precision marks to the current state's parent chain. For this to work correctly, jump history has to be updated, so is_state_visited() does that internally.
But if no equivalent verified state is found, jump history has to be updated in a newly cloned child state, so is_jmp_point() + push_jmp_history() is performed after is_state_visited() has returned zero, which means "proceed with validation".
There are no functional changes. The split is not strictly necessary, but it feels right to decouple these two processes.
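To make the resulting control flow concrete, here is a toy, self-contained model (illustration only, with stand-in functions; this is not verifier code) of how do_check() ends up consulting the two markers independently after this patch:

#include <stdbool.h>
#include <stdio.h>

struct insn_aux { bool prune_point; bool jmp_point; };

/* stand-ins for is_state_visited() and push_jmp_history() */
static int check_equivalent_state(int idx) { printf("%d: state search\n", idx); return 0; }
static int record_jmp_history(int idx)     { printf("%d: record jmp history\n", idx); return 0; }

int main(void)
{
	/* insn 0: prune + jmp point, insn 1: jmp point only, insn 2: neither */
	struct insn_aux aux[3] = { { true, true }, { false, true }, { false, false } };
	int idx, err;

	for (idx = 0; idx < 3; idx++) {
		if (aux[idx].prune_point) {
			err = check_equivalent_state(idx);
			if (err < 0)
				return 1;
			if (err == 1)
				continue; /* equivalent state found: prune this path */
		}
		if (aux[idx].jmp_point && record_jmp_history(idx))
			return 1; /* allocation failure */
	}
	return 0;
}

The point is simply that the state search and the jump history push are now two independent, per-instruction decisions; the real code in the diff below adds the logging and the process_bpf_exit handling on top of this skeleton.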
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221206233345.438540-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 kernel/bpf/verifier.c | 49 +++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index aa13ab328aa5..de4a3cb0df46 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10037,13 +10037,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	int i, j, err, states_cnt = 0;
 	bool add_new_state = env->test_state_freq ? true : false;
 
-	cur->last_insn_idx = env->prev_insn_idx;
-	if (!is_prune_point(env, insn_idx))
-		/* this 'insn_idx' instruction wasn't marked, so we will not
-		 * be doing state search here
-		 */
-		return push_jmp_history(env, cur);
-
 	/* bpf progs typically have pruning point every 4 instructions
 	 * http://vger.kernel.org/bpfconf2019.html#session-1
 	 * Do not add new state for future pruning if the verifier hasn't seen
@@ -10162,10 +10155,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	env->max_states_per_insn = states_cnt;
 
 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
-		return push_jmp_history(env, cur);
+		return 0;
 
 	if (!add_new_state)
-		return push_jmp_history(env, cur);
+		return 0;
 
 	/* There were no equivalent states, remember the current one.
 	 * Technically the current state is not proven to be safe yet,
@@ -10305,21 +10298,31 @@ static int do_check(struct bpf_verifier_env *env)
 				return -E2BIG;
 			}
 
-		err = is_state_visited(env, env->insn_idx);
-		if (err < 0)
-			return err;
-		if (err == 1) {
-			/* found equivalent state, can prune the search */
-			if (env->log.level & BPF_LOG_LEVEL) {
-				if (do_print_state)
-					verbose(env, "\nfrom %d to %d%s: safe\n",
-						env->prev_insn_idx, env->insn_idx,
-						env->cur_state->speculative ?
-						" (speculative execution)" : "");
-				else
-					verbose(env, "%d: safe\n", env->insn_idx);
+		state->last_insn_idx = env->prev_insn_idx;
+
+		if (is_prune_point(env, env->insn_idx)) {
+			err = is_state_visited(env, env->insn_idx);
+			if (err < 0)
+				return err;
+			if (err == 1) {
+				/* found equivalent state, can prune the search */
+				if (env->log.level & BPF_LOG_LEVEL) {
+					if (do_print_state)
+						verbose(env, "\nfrom %d to %d%s: safe\n",
+							env->prev_insn_idx, env->insn_idx,
+							env->cur_state->speculative ?
+							" (speculative execution)" : "");
+					else
+						verbose(env, "%d: safe\n", env->insn_idx);
+				}
+				goto process_bpf_exit;
 			}
-			goto process_bpf_exit;
+		}
+
+		if (is_jmp_point(env, env->insn_idx)) {
+			err = push_jmp_history(env, state);
+			if (err)
+				return err;
 		}
 
 		if (signal_pending(current))
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.8-rc1
commit 41f6f64e6999a837048b1bd13a2f8742964eca6b
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Use instruction (jump) history to record instructions that performed register spill/fill to/from stack, regardless of whether this was done through the read-only r10 register or through any other register after copying r10 into it *and* potentially adjusting the offset.
To make this work reliably, we push extra per-instruction flags into the instruction history, encoding the stack slot index (spi) and the stack frame number in 10 extra flag bits taken away from prev_idx in the instruction history. We don't touch the idx field for maximum performance, as it's checked most frequently during backtracking.
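For clarity, here is a standalone sketch of that encoding (the constants and helpers mirror the ones added in the diff below; the frame/slot numbers in main() are made up purely for illustration):

#include <assert.h>
#include <stdio.h>

#define INSN_F_FRAMENO_MASK 0x7    /* 3 bits: MAX_CALL_FRAMES is 8 */
#define INSN_F_SPI_MASK     0x3f   /* 6 bits: 512-byte stack / 8-byte slots = 64 slots */
#define INSN_F_SPI_SHIFT    3
#define INSN_F_STACK_ACCESS (1 << 9)

static int insn_stack_access_flags(int frameno, int spi)
{
	return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
}

static int insn_stack_access_spi(int insn_flags)
{
	return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
}

static int insn_stack_access_frameno(int insn_flags)
{
	return insn_flags & INSN_F_FRAMENO_MASK;
}

int main(void)
{
	/* e.g. a spill into stack slot 5 of call frame 2 */
	int flags = insn_stack_access_flags(2, 5);

	assert(flags & INSN_F_STACK_ACCESS);
	assert(insn_stack_access_spi(flags) == 5);
	assert(insn_stack_access_frameno(flags) == 2);
	printf("flags=%#x spi=%d frameno=%d\n", flags,
	       insn_stack_access_spi(flags), insn_stack_access_frameno(flags));
	return 0;
}

Everything fits in 10 bits (1 access bit + 6 spi bits + 3 frame bits), which is exactly what is carved out of the prev_idx field described above.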
This change removes basically the last remaining practical limitation of the precision backtracking logic in the BPF verifier. It fixes known deficiencies, but also opens up new opportunities to reduce the number of verified states, explored in subsequent patches.
There are only three differences in selftests' BPF object files according to veristat, all in the positive direction (fewer states).
File                                    Program        Insns (A)  Insns (B)  Insns (DIFF)   States (A)  States (B)  States (DIFF)
--------------------------------------  -------------  ---------  ---------  -------------  ----------  ----------  -------------
test_cls_redirect_dynptr.bpf.linked3.o  cls_redirect        2987       2864  -123 (-4.12%)         240         231    -9 (-3.75%)
xdp_synproxy_kern.bpf.linked3.o         syncookie_tc       82848      82661  -187 (-0.23%)        5107        5073   -34 (-0.67%)
xdp_synproxy_kern.bpf.linked3.o         syncookie_xdp      85116      84964  -152 (-0.18%)        5162        5130   -32 (-0.62%)
Note, I avoided renaming jmp_history to the more generic insn_hist to minimize the number of lines changed and potential merge conflicts between the bpf and bpf-next trees.
Notice also that the cur_hist_ent pointer is reset to NULL at the beginning of the instruction verification loop. This pointer avoids the problem of relying on the last jump history entry's insn_idx to determine whether we already have an entry for the current instruction or not. It can happen that we added a jump history entry because the current instruction is_jmp_point(), but we also need to add instruction flags for stack access. In this case, we don't want two entries, so we need to reuse the last added entry, if it is present.
Relying on insn_idx comparison has the same ambiguity problem as the one that was fixed recently in [0], so we avoid that.
[0] https://patchwork.kernel.org/project/netdevbpf/patch/20231110002638.4168352-...
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Reported-by: Tao Lyu <tao.lyu@epfl.ch>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231205184248.1502704-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
	kernel/bpf/verifier.c
	include/linux/bpf_verifier.h
	tools/testing/selftests/bpf/verifier/precise.c
	tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
[The conflicts were due to not merging commit fde2a3882bd0]
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 include/linux/bpf_verifier.h                 |  31 +++-
 kernel/bpf/verifier.c                        | 154 ++++++++++--------
 .../testing/selftests/bpf/verifier/precise.c |  29 ++--
 3 files changed, 135 insertions(+), 79 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 11121b16cc39..c300b2816e07 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -210,9 +210,32 @@ struct bpf_func_state { struct bpf_stack_state *stack; };
-struct bpf_idx_pair { - u32 prev_idx; +#define MAX_CALL_FRAMES 8 + +/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +enum { + /* instruction references stack slot through PTR_TO_STACK register; + * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) + * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512, + * 8 bytes per slot, so slot index (spi) is [0, 63]) + */ + INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */ + + INSN_F_SPI_MASK = 0x3f, /* 6 bits */ + INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */ + + INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */ +}; + +static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); +static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); + +struct bpf_jmp_history_entry { u32 idx; + /* insn idx can't be bigger than 1 million */ + u32 prev_idx : 22; + /* special flags, e.g., whether insn is doing register stack spill/load */ + u32 flags : 10; };
struct bpf_id_pair { @@ -222,7 +245,6 @@ struct bpf_id_pair {
/* Maximum number of register states that can exist at once */ #define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) -#define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; @@ -286,7 +308,7 @@ struct bpf_verifier_state { * For most states jmp_history_cnt is [0-3]. * For loops can go up to ~40. */ - struct bpf_idx_pair *jmp_history; + struct bpf_jmp_history_entry *jmp_history; u32 jmp_history_cnt; };
@@ -465,6 +487,7 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; + struct bpf_jmp_history_entry *cur_hist_ent; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index de4a3cb0df46..8aa160ca04c4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -920,8 +920,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, int i, err;
dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, - src->jmp_history_cnt, sizeof(struct bpf_idx_pair), - GFP_USER); + src->jmp_history_cnt, sizeof(*dst_state->jmp_history), + GFP_USER); if (!dst_state->jmp_history) return -ENOMEM; dst_state->jmp_history_cnt = src->jmp_history_cnt; @@ -1831,6 +1831,21 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, return 0; }
+static int insn_stack_access_flags(int frameno, int spi) +{ + return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno; +} + +static int insn_stack_access_spi(int insn_flags) +{ + return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK; +} + +static int insn_stack_access_frameno(int insn_flags) +{ + return insn_flags & INSN_F_FRAMENO_MASK; +} + static void mark_jmp_point(struct bpf_verifier_env *env, int idx) { env->insn_aux_data[idx].jmp_point = true; @@ -1842,26 +1857,49 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) }
/* for any branch, call, exit record the history of jmps in the given state */ -static int push_jmp_history(struct bpf_verifier_env *env, - struct bpf_verifier_state *cur) +static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, + int insn_flags) { u32 cnt = cur->jmp_history_cnt; - struct bpf_idx_pair *p; + struct bpf_jmp_history_entry *p;
- if (!is_jmp_point(env, env->insn_idx)) + /* combine instruction flags if we already recorded this instruction */ + if (env->cur_hist_ent) { + /* atomic instructions push insn_flags twice, for READ and + * WRITE sides, but they should agree on stack slot + */ + WARN_ONCE((env->cur_hist_ent->flags & insn_flags) && + (env->cur_hist_ent->flags & insn_flags) != insn_flags, + "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n", + env->insn_idx, env->cur_hist_ent->flags, insn_flags); + env->cur_hist_ent->flags |= insn_flags; return 0; + }
cnt++; p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); if (!p) return -ENOMEM; - p[cnt - 1].idx = env->insn_idx; - p[cnt - 1].prev_idx = env->prev_insn_idx; cur->jmp_history = p; + + p = &cur->jmp_history[cnt - 1]; + p->idx = env->insn_idx; + p->prev_idx = env->prev_insn_idx; + p->flags = insn_flags; cur->jmp_history_cnt = cnt; + env->cur_hist_ent = p; + return 0; }
+static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st, + u32 hist_end, int insn_idx) +{ + if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) + return &st->jmp_history[hist_end - 1]; + return NULL; +} + /* Backtrack one insn at a time. If idx is not at the top of recorded * history then previous instruction came from straight line execution. */ @@ -1990,9 +2028,14 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) return bt->reg_masks[bt->frame] & (1 << reg); }
+static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot) +{ + return bt->stack_masks[frame] & (1ull << slot); +} + static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) { - return bt->stack_masks[bt->frame] & (1ull << slot); + return bt_is_frame_slot_set(bt, bt->frame, slot); }
/* For given verifier state backtrack_insn() is called from the last insn to @@ -2000,7 +2043,7 @@ static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) * stack slots that needs precision in the parent verifier state. */ static int backtrack_insn(struct bpf_verifier_env *env, int idx, - struct backtrack_state *bt) + struct bpf_jmp_history_entry *hist, struct backtrack_state *bt) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, @@ -2012,7 +2055,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, u8 mode = BPF_MODE(insn->code); u32 dreg = insn->dst_reg; u32 sreg = insn->src_reg; - u32 spi; + u32 spi, fr;
if (insn->code == 0) return 0; @@ -2069,20 +2112,16 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, * by 'precise' mark in corresponding register of this state. * No further tracking necessary. */ - if (insn->src_reg != BPF_REG_FP) + if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) return 0;
/* dreg = *(u64 *)[fp - off] was a fill from the stack. * that [fp - off] slot contains scalar that needs to be * tracked with precision */ - spi = (-insn->off - 1) / BPF_REG_SIZE; - if (spi >= 64) { - verbose(env, "BUG spi %d\n", spi); - WARN_ONCE(1, "verifier backtracking bug"); - return -EFAULT; - } - bt_set_slot(bt, spi); + spi = insn_stack_access_spi(hist->flags); + fr = insn_stack_access_frameno(hist->flags); + bt_set_frame_slot(bt, fr, spi); } else if (class == BPF_STX || class == BPF_ST) { if (bt_is_reg_set(bt, dreg)) /* stx & st shouldn't be using _scalar_ dst_reg @@ -2091,17 +2130,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ return -ENOTSUPP; /* scalars can only be spilled into stack */ - if (insn->dst_reg != BPF_REG_FP) + if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) return 0; - spi = (-insn->off - 1) / BPF_REG_SIZE; - if (spi >= 64) { - verbose(env, "BUG spi %d\n", spi); - WARN_ONCE(1, "verifier backtracking bug"); - return -EFAULT; - } - if (!bt_is_slot_set(bt, spi)) + spi = insn_stack_access_spi(hist->flags); + fr = insn_stack_access_frameno(hist->flags); + if (!bt_is_frame_slot_set(bt, fr, spi)) return 0; - bt_clear_slot(bt, spi); + bt_clear_frame_slot(bt, fr, spi); if (class == BPF_STX) bt_set_reg(bt, sreg); } else if (class == BPF_JMP || class == BPF_JMP32) { @@ -2395,6 +2430,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; + struct bpf_jmp_history_entry *hist;
if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); @@ -2433,7 +2469,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r err = 0; skip_first = false; } else { - err = backtrack_insn(env, i, bt); + hist = get_jmp_hist_entry(st, history, i); + err = backtrack_insn(env, i, hist, bt); } if (err == -ENOTSUPP) { mark_all_scalars_precise(env, st); @@ -2484,22 +2521,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r bitmap_from_u64(mask, bt_stack_mask(bt)); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { - /* the sequence of instructions: - * 2: (bf) r3 = r10 - * 3: (7b) *(u64 *)(r3 -8) = r0 - * 4: (79) r4 = *(u64 *)(r10 -8) - * doesn't contain jmps. It's backtracked - * as a single block. - * During backtracking insn 3 is not recognized as - * stack access, so at the end of backtracking - * stack slot fp-8 is still marked in stack_mask. - * However the parent state may not have accessed - * fp-8 and it's "unallocated" stack space. - * In such case fallback to conservative. - */ - mark_all_scalars_precise(env, st); - bt_reset(bt); - return 0; + verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n", + i, func->allocated_stack / BPF_REG_SIZE); + WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)"); + return -EFAULT; }
if (!is_spilled_scalar_reg(&func->stack[i])) { @@ -2649,7 +2674,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; struct bpf_reg_state *reg = NULL; - u32 dst_reg = insn->dst_reg; + int insn_flags = insn_stack_access_flags(state->frameno, spi);
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits @@ -2682,17 +2707,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { - if (dst_reg != BPF_REG_FP) { - /* The backtracking logic can only recognize explicit - * stack slot address like [fp - 8]. Other spill of - * scalar via different register has to be conervative. - * Backtrack from here and mark all registers as precise - * that contributed into 'reg' being a constant. - */ - err = mark_chain_precision(env, value_regno); - if (err) - return err; - } save_register_state(state, spi, reg, size); /* Break the relation on a narrowing spill. */ if (fls64(reg->umax_value) > BITS_PER_BYTE * size) @@ -2704,6 +2718,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, __mark_reg_known(&fake_reg, insn->imm); fake_reg.type = SCALAR_VALUE; save_register_state(state, spi, &fake_reg, size); + insn_flags = 0; /* not a register spill */ } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -2749,9 +2764,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
/* Mark slots affected by this stack write. */ for (i = 0; i < size; i++) - state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = - type; + state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; + insn_flags = 0; /* not a register spill */ } + + if (insn_flags) + return push_jmp_history(env, env->cur_state, insn_flags); return 0; }
@@ -2927,6 +2945,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; u8 *stype, type; + int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; @@ -2970,12 +2989,10 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, return -EACCES; } mark_reg_unknown(env, state->regs, dst_regno); + insn_flags = 0; /* not restoring original register state */ } state->regs[dst_regno].live |= REG_LIVE_WRITTEN; - return 0; - } - - if (dst_regno >= 0) { + } else if (dst_regno >= 0) { /* restore register state from stack */ copy_register_state(&state->regs[dst_regno], reg); /* mark reg as written since spilled pointer state likely @@ -3011,7 +3028,10 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); if (dst_regno >= 0) mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); + insn_flags = 0; /* we are not restoring spilled register */ } + if (insn_flags) + return push_jmp_history(env, env->cur_state, insn_flags); return 0; }
@@ -10101,7 +10121,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * the precision needs to be propagated back in * the current state. */ - err = err ? : push_jmp_history(env, cur); + if (is_jmp_point(env, env->insn_idx)) + err = err ? : push_jmp_history(env, cur, 0); err = err ? : propagate_precision(env, &sl->state); if (err) return err; @@ -10281,6 +10302,9 @@ static int do_check(struct bpf_verifier_env *env) u8 class; int err;
+ /* reset current history entry on each new instruction */ + env->cur_hist_ent = NULL; + env->prev_insn_idx = prev_insn_idx; if (env->insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", @@ -10320,7 +10344,7 @@ static int do_check(struct bpf_verifier_env *env) }
if (is_jmp_point(env, env->insn_idx)) { - err = push_jmp_history(env, state); + err = push_jmp_history(env, state, 0); if (err) return err; } diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 6dc8003ffc70..29ecf683bb0f 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -141,10 +141,11 @@ .result = REJECT, }, { - "precise: ST insn causing spi > allocated_stack", + "precise: ST zero to stack insn is supported", .insns = { BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0), + /* not a register spill, so we stop precision propagation for R4 here */ BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0), BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), BPF_MOV64_IMM(BPF_REG_0, -1), @@ -159,9 +160,10 @@ last_idx 4 first_idx 2\ regs=10 stack=0 before 4\ regs=10 stack=0 before 3\ - regs=0 stack=1 before 2\ last_idx 5 first_idx 5\ - parent didn't have regs=1 stack=0 marks", + parent didn't have regs=1 stack=0 marks\ + last_idx 4 first_idx 2\ + regs=1 stack=0 before 4", .result = VERBOSE_ACCEPT, .retval = -1, }, @@ -169,6 +171,8 @@ "precise: STX insn causing spi > allocated_stack", .insns = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + /* make later reg spill more interesting by having somewhat known scalar */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0), BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8), @@ -179,16 +183,21 @@ }, .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, - .errstr = "last_idx 6 first_idx 6\ + .errstr = "last_idx 7 first_idx 7\ parent didn't have regs=10 stack=0 marks\ - last_idx 5 first_idx 3\ + last_idx 6 first_idx 4\ + regs=10 stack=0 before 6\ regs=10 stack=0 before 5\ - regs=10 stack=0 before 4\ - regs=0 stack=1 before 3\ - last_idx 6 first_idx 6\ + regs=0 stack=1 before 4\ + parent didn't have regs=1 stack=0 marks\ + last_idx 3 first_idx 3\ + regs=1 stack=0 before 3\ + regs=1 stack=0 before 2\ + regs=1 stack=0 before 1\ parent didn't have regs=1 stack=0 marks\ - last_idx 5 first_idx 3\ - regs=1 stack=0 before 5", + last_idx 0 first_idx 0\ + regs=1 stack=0 before 0\ + last_idx 7 first_idx 7", .result = VERBOSE_ACCEPT, .retval = -1, },
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.8-rc1
commit 482d548d40b0af9af730e4869903d4433e44f014
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2AQ3
CVE: CVE-2023-52920
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
When the verifier validates a BPF_ST_MEM instruction that stores a known constant to the stack (e.g., *(u64 *)(r10 - 8) = 123), it effectively spills a fake register with a constant (but initially imprecise) value to a stack slot. Because the read-side logic treats it as a proper register fill from the stack slot, we need to mark such stack slot initialization with the INSN_F_STACK_ACCESS instruction flag to stop precision backtracking from missing it.
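As a rough illustration, using the same pseudo-assembly notation as above (the exact sequence is made up, modeled on the reworked selftest from the previous patch), the pattern this covers is:

	r3 = r10                  /* frame pointer copied into another register */
	*(u64 *)(r3 - 8) = 123    /* BPF_ST_MEM: stores a known constant, i.e. a fake register spill */
	r4 = *(u64 *)(r10 - 8)    /* read side fills r4 as if a register had been spilled to fp-8 */

Without the INSN_F_STACK_ACCESS flag recorded for the BPF_ST_MEM store, precision backtracking of r4 would not find the instruction that defined the fp-8 slot.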
Fixes: 41f6f64e6999 ("bpf: support non-r10 register spill/fill to/from stack in precision tracking")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20231209010958.66758-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
	kernel/bpf/verifier.c
[The conflicts were due to some minor issue]
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 kernel/bpf/verifier.c | 1 -
 1 file changed, 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8aa160ca04c4..9eafed9d61f8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2718,7 +2718,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		__mark_reg_known(&fake_reg, insn->imm);
 		fake_reg.type = SCALAR_VALUE;
 		save_register_state(state, spi, &fake_reg, size);
-		insn_flags = 0; /* not a register spill */
 	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
Feedback: The patch(es) which you have sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/13277
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/4...