From: He Fengqing <hefengqing@huawei.com>
hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 4b54e3f885101f833c4eaed6ef06e5083f96d346.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/core.c     |   3 +-
 kernel/bpf/verifier.c | 202 +++++++-----------------------------------
 2 files changed, 35 insertions(+), 170 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c5da0daa1bd20..18bf3b6c61a30 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -274,8 +274,7 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
 			insn++;
 		}
 		code = insn->code;
-		if ((BPF_CLASS(code) != BPF_JMP &&
-		     BPF_CLASS(code) != BPF_JMP32) ||
+		if (BPF_CLASS(code) != BPF_JMP ||
 		    BPF_OP(code) == BPF_EXIT)
 			continue;
 		/* Adjust offset of jmps if we cross patch boundaries. */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2af5fa1d4c3a8..8ae00ab1307c2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -855,7 +855,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
 
-		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
+		if (BPF_CLASS(code) != BPF_JMP)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
@@ -3814,49 +3814,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
  * 0 - branch will not be taken and fall-through to next insn
  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
  */
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
-			   bool is_jmp32)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 {
-	struct bpf_reg_state reg_lo;
 	s64 sval;
 
 	if (__is_pointer_value(false, reg))
 		return -1;
 
-	if (is_jmp32) {
-		reg_lo = *reg;
-		reg = &reg_lo;
-		/* For JMP32, only low 32 bits are compared, coerce_reg_to_size
-		 * could truncate high bits and update umin/umax according to
-		 * information of low bits.
-		 */
-		coerce_reg_to_size(reg, 4);
-		/* smin/smax need special handling. For example, after coerce,
-		 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
-		 * used as operand to JMP32. It is a negative number from s32's
-		 * point of view, while it is a positive number when seen as
-		 * s64. The smin/smax are kept as s64, therefore, when used with
-		 * JMP32, they need to be transformed into s32, then sign
-		 * extended back to s64.
-		 *
-		 * Also, smin/smax were copied from umin/umax. If umin/umax has
-		 * different sign bit, then min/max relationship doesn't
-		 * maintain after casting into s32, for this case, set smin/smax
-		 * to safest range.
-		 */
-		if ((reg->umax_value ^ reg->umin_value) &
-		    (1ULL << 31)) {
-			reg->smin_value = S32_MIN;
-			reg->smax_value = S32_MAX;
-		}
-		reg->smin_value = (s64)(s32)reg->smin_value;
-		reg->smax_value = (s64)(s32)reg->smax_value;
-
-		val = (u32)val;
-		sval = (s64)(s32)val;
-	} else {
-		sval = (s64)val;
-	}
+	sval = (s64)val;
 
 	switch (opcode) {
 	case BPF_JEQ:
@@ -3920,29 +3885,6 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
 	return -1;
 }
 
-/* Generate min value of the high 32-bit from TNUM info. */
-static u64 gen_hi_min(struct tnum var)
-{
-	return var.value & ~0xffffffffULL;
-}
-
-/* Generate max value of the high 32-bit from TNUM info. */
-static u64 gen_hi_max(struct tnum var)
-{
-	return (var.value | var.mask) & ~0xffffffffULL;
-}
-
-/* Return true if VAL is compared with a s64 sign extended from s32, and they
- * are with the same signedness.
- */
-static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
-{
-	return ((s32)sval >= 0 &&
-		reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
-	       ((s32)sval < 0 &&
-		reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
-}
-
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -3950,7 +3892,7 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
  */
 static void reg_set_min_max(struct bpf_reg_state *true_reg,
 			    struct bpf_reg_state *false_reg, u64 val,
-			    u8 opcode, bool is_jmp32)
+			    u8 opcode)
 {
 	s64 sval;
 
@@ -3962,8 +3904,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	 */
 	if (__is_pointer_value(false, false_reg))
 		return;
-	val = is_jmp32 ? (u32)val : val;
-	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
+	sval = (s64)val;
 
 	switch (opcode) {
 	case BPF_JEQ:
@@ -3975,15 +3917,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		 * if it is true we know the value for sure. Likewise for
 		 * BPF_JNE.
 		 */
-		if (is_jmp32) {
-			u64 old_v = reg->var_off.value;
-			u64 hi_mask = ~0xffffffffULL;
-
-			reg->var_off.value = (old_v & hi_mask) | val;
-			reg->var_off.mask &= hi_mask;
-		} else {
-			__mark_reg_known(reg, val);
-		}
+		__mark_reg_known(reg, val);
 		break;
 	}
 	case BPF_JGE:
@@ -3992,10 +3926,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JGT ? val : val - 1;
 		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
 
-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
@@ -4006,11 +3936,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
 		s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
 
-		/* If the full s64 was not sign-extended from s32 then don't
-		 * deduct further info.
-		 */
-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
@@ -4021,10 +3946,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JLT ? val : val + 1;
 		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
 
-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
 		break;
@@ -4036,8 +3957,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
 		s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
 
-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4064,15 +3983,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
  */
 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				struct bpf_reg_state *false_reg, u64 val,
-				u8 opcode, bool is_jmp32)
+				u8 opcode)
 {
 	s64 sval;
 
 	if (__is_pointer_value(false, false_reg))
 		return;
 
-	val = is_jmp32 ? (u32)val : val;
-	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+	sval = (s64)val;
 
 	switch (opcode) {
 	case BPF_JEQ:
@@ -4080,15 +3998,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 	{
 		struct bpf_reg_state *reg = opcode == BPF_JEQ ?
 					    true_reg : false_reg;
-		if (is_jmp32) {
-			u64 old_v = reg->var_off.value;
-			u64 hi_mask = ~0xffffffffULL;
-
-			reg->var_off.value = (old_v & hi_mask) | val;
-			reg->var_off.mask &= hi_mask;
-		} else {
-			__mark_reg_known(reg, val);
-		}
+
+		__mark_reg_known(reg, val);
 		break;
 	}
 	case BPF_JGE:
@@ -4097,10 +4008,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JGT ? val : val + 1;
 		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
 
-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
 		break;
@@ -4111,8 +4018,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
 		s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
 
-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4123,10 +4028,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JLT ? val : val - 1;
 		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
 
-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
@@ -4137,8 +4038,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
 		s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
 
-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
@@ -4272,10 +4171,6 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 	if (BPF_SRC(insn->code) != BPF_X)
 		return false;
 
-	/* Pointers are always 64-bit. */
-	if (BPF_CLASS(insn->code) == BPF_JMP32)
-		return false;
-
 	switch (BPF_OP(insn->code)) {
 	case BPF_JGT:
 		if ((dst_reg->type == PTR_TO_PACKET &&
@@ -4368,19 +4263,17 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	u8 opcode = BPF_OP(insn->code);
-	bool is_jmp32;
 	int pred = -1;
 	int err;
 
-	/* Only conditional jumps are expected to reach here. */
-	if (opcode == BPF_JA || opcode > BPF_JSLE) {
-		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
+	if (opcode > BPF_JSLE) {
+		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
 		return -EINVAL;
 	}
 
 	if (BPF_SRC(insn->code) == BPF_X) {
 		if (insn->imm != 0) {
-			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+			verbose(env, "BPF_JMP uses reserved fields\n");
 			return -EINVAL;
 		}
 
@@ -4397,7 +4290,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		src_reg = &regs[insn->src_reg];
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
-			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+			verbose(env, "BPF_JMP uses reserved fields\n");
 			return -EINVAL;
 		}
 	}
@@ -4408,14 +4301,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return err;
 
 	dst_reg = &regs[insn->dst_reg];
-	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 
 	if (BPF_SRC(insn->code) == BPF_K)
-		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
+		pred = is_branch_taken(dst_reg, insn->imm, opcode);
 	else if (src_reg->type == SCALAR_VALUE &&
 		 tnum_is_const(src_reg->var_off))
 		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
-				       opcode, is_jmp32);
+				       opcode);
 
 	if (pred == 1) {
 		/* Only follow the goto, ignore fall-through. If needed, push
@@ -4426,7 +4318,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
 					       *insn_idx))
 			return -EFAULT;
-		*insn_idx += insn->off;
 		return 0;
 	} else if (pred == 0) {
@@ -4457,51 +4348,30 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	 * comparable.
 	 */
 	if (BPF_SRC(insn->code) == BPF_X) {
-		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
-		struct bpf_reg_state lo_reg0 = *dst_reg;
-		struct bpf_reg_state lo_reg1 = *src_reg;
-		struct bpf_reg_state *src_lo, *dst_lo;
-
-		dst_lo = &lo_reg0;
-		src_lo = &lo_reg1;
-		coerce_reg_to_size(dst_lo, 4);
-		coerce_reg_to_size(src_lo, 4);
-
 		if (dst_reg->type == SCALAR_VALUE &&
-		    src_reg->type == SCALAR_VALUE) {
-			if (tnum_is_const(src_reg->var_off) ||
-			    (is_jmp32 && tnum_is_const(src_lo->var_off)))
+		    regs[insn->src_reg].type == SCALAR_VALUE) {
+			if (tnum_is_const(regs[insn->src_reg].var_off))
 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
-						dst_reg,
-						is_jmp32
-						? src_lo->var_off.value
-						: src_reg->var_off.value,
-						opcode, is_jmp32);
-			else if (tnum_is_const(dst_reg->var_off) ||
-				 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
+						dst_reg, regs[insn->src_reg].var_off.value,
+						opcode);
+			else if (tnum_is_const(dst_reg->var_off))
 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
-						    src_reg,
-						    is_jmp32
-						    ? dst_lo->var_off.value
-						    : dst_reg->var_off.value,
-						    opcode, is_jmp32);
-			else if (!is_jmp32 &&
-				 (opcode == BPF_JEQ || opcode == BPF_JNE))
+						    &regs[insn->src_reg],
+						    dst_reg->var_off.value, opcode);
+			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
 				/* Comparing for equality, we can combine knowledge */
 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
 						    &other_branch_regs[insn->dst_reg],
-						    src_reg, dst_reg, opcode);
+						    &regs[insn->src_reg],
+						    &regs[insn->dst_reg], opcode);
 		}
 	} else if (dst_reg->type == SCALAR_VALUE) {
 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
-					dst_reg, insn->imm, opcode, is_jmp32);
+					dst_reg, insn->imm, opcode);
 	}
 
-	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
-	 * NOTE: these optimizations below are related with pointer comparison
-	 *       which will never be JMP32.
-	 */
-	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
+	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
+	if (BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
 		/* Mark all identical map registers in each branch as either
@@ -4828,8 +4698,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 		goto check_state;
 	t = insn_stack[cur_stack - 1];
 
-	if (BPF_CLASS(insns[t].code) == BPF_JMP ||
-	    BPF_CLASS(insns[t].code) == BPF_JMP32) {
+	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
 		u8 opcode = BPF_OP(insns[t].code);
 
 		if (opcode == BPF_EXIT) {
@@ -5560,7 +5429,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-		} else if (class == BPF_JMP || class == BPF_JMP32) {
+		} else if (class == BPF_JMP) {
 			u8 opcode = BPF_OP(insn->code);
 
 			if (opcode == BPF_CALL) {
@@ -5568,8 +5437,7 @@ static int do_check(struct bpf_verifier_env *env)
 				    insn->off != 0 ||
 				    (insn->src_reg != BPF_REG_0 &&
 				     insn->src_reg != BPF_PSEUDO_CALL) ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_CALL uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -5585,8 +5453,7 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -5598,8 +5465,7 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_EXIT uses reserved fields\n");
 					return -EINVAL;
 				}
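
A note for reviewers unfamiliar with the 32-bit comparison subtlety behind the code being removed: the deleted verifier comment explains that a bound such as 0x00000000ffffffffLL is a large positive number when read as an s64 but becomes -1 once only the low 32 bits are considered, which is why the JMP32 path re-derived smin/smax after truncation. The following stand-alone C sketch (illustration only, not part of the diff above) demonstrates that truncation and sign-extension behaviour:

/* Illustration of the s32/s64 view described in the removed comment:
 * the same 64-bit value is positive as s64 but -1 after truncating to
 * the low 32 bits and sign-extending back, which is why the reverted
 * code clamped smin/smax to the s32 range before a 32-bit compare.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 0x00000000ffffffffULL;

	int64_t as_s64 = (int64_t)v;            /* 4294967295, positive */
	int64_t as_s32 = (int64_t)(int32_t)v;   /* -1 after 32-bit truncation */

	printf("as s64: %lld, as s32 sign-extended: %lld\n",
	       (long long)as_s64, (long long)as_s32);
	return 0;
}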