From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 946dd60de74146a418f62275e5a6f83496f74dcd.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/verifier.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 962dc7c48430f..61a535eec0a9b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6322,7 +6322,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
 			struct bpf_insn *patchlet;
 			struct bpf_insn chk_and_div[] = {
-				/* [R,W]x div 0 -> 0 */
+				/* Rx div 0 -> 0 */
 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
 					     BPF_JNE | BPF_K, insn->src_reg,
 					     0, 2, 0),
@@ -6331,18 +6331,16 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 				*insn,
 			};
 			struct bpf_insn chk_and_mod[] = {
-				/* [R,W]x mod 0 -> [R,W]x */
+				/* Rx mod 0 -> Rx */
 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
 					     BPF_JEQ | BPF_K, insn->src_reg,
-					     0, 1 + (is64 ? 0 : 1), 0),
+					     0, 1, 0),
 				*insn,
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
 			};

 			patchlet = isdiv ? chk_and_div : chk_and_mod;
 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+				      ARRAY_SIZE(chk_and_mod);

 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 7a310f13f41bd986c9d44b6216e9b24a586c14bd.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/verifier.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 61a535eec0a9b..2af5fa1d4c3a8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6319,28 +6319,30 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
-			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
-			struct bpf_insn *patchlet;
-			struct bpf_insn chk_and_div[] = {
+			struct bpf_insn mask_and_div[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
 				/* Rx div 0 -> 0 */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JNE | BPF_K, insn->src_reg,
-					     0, 2, 0),
+				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
 				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
 				*insn,
 			};
-			struct bpf_insn chk_and_mod[] = {
+			struct bpf_insn mask_and_mod[] = {
+				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
 				/* Rx mod 0 -> Rx */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, insn->src_reg,
-					     0, 1, 0),
+				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
 				*insn,
 			};
+			struct bpf_insn *patchlet;

-			patchlet = isdiv ? chk_and_div : chk_and_mod;
-			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-				      ARRAY_SIZE(chk_and_mod);
+			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
+			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+				patchlet = mask_and_div + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+			} else {
+				patchlet = mask_and_mod + (is64 ? 1 : 0);
+				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+			}

 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit d4369efef7d7506804b7578433f684be8b7089de.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/net/bpf_jit_comp.c | 37 +++++++----------------------------
 1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b7b0fd28dde5b..04d82cf2d4617 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -363,8 +363,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
-	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
-			  BPF_CLASS(code) == BPF_JMP;
+	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
 	u8 jmp_cond, reg;
 	s32 jmp_offset;
@@ -561,17 +560,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_JMP | BPF_JSLT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
 	case BPF_JMP | BPF_JSLE | BPF_X:
-	case BPF_JMP32 | BPF_JEQ | BPF_X:
-	case BPF_JMP32 | BPF_JGT | BPF_X:
-	case BPF_JMP32 | BPF_JLT | BPF_X:
-	case BPF_JMP32 | BPF_JGE | BPF_X:
-	case BPF_JMP32 | BPF_JLE | BPF_X:
-	case BPF_JMP32 | BPF_JNE | BPF_X:
-	case BPF_JMP32 | BPF_JSGT | BPF_X:
-	case BPF_JMP32 | BPF_JSLT | BPF_X:
-	case BPF_JMP32 | BPF_JSGE | BPF_X:
-	case BPF_JMP32 | BPF_JSLE | BPF_X:
-		emit(A64_CMP(is64, dst, src), ctx);
+		emit(A64_CMP(1, dst, src), ctx);
 emit_cond_jmp:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
 		check_imm19(jmp_offset);
@@ -613,8 +602,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		emit(A64_B_(jmp_cond, jmp_offset), ctx);
 		break;
 	case BPF_JMP | BPF_JSET | BPF_X:
-	case BPF_JMP32 | BPF_JSET | BPF_X:
-		emit(A64_TST(is64, dst, src), ctx);
+		emit(A64_TST(1, dst, src), ctx);
 		goto emit_cond_jmp;
 	/* IF (dst COND imm) JUMP off */
 	case BPF_JMP | BPF_JEQ | BPF_K:
@@ -627,23 +615,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_JMP | BPF_JSLT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
 	case BPF_JMP | BPF_JSLE | BPF_K:
-	case BPF_JMP32 | BPF_JEQ | BPF_K:
-	case BPF_JMP32 | BPF_JGT | BPF_K:
-	case BPF_JMP32 | BPF_JLT | BPF_K:
-	case BPF_JMP32 | BPF_JGE | BPF_K:
-	case BPF_JMP32 | BPF_JLE | BPF_K:
-	case BPF_JMP32 | BPF_JNE | BPF_K:
-	case BPF_JMP32 | BPF_JSGT | BPF_K:
-	case BPF_JMP32 | BPF_JSLT | BPF_K:
-	case BPF_JMP32 | BPF_JSGE | BPF_K:
-	case BPF_JMP32 | BPF_JSLE | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_CMP(is64, dst, tmp), ctx);
+		emit_a64_mov_i(1, tmp, imm, ctx);
+		emit(A64_CMP(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
-	case BPF_JMP32 | BPF_JSET | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_TST(is64, dst, tmp), ctx);
+		emit_a64_mov_i(1, tmp, imm, ctx);
+		emit(A64_TST(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	/* function call */
 	case BPF_JMP | BPF_CALL:
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 2a8fdb1525ec3038c6e5bc6644ae61880f95a527.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/x86/net/bpf_jit_comp.c | 46 +++++--------------------------------
 1 file changed, 6 insertions(+), 40 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f33ecbbf647b6..658869ac2e55e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -861,41 +861,20 @@ xadd:			if (is_imm8(insn->off))
 		case BPF_JMP | BPF_JSLT | BPF_X:
 		case BPF_JMP | BPF_JSGE | BPF_X:
 		case BPF_JMP | BPF_JSLE | BPF_X:
-		case BPF_JMP32 | BPF_JEQ | BPF_X:
-		case BPF_JMP32 | BPF_JNE | BPF_X:
-		case BPF_JMP32 | BPF_JGT | BPF_X:
-		case BPF_JMP32 | BPF_JLT | BPF_X:
-		case BPF_JMP32 | BPF_JGE | BPF_X:
-		case BPF_JMP32 | BPF_JLE | BPF_X:
-		case BPF_JMP32 | BPF_JSGT | BPF_X:
-		case BPF_JMP32 | BPF_JSLT | BPF_X:
-		case BPF_JMP32 | BPF_JSGE | BPF_X:
-		case BPF_JMP32 | BPF_JSLE | BPF_X:
 			/* cmp dst_reg, src_reg */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_2mod(0x48, dst_reg, src_reg));
-			else if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT1(add_2mod(0x40, dst_reg, src_reg));
-			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
+			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
+			      add_2reg(0xC0, dst_reg, src_reg));
 			goto emit_cond_jmp;

 		case BPF_JMP | BPF_JSET | BPF_X:
-		case BPF_JMP32 | BPF_JSET | BPF_X:
 			/* test dst_reg, src_reg */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_2mod(0x48, dst_reg, src_reg));
-			else if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT1(add_2mod(0x40, dst_reg, src_reg));
-			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
+			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
+			      add_2reg(0xC0, dst_reg, src_reg));
 			goto emit_cond_jmp;

 		case BPF_JMP | BPF_JSET | BPF_K:
-		case BPF_JMP32 | BPF_JSET | BPF_K:
 			/* test dst_reg, imm32 */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			EMIT1(add_1mod(0x48, dst_reg));
 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
 			goto emit_cond_jmp;

@@ -909,21 +888,8 @@ xadd:			if (is_imm8(insn->off))
 		case BPF_JMP | BPF_JSLT | BPF_K:
 		case BPF_JMP | BPF_JSGE | BPF_K:
 		case BPF_JMP | BPF_JSLE | BPF_K:
-		case BPF_JMP32 | BPF_JEQ | BPF_K:
-		case BPF_JMP32 | BPF_JNE | BPF_K:
-		case BPF_JMP32 | BPF_JGT | BPF_K:
-		case BPF_JMP32 | BPF_JLT | BPF_K:
-		case BPF_JMP32 | BPF_JGE | BPF_K:
-		case BPF_JMP32 | BPF_JLE | BPF_K:
-		case BPF_JMP32 | BPF_JSGT | BPF_K:
-		case BPF_JMP32 | BPF_JSLT | BPF_K:
-		case BPF_JMP32 | BPF_JSGE | BPF_K:
-		case BPF_JMP32 | BPF_JSLE | BPF_K:
 			/* cmp dst_reg, imm8/32 */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			EMIT1(add_1mod(0x48, dst_reg));

 			if (is_imm8(imm32))
 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit c9def05f29b5377395e39a6737c3f84be980833c.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/filter.h       | 20 --------------------
 kernel/bpf/core.c            | 21 ---------------------
 tools/include/linux/filter.h | 20 --------------------
 3 files changed, 61 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3e50453bf9d4a..1856413b7a629 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -282,26 +282,6 @@ struct sock_reuseport;
 		.off   = OFF,					\
 		.imm   = IMM })

-/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
-	((struct bpf_insn) {					\
-		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
-		.dst_reg = DST,					\
-		.src_reg = SRC,					\
-		.off   = OFF,					\
-		.imm   = 0 })
-
-/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
-	((struct bpf_insn) {					\
-		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
-		.dst_reg = DST,					\
-		.src_reg = 0,					\
-		.off   = OFF,					\
-		.imm   = IMM })
-
 /* Unconditional jumps, goto pc + off16 */

 #define BPF_JMP_A(OFF)						\
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3eee855fd231b..f965ea75aeeaa 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -807,27 +807,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 		break;

-	case BPF_JMP32 | BPF_JEQ  | BPF_K:
-	case BPF_JMP32 | BPF_JNE  | BPF_K:
-	case BPF_JMP32 | BPF_JGT  | BPF_K:
-	case BPF_JMP32 | BPF_JLT  | BPF_K:
-	case BPF_JMP32 | BPF_JGE  | BPF_K:
-	case BPF_JMP32 | BPF_JLE  | BPF_K:
-	case BPF_JMP32 | BPF_JSGT | BPF_K:
-	case BPF_JMP32 | BPF_JSLT | BPF_K:
-	case BPF_JMP32 | BPF_JSGE | BPF_K:
-	case BPF_JMP32 | BPF_JSLE | BPF_K:
-	case BPF_JMP32 | BPF_JSET | BPF_K:
-		/* Accommodate for extra offset in case of a backjump. */
-		off = from->off;
-		if (off < 0)
-			off -= 2;
-		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
-				      off);
-		break;
-
 	case BPF_LD | BPF_IMM | BPF_DW:
 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index cce0b02c0e286..af55acf73e75a 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,16 +199,6 @@
 		.off   = OFF,					\
 		.imm   = 0 })

-/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
-	((struct bpf_insn) {					\
-		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
-		.dst_reg = DST,					\
-		.src_reg = SRC,					\
-		.off   = OFF,					\
-		.imm   = 0 })
-
 /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

 #define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
@@ -219,16 +209,6 @@
 		.off   = OFF,					\
 		.imm   = IMM })

-/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
-	((struct bpf_insn) {					\
-		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
-		.dst_reg = DST,					\
-		.src_reg = 0,					\
-		.off   = OFF,					\
-		.imm   = IMM })
-
 /* Unconditional jumps, goto pc + off16 */

 #define BPF_JMP_A(OFF)						\
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 5d2e918474f1b8fb6d42c0860cc9c47b4108e05b.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/core.c | 197 +++++++++++++++++++++++++++++++---------------
 1 file changed, 134 insertions(+), 63 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f965ea75aeeaa..c5da0daa1bd20 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1001,31 +1001,6 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_2(JMP, CALL),			\
 	/* Exit instruction. */			\
 	INSN_2(JMP, EXIT),			\
-	/* 32-bit Jump instructions. */		\
-	/* Register based. */			\
-	INSN_3(JMP32, JEQ, X),			\
-	INSN_3(JMP32, JNE, X),			\
-	INSN_3(JMP32, JGT, X),			\
-	INSN_3(JMP32, JLT, X),			\
-	INSN_3(JMP32, JGE, X),			\
-	INSN_3(JMP32, JLE, X),			\
-	INSN_3(JMP32, JSGT, X),			\
-	INSN_3(JMP32, JSLT, X),			\
-	INSN_3(JMP32, JSGE, X),			\
-	INSN_3(JMP32, JSLE, X),			\
-	INSN_3(JMP32, JSET, X),			\
-	/* Immediate based. */			\
-	INSN_3(JMP32, JEQ, K),			\
-	INSN_3(JMP32, JNE, K),			\
-	INSN_3(JMP32, JGT, K),			\
-	INSN_3(JMP32, JLT, K),			\
-	INSN_3(JMP32, JGE, K),			\
-	INSN_3(JMP32, JLE, K),			\
-	INSN_3(JMP32, JSGT, K),			\
-	INSN_3(JMP32, JSLT, K),			\
-	INSN_3(JMP32, JSGE, K),			\
-	INSN_3(JMP32, JSLE, K),			\
-	INSN_3(JMP32, JSET, K),			\
 	/* Jump instructions. */		\
 	/* Register based. */			\
 	INSN_3(JMP, JEQ, X),			\
@@ -1281,49 +1256,145 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	out:
 		CONT;
 	}
+	/* JMP */
 	JMP_JA:
 		insn += insn->off;
 		CONT;
+	JMP_JEQ_X:
+		if (DST == SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JEQ_K:
+		if (DST == IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JNE_X:
+		if (DST != SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JNE_K:
+		if (DST != IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGT_X:
+		if (DST > SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGT_K:
+		if (DST > IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JLT_X:
+		if (DST < SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JLT_K:
+		if (DST < IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGE_X:
+		if (DST >= SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGE_K:
+		if (DST >= IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JLE_X:
+		if (DST <= SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JLE_K:
+		if (DST <= IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGT_X:
+		if (((s64) DST) > ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGT_K:
+		if (((s64) DST) > ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSLT_X:
+		if (((s64) DST) < ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSLT_K:
+		if (((s64) DST) < ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGE_X:
+		if (((s64) DST) >= ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGE_K:
+		if (((s64) DST) >= ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSLE_X:
+		if (((s64) DST) <= ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSLE_K:
+		if (((s64) DST) <= ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSET_X:
+		if (DST & SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSET_K:
+		if (DST & IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
 	JMP_EXIT:
 		return BPF_R0;
-	/* JMP */
-#define COND_JMP(SIGN, OPCODE, CMP_OP)			\
-	JMP_##OPCODE##_X:				\
-		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
-			insn += insn->off;		\
-			CONT_JMP;			\
-		}					\
-		CONT;					\
-	JMP32_##OPCODE##_X:				\
-		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
-			insn += insn->off;		\
-			CONT_JMP;			\
-		}					\
-		CONT;					\
-	JMP_##OPCODE##_K:				\
-		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
-			insn += insn->off;		\
-			CONT_JMP;			\
-		}					\
-		CONT;					\
-	JMP32_##OPCODE##_K:				\
-		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
-			insn += insn->off;		\
-			CONT_JMP;			\
-		}					\
-		CONT;
-	COND_JMP(u, JEQ, ==)
-	COND_JMP(u, JNE, !=)
-	COND_JMP(u, JGT, >)
-	COND_JMP(u, JLT, <)
-	COND_JMP(u, JGE, >=)
-	COND_JMP(u, JLE, <=)
-	COND_JMP(u, JSET, &)
-	COND_JMP(s, JSGT, >)
-	COND_JMP(s, JSLT, <)
-	COND_JMP(s, JSGE, >=)
-	COND_JMP(s, JSLE, <=)
-#undef COND_JMP
+	/* ST, STX and LDX*/
 	ST_NOSPEC:
 		/* Speculation barrier for mitigating Speculative Store Bypass.
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit b388f50a3a054ef3734642e1e000136edbd7976b.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 tools/bpf/bpftool/cfg.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 21014c7b1bae0..f30b3a4a840b7 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -191,11 +191,6 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
 	return false;
 }

-static bool is_jmp_insn(u8 code)
-{
-	return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
-}
-
 static bool func_partition_bb_head(struct func_node *func)
 {
 	struct bpf_insn *cur, *end;
@@ -209,7 +204,7 @@ static bool func_partition_bb_head(struct func_node *func)
 		return true;

 	for (; cur <= end; cur++) {
-		if (is_jmp_insn(cur->code)) {
+		if (BPF_CLASS(cur->code) == BPF_JMP) {
 			u8 opcode = BPF_OP(cur->code);

 			if (opcode == BPF_EXIT || opcode == BPF_CALL)
@@ -335,7 +330,7 @@ static bool func_add_bb_edges(struct func_node *func)
 		e->src = bb;

 		insn = bb->tail;
-		if (!is_jmp_insn(insn->code) ||
+		if (BPF_CLASS(insn->code) != BPF_JMP ||
 		    BPF_OP(insn->code) == BPF_EXIT) {
 			e->dst = bb_next(bb);
 			e->flags |= EDGE_FLAG_FALLTHROUGH;
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 541b870c818dc42bc964ad04255138c4efcd7d76.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/disasm.c | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 3016372d01c19..cbd75dd5992ef 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -67,7 +67,7 @@ const char *const bpf_class_string[8] = {
 	[BPF_STX]   = "stx",
 	[BPF_ALU]   = "alu",
 	[BPF_JMP]   = "jmp",
-	[BPF_JMP32] = "jmp32",
+	[BPF_RET]   = "BUG",
 	[BPF_ALU64] = "alu64",
 };

@@ -136,22 +136,23 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 		else
 			print_bpf_end_insn(verbose, cbs->private_data, insn);
 	} else if (BPF_OP(insn->code) == BPF_NEG) {
-		verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n",
-			insn->code, class == BPF_ALU ? 'w' : 'r',
-			insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
+		verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n",
+			insn->code, insn->dst_reg,
+			class == BPF_ALU ? "(u32) " : "",
 			insn->dst_reg);
 	} else if (BPF_SRC(insn->code) == BPF_X) {
-		verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n",
-			insn->code, class == BPF_ALU ? 'w' : 'r',
+		verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n",
+			insn->code, class == BPF_ALU ? "(u32) " : "",
 			insn->dst_reg,
 			bpf_alu_string[BPF_OP(insn->code) >> 4],
-			class == BPF_ALU ? 'w' : 'r',
+			class == BPF_ALU ? "(u32) " : "",
 			insn->src_reg);
 	} else {
-		verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
-			insn->code, class == BPF_ALU ? 'w' : 'r',
+		verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n",
+			insn->code, class == BPF_ALU ? "(u32) " : "",
 			insn->dst_reg,
 			bpf_alu_string[BPF_OP(insn->code) >> 4],
+			class == BPF_ALU ? "(u32) " : "",
 			insn->imm);
 	}
 } else if (class == BPF_STX) {
@@ -221,7 +222,7 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 			verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
 			return;
 		}
-	} else if (class == BPF_JMP32 || class == BPF_JMP) {
+	} else if (class == BPF_JMP) {
 		u8 opcode = BPF_OP(insn->code);

 		if (opcode == BPF_CALL) {
@@ -245,18 +246,13 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
 			verbose(cbs->private_data, "(%02x) exit\n", insn->code);
 		} else if (BPF_SRC(insn->code) == BPF_X) {
-			verbose(cbs->private_data,
-				"(%02x) if %c%d %s %c%d goto pc%+d\n",
-				insn->code, class == BPF_JMP32 ? 'w' : 'r',
-				insn->dst_reg,
+			verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n",
+				insn->code, insn->dst_reg,
 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
-				class == BPF_JMP32 ? 'w' : 'r',
 				insn->src_reg, insn->off);
 		} else {
-			verbose(cbs->private_data,
-				"(%02x) if %c%d %s 0x%x goto pc%+d\n",
-				insn->code, class == BPF_JMP32 ? 'w' : 'r',
-				insn->dst_reg,
+			verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n",
+				insn->code, insn->dst_reg,
 				bpf_jmp_string[BPF_OP(insn->code) >> 4],
 				insn->imm, insn->off);
 		}
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 4b54e3f885101f833c4eaed6ef06e5083f96d346.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/core.c     |   3 +-
 kernel/bpf/verifier.c | 202 ++++++++-----------------------------------
 2 files changed, 35 insertions(+), 170 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c5da0daa1bd20..18bf3b6c61a30 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -274,8 +274,7 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
 			insn++;
 		}
 		code = insn->code;
-		if ((BPF_CLASS(code) != BPF_JMP &&
-		     BPF_CLASS(code) != BPF_JMP32) ||
+		if (BPF_CLASS(code) != BPF_JMP ||
 		    BPF_OP(code) == BPF_EXIT)
 			continue;
 		/* Adjust offset of jmps if we cross patch boundaries. */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2af5fa1d4c3a8..8ae00ab1307c2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -855,7 +855,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;

-		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
+		if (BPF_CLASS(code) != BPF_JMP)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
@@ -3814,49 +3814,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
  *  0 - branch will not be taken and fall-through to next insn
  * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
  */
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
-			   bool is_jmp32)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 {
-	struct bpf_reg_state reg_lo;
 	s64 sval;

 	if (__is_pointer_value(false, reg))
 		return -1;

-	if (is_jmp32) {
-		reg_lo = *reg;
-		reg = &reg_lo;
-		/* For JMP32, only low 32 bits are compared, coerce_reg_to_size
-		 * could truncate high bits and update umin/umax according to
-		 * information of low bits.
-		 */
-		coerce_reg_to_size(reg, 4);
-		/* smin/smax need special handling. For example, after coerce,
-		 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
-		 * used as operand to JMP32. It is a negative number from s32's
-		 * point of view, while it is a positive number when seen as
-		 * s64. The smin/smax are kept as s64, therefore, when used with
-		 * JMP32, they need to be transformed into s32, then sign
-		 * extended back to s64.
-		 *
-		 * Also, smin/smax were copied from umin/umax. If umin/umax has
-		 * different sign bit, then min/max relationship doesn't
-		 * maintain after casting into s32, for this case, set smin/smax
-		 * to safest range.
-		 */
-		if ((reg->umax_value ^ reg->umin_value) &
-		    (1ULL << 31)) {
-			reg->smin_value = S32_MIN;
-			reg->smax_value = S32_MAX;
-		}
-		reg->smin_value = (s64)(s32)reg->smin_value;
-		reg->smax_value = (s64)(s32)reg->smax_value;
-
-		val = (u32)val;
-		sval = (s64)(s32)val;
-	} else {
-		sval = (s64)val;
-	}
+	sval = (s64)val;

 	switch (opcode) {
 	case BPF_JEQ:
@@ -3920,29 +3885,6 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
 	return -1;
 }

-/* Generate min value of the high 32-bit from TNUM info. */
-static u64 gen_hi_min(struct tnum var)
-{
-	return var.value & ~0xffffffffULL;
-}
-
-/* Generate max value of the high 32-bit from TNUM info. */
-static u64 gen_hi_max(struct tnum var)
-{
-	return (var.value | var.mask) & ~0xffffffffULL;
-}
-
-/* Return true if VAL is compared with a s64 sign extended from s32, and they
- * are with the same signedness.
- */
-static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
-{
-	return ((s32)sval >= 0 &&
-		reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
-	       ((s32)sval < 0 &&
-		reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
-}
-
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -3950,7 +3892,7 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
 static void reg_set_min_max(struct bpf_reg_state *true_reg,
 			    struct bpf_reg_state *false_reg, u64 val,
-			    u8 opcode, bool is_jmp32)
+			    u8 opcode)
 {
 	s64 sval;
@@ -3962,8 +3904,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	 */
 	if (__is_pointer_value(false, false_reg))
 		return;
-	val = is_jmp32 ? (u32)val : val;
-	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+
+	sval = (s64)val;

 	switch (opcode) {
 	case BPF_JEQ:
@@ -3975,15 +3917,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		 * if it is true we know the value for sure. Likewise for
 		 * BPF_JNE.
 		 */
-		if (is_jmp32) {
-			u64 old_v = reg->var_off.value;
-			u64 hi_mask = ~0xffffffffULL;
-
-			reg->var_off.value = (old_v & hi_mask) | val;
-			reg->var_off.mask &= hi_mask;
-		} else {
-			__mark_reg_known(reg, val);
-		}
+		__mark_reg_known(reg, val);
 		break;
 	}
 	case BPF_JGE:
@@ -3992,10 +3926,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
 		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;

-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
@@ -4006,11 +3936,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
 		s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;

-		/* If the full s64 was not sign-extended from s32 then don't
-		 * deduct further info.
-		 */
-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
@@ -4021,10 +3946,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
 		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;

-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);

@@ -4036,8 +3957,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
 		s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;

-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4064,15 +3983,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
  */
 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				struct bpf_reg_state *false_reg, u64 val,
-				u8 opcode, bool is_jmp32)
+				u8 opcode)
 {
 	s64 sval;

 	if (__is_pointer_value(false, false_reg))
 		return;

-	val = is_jmp32 ? (u32)val : val;
-	sval = is_jmp32 ? (s64)(s32)val : (s64)val;
+	sval = (s64)val;

 	switch (opcode) {
 	case BPF_JEQ:
@@ -4080,15 +3998,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 	{
 		struct bpf_reg_state *reg =
 			opcode == BPF_JEQ ? true_reg : false_reg;
-		if (is_jmp32) {
-			u64 old_v = reg->var_off.value;
-			u64 hi_mask = ~0xffffffffULL;

-			reg->var_off.value = (old_v & hi_mask) | val;
-			reg->var_off.mask &= hi_mask;
-		} else {
-			__mark_reg_known(reg, val);
-		}
+		__mark_reg_known(reg, val);
 		break;
 	}
 	case BPF_JGE:
@@ -4097,10 +4008,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umin = opcode == BPF_JGT ? val    : val + 1;
 		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;

-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
 		false_reg->umin_value = max(false_reg->umin_value, false_umin);
 		true_reg->umax_value = min(true_reg->umax_value, true_umax);
 		break;
@@ -4111,8 +4018,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smin = opcode == BPF_JSGT ? sval    : sval + 1;
 		s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;

-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smin_value = max(false_reg->smin_value, false_smin);
 		true_reg->smax_value = min(true_reg->smax_value, true_smax);
 		break;
@@ -4123,10 +4028,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		u64 false_umax = opcode == BPF_JLT ? val    : val - 1;
 		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;

-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
 		false_reg->umax_value = min(false_reg->umax_value, false_umax);
 		true_reg->umin_value = max(true_reg->umin_value, true_umin);
 		break;
@@ -4137,8 +4038,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 		s64 false_smax = opcode == BPF_JSLT ? sval    : sval - 1;
 		s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;

-		if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
-			break;
 		false_reg->smax_value = min(false_reg->smax_value, false_smax);
 		true_reg->smin_value = max(true_reg->smin_value, true_smin);
 		break;
@@ -4272,10 +4171,6 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 	if (BPF_SRC(insn->code) != BPF_X)
 		return false;

-	/* Pointers are always 64-bit. */
-	if (BPF_CLASS(insn->code) == BPF_JMP32)
-		return false;
-
 	switch (BPF_OP(insn->code)) {
 	case BPF_JGT:
 		if ((dst_reg->type == PTR_TO_PACKET &&
@@ -4368,19 +4263,17 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	u8 opcode = BPF_OP(insn->code);
-	bool is_jmp32;
 	int pred = -1;
 	int err;

-	/* Only conditional jumps are expected to reach here. */
-	if (opcode == BPF_JA || opcode > BPF_JSLE) {
-		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
+	if (opcode > BPF_JSLE) {
+		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
 		return -EINVAL;
 	}

 	if (BPF_SRC(insn->code) == BPF_X) {
 		if (insn->imm != 0) {
-			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+			verbose(env, "BPF_JMP uses reserved fields\n");
 			return -EINVAL;
 		}

@@ -4397,7 +4290,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		src_reg = &regs[insn->src_reg];
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
-			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+			verbose(env, "BPF_JMP uses reserved fields\n");
 			return -EINVAL;
 		}
 	}
@@ -4408,14 +4301,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return err;

 	dst_reg = &regs[insn->dst_reg];
-	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

 	if (BPF_SRC(insn->code) == BPF_K)
-		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
+		pred = is_branch_taken(dst_reg, insn->imm, opcode);
 	else if (src_reg->type == SCALAR_VALUE &&
 		 tnum_is_const(src_reg->var_off))
 		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
-				       opcode, is_jmp32);
+				       opcode);

 	if (pred == 1) {
 		/* Only follow the goto, ignore fall-through. If needed, push
@@ -4426,7 +4318,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
 					       *insn_idx))
 			return -EFAULT;
-
 		*insn_idx += insn->off;
 		return 0;
 	} else if (pred == 0) {
@@ -4457,51 +4348,30 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	 * comparable.
 	 */
 	if (BPF_SRC(insn->code) == BPF_X) {
-		struct bpf_reg_state *src_reg = &regs[insn->src_reg];
-		struct bpf_reg_state lo_reg0 = *dst_reg;
-		struct bpf_reg_state lo_reg1 = *src_reg;
-		struct bpf_reg_state *src_lo, *dst_lo;
-
-		dst_lo = &lo_reg0;
-		src_lo = &lo_reg1;
-		coerce_reg_to_size(dst_lo, 4);
-		coerce_reg_to_size(src_lo, 4);
-
 		if (dst_reg->type == SCALAR_VALUE &&
-		    src_reg->type == SCALAR_VALUE) {
-			if (tnum_is_const(src_reg->var_off) ||
-			    (is_jmp32 && tnum_is_const(src_lo->var_off)))
+		    regs[insn->src_reg].type == SCALAR_VALUE) {
+			if (tnum_is_const(regs[insn->src_reg].var_off))
 				reg_set_min_max(&other_branch_regs[insn->dst_reg],
-						dst_reg,
-						is_jmp32
-						? src_lo->var_off.value
-						: src_reg->var_off.value,
-						opcode, is_jmp32);
-			else if (tnum_is_const(dst_reg->var_off) ||
-				 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
+						dst_reg, regs[insn->src_reg].var_off.value,
+						opcode);
+			else if (tnum_is_const(dst_reg->var_off))
 				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
-						    src_reg,
-						    is_jmp32
-						    ? dst_lo->var_off.value
-						    : dst_reg->var_off.value,
-						    opcode, is_jmp32);
-			else if (!is_jmp32 &&
-				 (opcode == BPF_JEQ || opcode == BPF_JNE))
+						    &regs[insn->src_reg],
+						    dst_reg->var_off.value, opcode);
+			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
 				/* Comparing for equality, we can combine knowledge */
 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
 						    &other_branch_regs[insn->dst_reg],
-						    src_reg, dst_reg, opcode);
+						    &regs[insn->src_reg],
+						    &regs[insn->dst_reg], opcode);
 		}
 	} else if (dst_reg->type == SCALAR_VALUE) {
 		reg_set_min_max(&other_branch_regs[insn->dst_reg],
-					dst_reg, insn->imm, opcode, is_jmp32);
+					dst_reg, insn->imm, opcode);
 	}

-	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
-	 * NOTE: these optimizations below are related with pointer comparison
-	 *       which will never be JMP32.
-	 */
-	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
+	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
+	if (BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
 		/* Mark all identical map registers in each branch as either
@@ -4828,8 +4698,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 			goto check_state;
 		t = insn_stack[cur_stack - 1];

-		if (BPF_CLASS(insns[t].code) == BPF_JMP ||
-		    BPF_CLASS(insns[t].code) == BPF_JMP32) {
+		if (BPF_CLASS(insns[t].code) == BPF_JMP) {
 			u8 opcode = BPF_OP(insns[t].code);

 			if (opcode == BPF_EXIT) {
@@ -5560,7 +5429,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;

-		} else if (class == BPF_JMP || class == BPF_JMP32) {
+		} else if (class == BPF_JMP) {
 			u8 opcode = BPF_OP(insn->code);

 			if (opcode == BPF_CALL) {
@@ -5568,8 +5437,7 @@ static int do_check(struct bpf_verifier_env *env)
 				    insn->off != 0 ||
 				    (insn->src_reg != BPF_REG_0 &&
 				     insn->src_reg != BPF_PSEUDO_CALL) ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_CALL uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -5585,8 +5453,7 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
@@ -5598,8 +5465,7 @@ static int do_check(struct bpf_verifier_env *env)
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
-				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_EXIT uses reserved fields\n");
 					return -EINVAL;
 				}
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit f8442197bb55f72f0d461676678d0af5370b55bd.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/verifier.c | 172 ++++++++++++++++++------------------------
 1 file changed, 73 insertions(+), 99 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8ae00ab1307c2..a7395421f3437 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3816,13 +3816,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
  */
 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 {
-	s64 sval;
-
 	if (__is_pointer_value(false, reg))
 		return -1;

-	sval = (s64)val;
-
 	switch (opcode) {
 	case BPF_JEQ:
 		if (tnum_is_const(reg->var_off))
@@ -3839,9 +3835,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 			return 0;
 		break;
 	case BPF_JSGT:
-		if (reg->smin_value > sval)
+		if (reg->smin_value > (s64)val)
 			return 1;
-		else if (reg->smax_value < sval)
+		else if (reg->smax_value < (s64)val)
 			return 0;
 		break;
 	case BPF_JLT:
@@ -3851,9 +3847,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 			return 0;
 		break;
 	case BPF_JSLT:
-		if (reg->smax_value < sval)
+		if (reg->smax_value < (s64)val)
 			return 1;
-		else if (reg->smin_value >= sval)
+		else if (reg->smin_value >= (s64)val)
 			return 0;
 		break;
 	case BPF_JGE:
@@ -3863,9 +3859,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 			return 0;
 		break;
 	case BPF_JSGE:
-		if (reg->smin_value >= sval)
+		if (reg->smin_value >= (s64)val)
 			return 1;
-		else if (reg->smax_value < sval)
+		else if (reg->smax_value < (s64)val)
 			return 0;
 		break;
 	case BPF_JLE:
@@ -3875,9 +3871,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 			return 0;
 		break;
 	case BPF_JSLE:
-		if (reg->smax_value <= sval)
+		if (reg->smax_value <= (s64)val)
 			return 1;
-		else if (reg->smin_value > sval)
+		else if (reg->smin_value > (s64)val)
 			return 0;
 		break;
 	}
@@ -3894,8 +3890,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 			    struct bpf_reg_state *false_reg, u64 val,
 			    u8 opcode)
 {
-	s64 sval;
-
 	/* If the dst_reg is a pointer, we can't learn anything about its
 	 * variable offset from the compare (unless src_reg were a pointer into
 	 * the same object, but we don't bother with that.
@@ -3905,62 +3899,51 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	if (__is_pointer_value(false, false_reg))
 		return;

-	sval = (s64)val;
-
 	switch (opcode) {
 	case BPF_JEQ:
+		/* If this is false then we know nothing Jon Snow, but if it is
+		 * true then we know for sure.
+		 */
+		__mark_reg_known(true_reg, val);
+		break;
 	case BPF_JNE:
-	{
-		struct bpf_reg_state *reg =
-			opcode == BPF_JEQ ? true_reg : false_reg;
-
-		/* For BPF_JEQ, if this is false we know nothing Jon Snow, but
-		 * if it is true we know the value for sure. Likewise for
-		 * BPF_JNE.
+		/* If this is true we know nothing Jon Snow, but if it is false
+		 * we know the value for sure;
 		 */
-		__mark_reg_known(reg, val);
+		__mark_reg_known(false_reg, val);
 		break;
-	}
-	case BPF_JGE:
 	case BPF_JGT:
-	{
-		u64 false_umax = opcode == BPF_JGT ? val    : val - 1;
-		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
-
-		false_reg->umax_value = min(false_reg->umax_value, false_umax);
-		true_reg->umin_value = max(true_reg->umin_value, true_umin);
+		false_reg->umax_value = min(false_reg->umax_value, val);
+		true_reg->umin_value = max(true_reg->umin_value, val + 1);
 		break;
-	}
-	case BPF_JSGE:
 	case BPF_JSGT:
-	{
-		s64 false_smax = opcode == BPF_JSGT ? sval    : sval - 1;
-		s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
-
-		false_reg->smax_value = min(false_reg->smax_value, false_smax);
-		true_reg->smin_value = max(true_reg->smin_value, true_smin);
+		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
+		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
 		break;
-	}
-	case BPF_JLE:
 	case BPF_JLT:
-	{
-		u64 false_umin = opcode == BPF_JLT ? val    : val + 1;
-		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
-
-		false_reg->umin_value = max(false_reg->umin_value, false_umin);
-		true_reg->umax_value = min(true_reg->umax_value, true_umax);
-
+		false_reg->umin_value = max(false_reg->umin_value, val);
+		true_reg->umax_value = min(true_reg->umax_value, val - 1);
 		break;
-	}
-	case BPF_JSLE:
 	case BPF_JSLT:
-	{
-		s64 false_smin = opcode == BPF_JSLT ? sval    : sval + 1;
-		s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
-
-		false_reg->smin_value = max(false_reg->smin_value, false_smin);
-		true_reg->smax_value = min(true_reg->smax_value, true_smax);
+		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
+		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
+		break;
+	case BPF_JGE:
+		false_reg->umax_value = min(false_reg->umax_value, val - 1);
+		true_reg->umin_value = max(true_reg->umin_value, val);
+		break;
+	case BPF_JSGE:
+		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
+		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
+		break;
+	case BPF_JLE:
+		false_reg->umin_value = max(false_reg->umin_value, val + 1);
+		true_reg->umax_value = min(true_reg->umax_value, val);
+		break;
+	case BPF_JSLE:
+		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
+		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
 		break;
-	}
 	default:
 		break;
 	}
@@ -3985,63 +3968,54 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				struct bpf_reg_state *false_reg, u64 val,
 				u8 opcode)
 {
-	s64 sval;
-
 	if (__is_pointer_value(false, false_reg))
 		return;

-	sval = (s64)val;
-
 	switch (opcode) {
 	case BPF_JEQ:
+		/* If this is false then we know nothing Jon Snow, but if it is
+		 * true then we know for sure.
+		 */
+		__mark_reg_known(true_reg, val);
+		break;
 	case BPF_JNE:
-	{
-		struct bpf_reg_state *reg =
-			opcode == BPF_JEQ ? true_reg : false_reg;
-
-		__mark_reg_known(reg, val);
+		/* If this is true we know nothing Jon Snow, but if it is false
+		 * we know the value for sure;
+		 */
+		__mark_reg_known(false_reg, val);
 		break;
-	}
-	case BPF_JGE:
 	case BPF_JGT:
-	{
-		u64 false_umin = opcode == BPF_JGT ? val    : val + 1;
-		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
-
-		false_reg->umin_value = max(false_reg->umin_value, false_umin);
-		true_reg->umax_value = min(true_reg->umax_value, true_umax);
+		true_reg->umax_value = min(true_reg->umax_value, val - 1);
+		false_reg->umin_value = max(false_reg->umin_value, val);
 		break;
-	}
-	case BPF_JSGE:
 	case BPF_JSGT:
-	{
-		s64 false_smin = opcode == BPF_JSGT ? sval    : sval + 1;
-		s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
-
-		false_reg->smin_value = max(false_reg->smin_value, false_smin);
-		true_reg->smax_value = min(true_reg->smax_value, true_smax);
+		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
+		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
 		break;
-	}
-	case BPF_JLE:
 	case BPF_JLT:
-	{
-		u64 false_umax = opcode == BPF_JLT ? val    : val - 1;
-		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
-
-		false_reg->umax_value = min(false_reg->umax_value, false_umax);
-		true_reg->umin_value = max(true_reg->umin_value, true_umin);
+		true_reg->umin_value = max(true_reg->umin_value, val + 1);
+		false_reg->umax_value = min(false_reg->umax_value, val);
 		break;
-	}
-	case BPF_JSLE:
 	case BPF_JSLT:
-	{
-		s64 false_smax = opcode == BPF_JSLT ? sval    : sval - 1;
-		s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
-
-		false_reg->smax_value = min(false_reg->smax_value, false_smax);
-		true_reg->smin_value = max(true_reg->smin_value, true_smin);
+		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
+		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
+		break;
+	case BPF_JGE:
+		true_reg->umax_value = min(true_reg->umax_value, val);
+		false_reg->umin_value = max(false_reg->umin_value, val + 1);
+		break;
+	case BPF_JSGE:
+		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
+		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
+		break;
+	case BPF_JLE:
+		true_reg->umin_value = max(true_reg->umin_value, val);
+		false_reg->umax_value = min(false_reg->umax_value, val - 1);
+		break;
+	case BPF_JSLE:
+		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
+		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
 		break;
-	}
 	default:
 		break;
 	}
From: He Fengqing <hefengqing@huawei.com>

hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit 62684a24cd00250d44605b9aded87b13d3b5ec68.
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 Documentation/networking/filter.txt | 15 +++++++--------
 include/uapi/linux/bpf.h            |  1 -
 tools/include/uapi/linux/bpf.h      |  1 -
 3 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index a6caadf243725..e6b4ebb2b2438 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -863,7 +863,7 @@ Three LSB bits store instruction class which is one of:
   BPF_STX   0x03          BPF_STX   0x03
   BPF_ALU   0x04          BPF_ALU   0x04
   BPF_JMP   0x05          BPF_JMP   0x05
-  BPF_RET   0x06          BPF_JMP32 0x06
+  BPF_RET   0x06          [ class 6 unused, for future if needed ]
   BPF_MISC  0x07          BPF_ALU64 0x07

 When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ...
@@ -900,9 +900,9 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
   BPF_ARSH  0xc0  /* eBPF only: sign extending shift right */
   BPF_END   0xd0  /* eBPF only: endianness conversion */

-If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of:
+If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:

-  BPF_JA    0x00  /* BPF_JMP only */
+  BPF_JA    0x00
   BPF_JEQ   0x10
   BPF_JGT   0x20
   BPF_JGE   0x30
@@ -910,8 +910,8 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of:
   BPF_JNE   0x50  /* eBPF only: jump != */
   BPF_JSGT  0x60  /* eBPF only: signed '>' */
   BPF_JSGE  0x70  /* eBPF only: signed '>=' */
-  BPF_CALL  0x80  /* eBPF BPF_JMP only: function call */
-  BPF_EXIT  0x90  /* eBPF BPF_JMP only: function return */
+  BPF_CALL  0x80  /* eBPF only: function call */
+  BPF_EXIT  0x90  /* eBPF only: function return */
   BPF_JLT   0xa0  /* eBPF only: unsigned '<' */
   BPF_JLE   0xb0  /* eBPF only: unsigned '<=' */
   BPF_JSLT  0xc0  /* eBPF only: signed '<' */
@@ -934,9 +934,8 @@ Classic BPF wastes the whole BPF_RET class to represent a single 'ret'
 operation. Classic BPF_RET | BPF_K means copy imm32 into return register
 and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT
 in eBPF means function exit only. The eBPF program needs to store return
-value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as
-BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide
-operands for the comparisons instead.
+value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently
+unused and reserved for future use.

 For load and store instructions the 8-bit 'code' field is divided as:

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b11a42eb56e97..9a347aefbb40a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -14,7 +14,6 @@
 /* Extended instruction set based on top of classic BPF */

 /* instruction classes */
-#define BPF_JMP32	0x06	/* jmp mode in word width */
 #define BPF_ALU64	0x07	/* alu mode in double word width */

 /* ld/ldx fields */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 45b08d90cd674..58454e7693f43 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -14,7 +14,6 @@
 /* Extended instruction set based on top of classic BPF */

 /* instruction classes */
-#define BPF_JMP32	0x06	/* jmp mode in word width */
 #define BPF_ALU64	0x07	/* alu mode in double word width */

 /* ld/ldx fields */
From: Daniel Borkmann <daniel@iogearbox.net>

stable inclusion
from linux-4.19.206
commit c348d806ed1d3075af52345344243824d72c4945
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
--------------------------------
Partially undo old commit 144cd91c4c2b ("bpf: move tmp variable into ax register in interpreter"). The reason we need this here is that the AX register will be used for holding temporary state for the div/mod instructions, which the interpreter would otherwise corrupt. This causes a small +8 byte stack increase for the interpreter, but with the gain that we can use AX as a scratch register from verifier rewrites.
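As a rough C model of what this buys (illustrative only, not part of the patch; the function name and simplified semantics are assumptions based on the ALU_MOD_X handler in kernel/bpf/core.c):

  #include <stdint.h>

  /* Sketch of the interpreter's 32-bit mod handler once the scratch
   * value lives in a plain stack local ("tmp", the +8 byte increase
   * mentioned above) instead of in BPF_REG_AX: verifier rewrites can
   * then claim AX without the interpreter clobbering it. */
  static uint64_t alu_mod32_x(uint64_t dst, uint64_t src)
  {
          uint64_t tmp = (uint32_t)dst;   /* tmp = (u32) DST */

          /* the verifier's div/mod rewrite guarantees (u32)src != 0 here */
          return tmp % (uint32_t)src;     /* DST = do_div(tmp, (u32) SRC) */
  }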
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
[cascardo: This partial revert is needed in order to support using AX for
the following two commits, as there is no JMP32 on 4.19.y]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/core.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 18bf3b6c61a30..d4dc65f7bed51 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -740,9 +740,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 	 * below.
 	 *
 	 * Constant blinding is only used by JITs, not in the interpreter.
-	 * The interpreter uses AX in some occasions as a local temporary
-	 * register e.g. in DIV or MOD instructions.
-	 *
 	 * In restricted circumstances, the verifier can also use the AX
 	 * register for rewrites as long as they do not interfere with
 	 * the above cases!
@@ -1093,6 +1090,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 #undef BPF_INSN_3_LBL
 #undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
+	u64 tmp;

 #define CONT	 ({ insn++; goto select_insn; })
 #define CONT_JMP ({ insn++; goto select_insn; })
@@ -1153,36 +1151,36 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		div64_u64_rem(DST, SRC, &AX);
-		DST = AX;
+		div64_u64_rem(DST, SRC, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_X:
-		AX = (u32) DST;
-		DST = do_div(AX, (u32) SRC);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		div64_u64_rem(DST, IMM, &AX);
-		DST = AX;
+		div64_u64_rem(DST, IMM, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_K:
-		AX = (u32) DST;
-		DST = do_div(AX, (u32) IMM);
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		AX = (u32) DST;
-		do_div(AX, (u32) SRC);
-		DST = (u32) AX;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) SRC);
+		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
 		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		AX = (u32) DST;
-		do_div(AX, (u32) IMM);
-		DST = (u32) AX;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) IMM);
+		DST = (u32) tmp;
 		CONT;
 	ALU_END_TO_BE:
 		switch (IMM) {
From: Daniel Borkmann <daniel@iogearbox.net>

stable inclusion
from linux-4.19.206
commit 8313432df224d926590731ec3ace3e1bd7bc4a1a
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
--------------------------------
Commit e88b2c6e5a4d9ce30d75391e4d950da74bb2bd90 upstream.
While reviewing a different fix, John and I noticed an oddity in one of the BPF program dumps that stood out, for example:
  # bpftool p d x i 13
   0: (b7) r0 = 808464450
   1: (b4) w4 = 808464432
   2: (bc) w0 = w0
   3: (15) if r0 == 0x0 goto pc+1
   4: (9c) w4 %= w0
  [...]
In line 2 we noticed that the mov32 would 32-bit truncate the original src register for the div/mod operation. While for the two operations the dst register is typically marked unknown, e.g. from adjust_scalar_min_max_vals(), the src register is not, and thus the verifier keeps tracking the original bounds. Simplified:
  0: R1=ctx(id=0,off=0,imm=0) R10=fp0
  0: (b7) r0 = -1
  1: R0_w=invP-1 R1=ctx(id=0,off=0,imm=0) R10=fp0
  1: (b7) r1 = -1
  2: R0_w=invP-1 R1_w=invP-1 R10=fp0
  2: (3c) w0 /= w1
  3: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1_w=invP-1 R10=fp0
  3: (77) r1 >>= 32
  4: R0_w=invP(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R1_w=invP4294967295 R10=fp0
  4: (bf) r0 = r1
  5: R0_w=invP4294967295 R1_w=invP4294967295 R10=fp0
  5: (95) exit
  processed 6 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
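The same divergence can be modeled in plain C (a hedged sketch mirroring the trace above; the casts stand in for the inserted mov32 truncation and the 32-bit division):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t r0 = (uint64_t)-1, r1 = (uint64_t)-1;

          r1 = (uint32_t)r1;                 /* old rewrite: "w1 = w1" truncates src */
          r0 = (uint32_t)r0 / (uint32_t)r1;  /* w0 /= w1 */
          r1 >>= 32;                         /* runtime: 0; the verifier, still
                                              * tracking the untruncated -1,
                                              * predicts 0xffffffff here */
          r0 = r1;
          printf("r0 = %llu\n", (unsigned long long)r0);  /* prints 0 */
          return 0;
  }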
Runtime result of r0 at exit is 0 instead of the expected -1. Remove the verifier's mov32 src rewrite in div/mod and replace it with a jmp32 test instead. After the fix, we get the following code generation for a dividend r1 and divisor r6:
  div, 64 bit:                          div, 32 bit:
   0: (b7) r6 = 8                        0: (b7) r6 = 8
   1: (b7) r1 = 8                        1: (b7) r1 = 8
   2: (55) if r6 != 0x0 goto pc+2        2: (56) if w6 != 0x0 goto pc+2
   3: (ac) w1 ^= w1                      3: (ac) w1 ^= w1
   4: (05) goto pc+1                     4: (05) goto pc+1
   5: (3f) r1 /= r6                      5: (3c) w1 /= w6
   6: (b7) r0 = 0                        6: (b7) r0 = 0
   7: (95) exit                          7: (95) exit
  mod, 64 bit:                          mod, 32 bit:
   0: (b7) r6 = 8                        0: (b7) r6 = 8
   1: (b7) r1 = 8                        1: (b7) r1 = 8
   2: (15) if r6 == 0x0 goto pc+1        2: (16) if w6 == 0x0 goto pc+1
   3: (9f) r1 %= r6                      3: (9c) w1 %= w6
   4: (b7) r0 = 0                        4: (b7) r0 = 0
   5: (95) exit                          5: (95) exit
x86 in particular can throw a 'divide error' exception for the div instruction not only for the divisor being zero, but also for the case when the quotient is too large for the designated register. For the edx:eax and rdx:rax dividend pairs it is not an issue in the x86 BPF JIT since we always zero edx (rdx). Hence really the only protection needed is against the divisor being zero.
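To make the resulting semantics concrete, here is a hedged C sketch of what the AX-based patchlet in this backport enforces for a 64-bit division (function name assumed; not kernel code):

  #include <stdint.h>

  /* Semantics of the rewritten "r1 /= r6": the divisor is first copied
   * into AX so the program's src register is never rewritten, the zero
   * test guards the division, and div by 0 yields 0 via xor dst, dst. */
  static uint64_t bpf_div64_patched(uint64_t r1, uint64_t r6)
  {
          uint64_t ax = r6;       /* BPF_MOV_REG(class, BPF_REG_AX, src)     */

          if (ax == 0)            /* BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2)  */
                  return 0;       /* BPF_ALU_REG(class, BPF_XOR, dst, dst)   */
          return r1 / ax;         /* BPF_RAW_REG(*insn, dst, BPF_REG_AX)     */
  }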
Fixes: 68fda450a7df ("bpf: fix 32-bit divide by zero")
Co-developed-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
[Salvatore Bonaccorso: This is an earlier version of the patch provided
by Daniel Borkmann which does not rely on availability of the BPF_JMP32
instruction class. This means it is not even strictly a backport of the
upstream commit mentioned but based on Daniel's and John's work to
address the issue.]
Tested-by: Salvatore Bonaccorso <carnil@debian.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/filter.h | 24 ++++++++++++++++++++++++
 kernel/bpf/verifier.c  | 22 +++++++++++-----------
 2 files changed, 35 insertions(+), 11 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1856413b7a629..b541abf7f32fd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -82,6 +82,14 @@ struct sock_reuseport;
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
 
+#define BPF_ALU_REG(CLASS, OP, DST, SRC)			\
+	((struct bpf_insn) {					\
+		.code  = CLASS | BPF_OP(OP) | BPF_X,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 #define BPF_ALU64_REG(OP, DST, SRC)				\
 	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
@@ -128,6 +136,14 @@ struct sock_reuseport;
 
 /* Short form of mov, dst_reg = src_reg */
 
+#define BPF_MOV_REG(CLASS, DST, SRC)				\
+	((struct bpf_insn) {					\
+		.code  = CLASS | BPF_MOV | BPF_X,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 #define BPF_MOV64_REG(DST, SRC)					\
 	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
@@ -162,6 +178,14 @@ struct sock_reuseport;
 		.off   = 0,					\
 		.imm   = IMM })
 
+#define BPF_RAW_REG(insn, DST, SRC)				\
+	((struct bpf_insn) {					\
+		.code  = (insn).code,				\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = (insn).off,				\
+		.imm   = (insn).imm })
+
 /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
 #define BPF_LD_IMM64(DST, IMM)					\
 	BPF_LD_IMM64_RAW(DST, 0, IMM)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a7395421f3437..0f1bdf19b17c1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6160,28 +6160,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 			struct bpf_insn mask_and_div[] = {
-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
 				/* Rx div 0 -> 0 */
-				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
-				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
+				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),
+				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
+				BPF_ALU_REG(BPF_CLASS(insn->code), BPF_XOR, insn->dst_reg, insn->dst_reg),
 			};
 			struct bpf_insn mask_and_mod[] = {
-				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
 				/* Rx mod 0 -> Rx */
-				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
-				*insn,
+				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 1),
+				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
 			};
 			struct bpf_insn *patchlet;
 
 			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
 			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
-				patchlet = mask_and_div + (is64 ? 1 : 0);
-				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
+				patchlet = mask_and_div;
+				cnt = ARRAY_SIZE(mask_and_div);
 			} else {
-				patchlet = mask_and_mod + (is64 ? 1 : 0);
-				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
+				patchlet = mask_and_mod;
+				cnt = ARRAY_SIZE(mask_and_mod);
 			}
 
 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
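As an illustration of what the new patchlet emits, assuming a 64-bit "r1 /= r6" as in the message above (a hypothetical expansion; BPF_REG_AX is the verifier-internal scratch register):

	struct bpf_insn patched[] = {
		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_6),          /* ax = r6; src stays intact */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),        /* if ax == 0 goto +2        */
		BPF_ALU64_REG(BPF_DIV, BPF_REG_1, BPF_REG_AX), /* r1 /= ax                  */
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),                  /* skip the xor              */
		BPF_ALU64_REG(BPF_XOR, BPF_REG_1, BPF_REG_1),  /* div 0 -> 0                */
	};

Because the zero test and the division now operate on the AX copy, the program-visible src register is neither truncated nor clobbered by the rewrite.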
From: Daniel Borkmann <daniel@iogearbox.net>
stable inclusion
from linux-4.19.206
commit 39f74b7c81cca139c05757d9c8f9d1e35fbbf56b
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
--------------------------------
Commit 9b00f1b78809309163dda2d044d9e94a3c0248a3 upstream.
Recently noticed that when mod32 with a known src reg of 0 is performed, then the dst register is 32-bit truncated in verifier:
  0: R1=ctx(id=0,off=0,imm=0) R10=fp0
  0: (b7) r0 = 0
  1: R0_w=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0
  1: (b7) r1 = -1
  2: R0_w=inv0 R1_w=inv-1 R10=fp0
  2: (b4) w2 = -1
  3: R0_w=inv0 R1_w=inv-1 R2_w=inv4294967295 R10=fp0
  3: (9c) w1 %= w0
  4: R0_w=inv0 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
  4: (b7) r0 = 1
  5: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
  5: (1d) if r1 == r2 goto pc+1
   R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
  6: R0_w=inv1 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
  6: (b7) r0 = 2
  7: R0_w=inv2 R1_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2_w=inv4294967295 R10=fp0
  7: (95) exit
  7: R0=inv1 R1=inv(id=0,umin_value=4294967295,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R2=inv4294967295 R10=fp0
  7: (95) exit
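The program can be reconstructed from the disassembly; a minimal sketch in insn-macro form (the macro encoding is inferred from the opcodes, not taken from the original report):

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),                   /* 0: (b7) r0 = 0               */
		BPF_MOV64_IMM(BPF_REG_1, -1),                  /* 1: (b7) r1 = -1              */
		BPF_MOV32_IMM(BPF_REG_2, -1),                  /* 2: (b4) w2 = -1              */
		BPF_ALU32_REG(BPF_MOD, BPF_REG_1, BPF_REG_0),  /* 3: (9c) w1 %= w0             */
		BPF_MOV64_IMM(BPF_REG_0, 1),                   /* 4: (b7) r0 = 1               */
		BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), /* 5: (1d) if r1 == r2 goto pc+1 */
		BPF_MOV64_IMM(BPF_REG_0, 2),                   /* 6: (b7) r0 = 2               */
		BPF_EXIT_INSN(),                               /* 7: (95) exit                 */
	};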
However, as a runtime result, we get 2 instead of 1, meaning the dst register does not contain (u32)-1 in this case. The reason is fairly straightforward given the 0 test leaves the dst register as-is:
  # ./bpftool p d x i 23
   0: (b7) r0 = 0
   1: (b7) r1 = -1
   2: (b4) w2 = -1
   3: (16) if w0 == 0x0 goto pc+1
   4: (9c) w1 %= w0
   5: (b7) r0 = 1
   6: (1d) if r1 == r2 goto pc+1
   7: (b7) r0 = 2
   8: (95) exit
This was originally not an issue given the dst register was marked as completely unknown (aka 64 bit unknown). However, after 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification") the verifier casts the register output to 32 bit, and hence it becomes 32 bit unknown. Note that for the case where the src register is unknown, the dst register is marked 64 bit unknown. After the fix, the register is truncated by the runtime and the test passes:
  # ./bpftool p d x i 23
   0: (b7) r0 = 0
   1: (b7) r1 = -1
   2: (b4) w2 = -1
   3: (16) if w0 == 0x0 goto pc+2
   4: (9c) w1 %= w0
   5: (05) goto pc+1
   6: (bc) w1 = w1
   7: (b7) r0 = 1
   8: (1d) if r1 == r2 goto pc+1
   9: (b7) r0 = 2
  10: (95) exit
Semantics also match with {R,W}x mod{64,32} 0 -> {R,W}x. Invalid div has always been {R,W}x div{64,32} 0 -> 0. Rewrites are as follows:
  mod32:                            mod64:

  (16) if w0 == 0x0 goto pc+2       (15) if r0 == 0x0 goto pc+1
  (9c) w1 %= w0                     (9f) r1 %= r0
  (05) goto pc+1
  (bc) w1 = w1
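Spelled out as a C sketch of the runtime semantics of the fixed mod32 rewrite (variable names are illustrative):

	#include <stdint.h>

	/* dst models r1, src models w0 from the example above. */
	static uint64_t mod32_rewrite(uint64_t dst, uint32_t src)
	{
		if (src != 0)
			dst = (uint32_t)dst % src; /* (9c) w1 %= w0, alu32 zero-extends dst */
		else
			dst = (uint32_t)dst;       /* (bc) w1 = w1, truncate on the 0 path  */
		return dst;
	}

Either path leaves the upper 32 bits of dst cleared, which is exactly what the verifier's 32-bit unknown marking assumes.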
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
[Salvatore Bonaccorso: This is an earlier version based on work by
Daniel and John which does not rely on availability of the BPF_JMP32
instruction class. This means it is not even strictly a backport of the
upstream commit mentioned but based on Daniel's and John's work to
address the issue and was finalized by Thadeu Lima de Souza Cascardo.]
Tested-by: Salvatore Bonaccorso <carnil@debian.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/bpf/verifier.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0f1bdf19b17c1..bbfa4cf2f3209 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6161,7 +6161,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 			struct bpf_insn mask_and_div[] = {
 				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
-				/* Rx div 0 -> 0 */
+				/* [R,W]x div 0 -> 0 */
 				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),
 				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
@@ -6169,9 +6169,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			};
 			struct bpf_insn mask_and_mod[] = {
 				BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
-				/* Rx mod 0 -> Rx */
-				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 1),
+				BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 1 + (is64 ? 0 : 1)),
 				BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
 			};
 			struct bpf_insn *patchlet;
 
@@ -6181,7 +6182,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 				cnt = ARRAY_SIZE(mask_and_div);
 			} else {
 				patchlet = mask_and_mod;
-				cnt = ARRAY_SIZE(mask_and_mod);
+				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 2 : 0);
 			}
 
 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
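For illustration, the 32-bit mod patchlet after this change would expand roughly as follows (a hypothetical expansion for "w1 %= w6"; the trailing ja/mov32 pair is dropped via cnt in the 64-bit case, where mod by 0 may leave dst untouched as-is):

	struct bpf_insn patched[] = {
		BPF_MOV32_REG(BPF_REG_AX, BPF_REG_6),          /* ax = w6            */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),        /* if ax == 0 goto +2 */
		BPF_ALU32_REG(BPF_MOD, BPF_REG_1, BPF_REG_AX), /* w1 %= ax           */
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),                  /* skip the mov32     */
		BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           /* w1 = w1 (truncate) */
	};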