From: He Fengqing <hefengqing@huawei.com>
hulk inclusion
category: bugfix
bugzilla: NA
CVE: CVE-2021-3444
-------------------------------------------------
This reverts commit acd0d53a5af580696c55c8e8196ea96c7d3a00e0.

The revert removes the BPF_JMP32_REG()/BPF_JMP32_IMM() helper macros and the
corresponding JMP32 immediate-blinding cases in bpf_jit_blind_insn().
Signed-off-by: He Fengqing <hefengqing@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/filter.h       | 20 --------------------
 kernel/bpf/core.c            | 21 ---------------------
 tools/include/linux/filter.h | 20 --------------------
 3 files changed, 61 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4d210560950fd..fa6c042a19493 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -282,26 +282,6 @@ struct sock_reuseport;
 		.off = OFF, \
 		.imm = IMM })
 
-/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
-	((struct bpf_insn) { \
-		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
-		.dst_reg = DST, \
-		.src_reg = SRC, \
-		.off = OFF, \
-		.imm = 0 })
-
-/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
-	((struct bpf_insn) { \
-		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
-		.dst_reg = DST, \
-		.src_reg = 0, \
-		.off = OFF, \
-		.imm = IMM })
-
 /* Unconditional jumps, goto pc + off16 */
 
 #define BPF_JMP_A(OFF) \
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ef9f5b6abc42e..2c8322db6cf8f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -804,27 +804,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 		break;
 
-	case BPF_JMP32 | BPF_JEQ | BPF_K:
-	case BPF_JMP32 | BPF_JNE | BPF_K:
-	case BPF_JMP32 | BPF_JGT | BPF_K:
-	case BPF_JMP32 | BPF_JLT | BPF_K:
-	case BPF_JMP32 | BPF_JGE | BPF_K:
-	case BPF_JMP32 | BPF_JLE | BPF_K:
-	case BPF_JMP32 | BPF_JSGT | BPF_K:
-	case BPF_JMP32 | BPF_JSLT | BPF_K:
-	case BPF_JMP32 | BPF_JSGE | BPF_K:
-	case BPF_JMP32 | BPF_JSLE | BPF_K:
-	case BPF_JMP32 | BPF_JSET | BPF_K:
-		/* Accommodate for extra offset in case of a backjump. */
-		off = from->off;
-		if (off < 0)
-			off -= 2;
-		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
-				      off);
-		break;
-
 	case BPF_LD | BPF_IMM | BPF_DW:
 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index cce0b02c0e286..af55acf73e75a 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,16 +199,6 @@
 		.off = OFF, \
 		.imm = 0 })
 
-/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
-	((struct bpf_insn) { \
-		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
-		.dst_reg = DST, \
-		.src_reg = SRC, \
-		.off = OFF, \
-		.imm = 0 })
-
 /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
 
 #define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -219,16 +209,6 @@
 		.off = OFF, \
 		.imm = IMM })
 
-/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
-
-#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
-	((struct bpf_insn) { \
-		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
-		.dst_reg = DST, \
-		.src_reg = 0, \
-		.off = OFF, \
-		.imm = IMM })
-
 /* Unconditional jumps, goto pc + off16 */
 
 #define BPF_JMP_A(OFF) \
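
For context, a minimal sketch of how the BPF_JMP32_IMM() helper removed above
would be used. The function name, register and offset values below are made up
for illustration only and are not part of this patch:

#include <linux/filter.h>

/* Illustrative only: using the removed BPF_JMP32_IMM() helper, this
 * builds "if ((u32)r1 == 0) goto pc+2", i.e. a 32-bit wide comparison
 * against an immediate, encoded as BPF_JMP32 | BPF_JEQ | BPF_K.
 */
static void jmp32_imm_example(void)
{
	struct bpf_insn insn = BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0, 2);

	(void)insn;	/* nothing consumes the example instruction */
}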