This patchset fixes CVE-2024-47703, which may result in a kernel panic.
Andrii Nakryiko (2):
  bpf: enforce exact retval range on subprog/callback exit
  bpf: enforce precise retval range on program exit
Tengda Wu (2):
  bpf: Fix kabi breakage in struct bpf_func_state
  bpf: Fix kabi breakage in struct bpf_insn_access_aux
Xu Kuohai (3):
  bpf, lsm: Add disabled BPF LSM hook list
  bpf, lsm: Add check for BPF LSM return value
  bpf: Fix compare error in function retval_range_within
 include/linux/bpf.h                           |   1 +
 include/linux/bpf_lsm.h                       |   8 +
 include/linux/bpf_verifier.h                  |   8 +-
 kernel/bpf/bpf_lsm.c                          |  63 +++++++-
 kernel/bpf/btf.c                              |   5 +-
 kernel/bpf/verifier.c                         | 149 ++++++++++++------
 .../selftests/bpf/progs/test_global_func15.c  |   2 +-
 .../selftests/bpf/progs/timer_failure.c       |   2 +-
 .../selftests/bpf/progs/user_ringbuf_fail.c   |   2 +-
 .../bpf/progs/verifier_cgroup_inv_retcode.c   |   8 +-
 .../bpf/progs/verifier_netfilter_retcode.c    |   2 +-
 11 files changed, 193 insertions(+), 57 deletions(-)
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.8-rc1
commit 8fa4ecd49b81ccd9d1d87f1c8b2260e218644878
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Instead of relying on potentially imprecise tnum representation of expected return value range for callbacks and subprogs, validate that smin/smax range satisfy exact expected range of return values.
E.g., if callback would need to return [0, 2] range, tnum can't represent this precisely and instead will allow [0, 3] range. By checking smin/smax range, we can make sure that subprog/callback indeed returns only valid [0, 2] range.
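For illustration, a minimal userspace sketch of this imprecision; it mirrors the rounding done by tnum_range() in kernel/bpf/tnum.c, but is not the kernel code itself:

  #include <stdint.h>
  #include <stdio.h>

  /* A tnum tracks known bits: 'mask' bits are unknown, 'value' holds the
   * known bits.  A range whose bounds differ in some bit gets rounded up
   * to the whole power-of-two-sized set covering that bit.
   */
  struct tnum { uint64_t value; uint64_t mask; };

  static struct tnum tnum_range_sketch(uint64_t min, uint64_t max)
  {
          uint64_t chi = min ^ max, delta;
          int bits;

          if (!chi)                               /* min == max: exact constant */
                  return (struct tnum){ .value = min, .mask = 0 };
          bits = 64 - __builtin_clzll(chi);       /* highest differing bit + 1 */
          delta = (bits > 63) ? ~0ULL : (1ULL << bits) - 1;
          return (struct tnum){ .value = min & ~delta, .mask = delta };
  }

  int main(void)
  {
          struct tnum t = tnum_range_sketch(0, 2);

          /* Prints value=0 mask=3: with both low bits unknown the tnum admits
           * 0, 1, 2 *and* 3, so tnum_in() cannot reject a callback returning 3.
           * Checking smin/smax directly keeps the exact [0, 2] bound.
           */
          printf("value=%llu mask=%llu\n",
                 (unsigned long long)t.value, (unsigned long long)t.mask);
          return 0;
  }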
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231202175705.885270-5-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
  include/linux/bpf_verifier.h
[This conflict is due to commit 6b6e3a2eac5f ("kabi: reserve space for bpf related structures")]
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 include/linux/bpf_verifier.h |  7 ++++++-
 kernel/bpf/verifier.c        | 33 ++++++++++++++++++++++-----------
 2 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index ac1262ca98f7..fcea91630903 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -286,6 +286,11 @@ struct bpf_reference_state { KABI_RESERVE(4) };
+struct bpf_retval_range { + s32 minval; + s32 maxval; +}; + /* state of the program: * type of all registers and stack info */ @@ -309,7 +314,7 @@ struct bpf_func_state { */ u32 async_entry_cnt; bool in_callback_fn; - struct tnum callback_ret_range; + struct bpf_retval_range callback_ret_range; bool in_async_callback_fn; /* For callback calling functions that limit number of possible * callback executions (e.g. bpf_loop) keeps track of current diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d1041517a984..126414babca6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2567,6 +2567,11 @@ static void init_reg_state(struct bpf_verifier_env *env, regs[BPF_REG_FP].frameno = state->frameno; }
+static struct bpf_retval_range retval_range(s32 minval, s32 maxval) +{ + return (struct bpf_retval_range){ minval, maxval }; +} + #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, @@ -2575,7 +2580,7 @@ static void init_func_state(struct bpf_verifier_env *env, state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; - state->callback_ret_range = tnum_range(0, 0); + state->callback_ret_range = retval_range(0, 0); init_reg_state(env, state); mark_verifier_state_scratched(env); } @@ -9463,7 +9468,7 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env, return err;
callee->in_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9485,7 +9490,7 @@ static int set_loop_callback_state(struct bpf_verifier_env *env, __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9515,7 +9520,7 @@ static int set_timer_callback_state(struct bpf_verifier_env *env, __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); callee->in_async_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9543,7 +9548,7 @@ static int set_find_vma_callback_state(struct bpf_verifier_env *env, __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); callee->in_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9566,7 +9571,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9598,7 +9603,7 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); callee->in_callback_fn = true; - callee->callback_ret_range = tnum_range(0, 1); + callee->callback_ret_range = retval_range(0, 1); return 0; }
@@ -9627,6 +9632,11 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) return is_rbtree_lock_required_kfunc(kfunc_btf_id); }
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg) +{ + return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; +} + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state, *prev_st; @@ -9650,9 +9660,6 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
caller = state->frame[state->curframe - 1]; if (callee->in_callback_fn) { - /* enforce R0 return value range [0, 1]. */ - struct tnum range = callee->callback_ret_range; - if (r0->type != SCALAR_VALUE) { verbose(env, "R0 not a scalar value\n"); return -EACCES; @@ -9664,7 +9671,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) if (err) return err;
- if (!tnum_in(range, r0->var_off)) { + /* enforce R0 return value range */ + if (!retval_range_within(callee->callback_ret_range, r0)) { + struct tnum range = tnum_range(callee->callback_ret_range.minval, + callee->callback_ret_range.maxval); + verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); return -EINVAL; }
From: Andrii Nakryiko <andrii@kernel.org>
mainline inclusion
from mainline-v6.8-rc1
commit c871d0e00f0e8c207ce8ff89025e35cc49a8a3c3
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Similarly to subprog/callback logic, enforce return value of BPF program using more precise smin/smax range.
We need to adjust a bunch of tests due to a changed format of an error message.
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231202175705.885270-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflicts:
  kernel/bpf/verifier.c
  tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
  tools/testing/selftests/bpf/progs/exceptions_assert.c
  tools/testing/selftests/bpf/progs/exceptions_fail.c
[The conflicts are because we did not backport 5fad52bee304, d2a93715bfb0 and 60a6b2c78c62]
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 kernel/bpf/verifier.c                              | 56 ++++++++++---------
 .../selftests/bpf/progs/test_global_func15.c       |  2 +-
 .../selftests/bpf/progs/timer_failure.c            |  2 +-
 .../selftests/bpf/progs/user_ringbuf_fail.c        |  2 +-
 .../bpf/progs/verifier_cgroup_inv_retcode.c        |  8 +--
 .../bpf/progs/verifier_netfilter_retcode.c         |  2 +-
 6 files changed, 37 insertions(+), 35 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 126414babca6..33b838be156a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -413,20 +413,23 @@ __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
static void verbose_invalid_scalar(struct bpf_verifier_env *env, struct bpf_reg_state *reg, - struct tnum *range, const char *ctx, + struct bpf_retval_range range, const char *ctx, const char *reg_name) { - char tn_buf[48]; + bool unknown = true;
- verbose(env, "At %s the register %s ", ctx, reg_name); - if (!tnum_is_unknown(reg->var_off)) { - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(env, "has value %s", tn_buf); - } else { - verbose(env, "has unknown scalar value"); + verbose(env, "At %s the register %s has", ctx, reg_name); + if (reg->smin_value > S64_MIN) { + verbose(env, " smin=%lld", reg->smin_value); + unknown = false; } - tnum_strn(tn_buf, sizeof(tn_buf), *range); - verbose(env, " should have been in %s\n", tn_buf); + if (reg->smax_value < S64_MAX) { + verbose(env, " smax=%lld", reg->smax_value); + unknown = false; + } + if (unknown) + verbose(env, " unknown scalar value"); + verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval); }
static bool type_is_pkt_pointer(enum bpf_reg_type type) @@ -9673,10 +9676,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
/* enforce R0 return value range */ if (!retval_range_within(callee->callback_ret_range, r0)) { - struct tnum range = tnum_range(callee->callback_ret_range.minval, - callee->callback_ret_range.maxval); - - verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); + verbose_invalid_scalar(env, r0, callee->callback_ret_range, + "callback return", "R0"); return -EINVAL; } if (!calls_callback(env, callee->callsite)) { @@ -14951,7 +14952,8 @@ static int check_return_code(struct bpf_verifier_env *env) struct tnum enforce_attach_type_range = tnum_unknown; const struct bpf_prog *prog = env->prog; struct bpf_reg_state *reg; - struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0); + struct bpf_retval_range range = retval_range(0, 1); + struct bpf_retval_range const_0 = retval_range(0, 0); enum bpf_prog_type prog_type = resolve_prog_type(env->prog); int err; struct bpf_func_state *frame = env->cur_state->frame[0]; @@ -14999,8 +15001,8 @@ static int check_return_code(struct bpf_verifier_env *env) return -EINVAL; }
- if (!tnum_in(const_0, reg->var_off)) { - verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); + if (!retval_range_within(const_0, reg)) { + verbose_invalid_scalar(env, reg, const_0, "async callback", "R0"); return -EINVAL; } return 0; @@ -15023,14 +15025,14 @@ static int check_return_code(struct bpf_verifier_env *env) env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) - range = tnum_range(1, 1); + range = retval_range(1, 1); if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) - range = tnum_range(0, 3); + range = retval_range(0, 3); break; case BPF_PROG_TYPE_CGROUP_SKB: if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { - range = tnum_range(0, 3); + range = retval_range(0, 3); enforce_attach_type_range = tnum_range(2, 3); } break; @@ -15043,13 +15045,13 @@ static int check_return_code(struct bpf_verifier_env *env) case BPF_PROG_TYPE_RAW_TRACEPOINT: if (!env->prog->aux->attach_btf_id) return 0; - range = tnum_const(0); + range = retval_range(0, 0); break; case BPF_PROG_TYPE_TRACING: switch (env->prog->expected_attach_type) { case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: - range = tnum_const(0); + range = retval_range(0, 0); break; case BPF_TRACE_RAW_TP: case BPF_MODIFY_RETURN: @@ -15061,7 +15063,7 @@ static int check_return_code(struct bpf_verifier_env *env) } break; case BPF_PROG_TYPE_SK_LOOKUP: - range = tnum_range(SK_DROP, SK_PASS); + range = retval_range(SK_DROP, SK_PASS); break;
case BPF_PROG_TYPE_LSM: @@ -15075,12 +15077,12 @@ static int check_return_code(struct bpf_verifier_env *env) /* Make sure programs that attach to void * hooks don't try to modify return value. */ - range = tnum_range(1, 1); + range = retval_range(1, 1); } break;
case BPF_PROG_TYPE_NETFILTER: - range = tnum_range(NF_DROP, NF_ACCEPT); + range = retval_range(NF_DROP, NF_ACCEPT); break; case BPF_PROG_TYPE_EXT: /* freplace program can return anything as its return value @@ -15096,8 +15098,8 @@ static int check_return_code(struct bpf_verifier_env *env) return -EINVAL; }
- if (!tnum_in(range, reg->var_off)) { - verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); + if (!retval_range_within(range, reg)) { + verbose_invalid_scalar(env, reg, range, "program exit", "R0"); if (prog->expected_attach_type == BPF_LSM_CGROUP && prog_type == BPF_PROG_TYPE_LSM && !prog->aux->attach_func_proto->type) diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c index b512d6a6c75e..f80207480e8a 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func15.c +++ b/tools/testing/selftests/bpf/progs/test_global_func15.c @@ -13,7 +13,7 @@ __noinline int foo(unsigned int *v) }
SEC("cgroup_skb/ingress") -__failure __msg("At program exit the register R0 has value") +__failure __msg("At program exit the register R0 has ") int global_func15(struct __sk_buff *skb) { unsigned int v = 1; diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c index 226d33b5a05c..9000da1e2120 100644 --- a/tools/testing/selftests/bpf/progs/timer_failure.c +++ b/tools/testing/selftests/bpf/progs/timer_failure.c @@ -30,7 +30,7 @@ static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer) }
SEC("fentry/bpf_fentry_test1") -__failure __msg("should have been in (0x0; 0x0)") +__failure __msg("should have been in [0, 0]") int BPF_PROG2(test_ret_1, int, a) { int key = 0; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c index 03ee946c6bf7..11ab25c42c36 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -184,7 +184,7 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context) * not be able to write to that pointer. */ SEC("?raw_tp") -__failure __msg("At callback return the register R0 has value") +__failure __msg("At callback return the register R0 has ") int user_ringbuf_callback_invalid_return(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0); diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c index d6c4a7f3f790..6e0f349f8f15 100644 --- a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c @@ -7,7 +7,7 @@
SEC("cgroup/sock") __description("bpf_exit with invalid return code. test1") -__failure __msg("R0 has value (0x0; 0xffffffff)") +__failure __msg("smin=0 smax=4294967295 should have been in [0, 1]") __naked void with_invalid_return_code_test1(void) { asm volatile (" \ @@ -30,7 +30,7 @@ __naked void with_invalid_return_code_test2(void)
SEC("cgroup/sock") __description("bpf_exit with invalid return code. test3") -__failure __msg("R0 has value (0x0; 0x3)") +__failure __msg("smin=0 smax=3 should have been in [0, 1]") __naked void with_invalid_return_code_test3(void) { asm volatile (" \ @@ -53,7 +53,7 @@ __naked void with_invalid_return_code_test4(void)
SEC("cgroup/sock") __description("bpf_exit with invalid return code. test5") -__failure __msg("R0 has value (0x2; 0x0)") +__failure __msg("smin=2 smax=2 should have been in [0, 1]") __naked void with_invalid_return_code_test5(void) { asm volatile (" \ @@ -75,7 +75,7 @@ __naked void with_invalid_return_code_test6(void)
SEC("cgroup/sock") __description("bpf_exit with invalid return code. test7") -__failure __msg("R0 has unknown scalar value") +__failure __msg("R0 has unknown scalar value should have been in [0, 1]") __naked void with_invalid_return_code_test7(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c index 353ae6da00e1..e1ffa5d32ff0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c @@ -39,7 +39,7 @@ __naked void with_valid_return_code_test3(void)
SEC("netfilter") __description("bpf_exit with invalid return code. test4") -__failure __msg("R0 has value (0x2; 0x0)") +__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]") __naked void with_invalid_return_code_test4(void) { asm volatile (" \
From: Xu Kuohai <xukuohai@huawei.com>
mainline inclusion
from mainline-v6.12-rc1
commit 21c7063f6d08ab9afa088584939791bee0c177e5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Add a disabled hooks list for BPF LSM. Progs attaching to the listed hooks will be rejected by the verifier.
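As a usage illustration, a hypothetical BPF LSM program attaching to one of the listed hooks; with this patch the verifier refuses to load it with the "points to disabled hook" error:

  /* Hypothetical example program, not part of this series: it attaches to
   * vm_enough_memory, which is on the disabled-hook list above.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  SEC("lsm/vm_enough_memory")
  int BPF_PROG(rejected_prog, struct mm_struct *mm, long pages)
  {
          /* Never reached: bpf_lsm_verify_prog() rejects the attach_btf_id. */
          return 0;
  }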
Suggested-by: KP Singh <kpsingh@kernel.org>
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20240719110059.797546-2-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Conflicts:
  kernel/bpf/bpf_lsm.c
[The conflicts are because we did not backport a04a1198088]
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 kernel/bpf/bpf_lsm.c | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index e14c822f8911..ee2b61f459a5 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -36,6 +36,23 @@ BTF_SET_START(bpf_lsm_hooks) #undef LSM_HOOK BTF_SET_END(bpf_lsm_hooks)
+BTF_SET_START(bpf_lsm_disabled_hooks) +BTF_ID(func, bpf_lsm_vm_enough_memory) +BTF_ID(func, bpf_lsm_inode_need_killpriv) +BTF_ID(func, bpf_lsm_inode_getsecurity) +BTF_ID(func, bpf_lsm_inode_listsecurity) +BTF_ID(func, bpf_lsm_inode_copy_up_xattr) +BTF_ID(func, bpf_lsm_getprocattr) +BTF_ID(func, bpf_lsm_setprocattr) +#ifdef CONFIG_KEYS +BTF_ID(func, bpf_lsm_key_getsecurity) +#endif +#ifdef CONFIG_AUDIT +BTF_ID(func, bpf_lsm_audit_rule_match) +#endif +BTF_ID(func, bpf_lsm_ismaclabel) +BTF_SET_END(bpf_lsm_disabled_hooks) + /* List of LSM hooks that should operate on 'current' cgroup regardless * of function signature. */ @@ -97,15 +114,24 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { + u32 btf_id = prog->aux->attach_btf_id; + const char *func_name = prog->aux->attach_func_name; + if (!prog->gpl_compatible) { bpf_log(vlog, "LSM programs must have a GPL compatible license\n"); return -EINVAL; }
- if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) { + if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) { + bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n", + btf_id, func_name); + return -EINVAL; + } + + if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) { bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n", - prog->aux->attach_btf_id, prog->aux->attach_func_name); + btf_id, func_name); return -EINVAL; }
From: Xu Kuohai <xukuohai@huawei.com>
mainline inclusion
from mainline-v6.12-rc1
commit 5d99e198be279045e6ecefe220f5c52f8ce9bfd5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
A bpf prog attached to the file_alloc_security hook that returns a positive number makes the kernel panic.
This happens because the file system cannot filter out the positive number returned by the LSM prog using IS_ERR, and misinterprets it as a file pointer.
Given that the file_alloc_security hook never returned a positive number before the introduction of BPF LSM, and that other BPF LSM hooks may encounter similar issues, this patch adds an LSM return value check in the verifier, to ensure no unexpected value is returned.
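For illustration, a standalone userspace sketch of this failure mode, using local stand-ins for ERR_PTR()/IS_ERR() rather than the actual fs/file_table.c code:

  #include <stdio.h>

  #define MAX_ERRNO       4095
  #define ERR_PTR(err)    ((void *)(long)(err))
  #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

  /* The allocation path propagates a non-zero hook result as an error
   * pointer and expects it to be a negative errno.
   */
  static void *alloc_file_sketch(int lsm_ret)
  {
          if (lsm_ret)
                  return ERR_PTR(lsm_ret);
          return (void *)0x1000;          /* stand-in for a real struct file */
  }

  int main(void)
  {
          void *f = alloc_file_sketch(1); /* BPF LSM prog returned +1 */

          /* IS_ERR() only recognizes [-MAX_ERRNO, -1], so ERR_PTR(1) == (void *)1
           * passes the check and would later be dereferenced as a file pointer,
           * crashing the kernel -- hence the verifier-side retval check.
           */
          if (!IS_ERR(f))
                  printf("bogus pointer %p treated as a valid file\n", f);
          return 0;
  }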
Fixes: 520b7aa00d8c ("bpf: lsm: Initialize the BPF LSM hooks")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240719110059.797546-3-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Conflicts:
  include/linux/bpf.h
  kernel/bpf/bpf_lsm.c
  kernel/bpf/verifier.c
[2929bfac006d ("bpf: Minor cleanup around stack bounds") was not backported]
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 include/linux/bpf.h     |  1 +
 include/linux/bpf_lsm.h |  8 ++++++
 kernel/bpf/bpf_lsm.c    | 33 ++++++++++++++++++++++-
 kernel/bpf/btf.c        |  5 +++-
 kernel/bpf/verifier.c   | 60 ++++++++++++++++++++++++++++++++++-------
 5 files changed, 96 insertions(+), 11 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8c4c2c39a6c1..017401c80102 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -912,6 +912,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); */ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; + bool is_retval; /* is accessing function return value ? */ union { int ctx_field_size; struct { diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 1de7ece5d36d..aefcd6564251 100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -9,6 +9,7 @@
#include <linux/sched.h> #include <linux/bpf.h> +#include <linux/bpf_verifier.h> #include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM @@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range); #else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) @@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, { }
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */ diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index ee2b61f459a5..476a79f0ff2d 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -11,7 +11,6 @@ #include <linux/lsm_hooks.h> #include <linux/bpf_lsm.h> #include <linux/kallsyms.h> -#include <linux/bpf_verifier.h> #include <net/bpf_sk_storage.h> #include <linux/bpf_local_storage.h> #include <linux/btf_ids.h> @@ -400,3 +399,35 @@ const struct bpf_verifier_ops lsm_verifier_ops = { .get_func_proto = bpf_lsm_func_proto, .is_valid_access = btf_ctx_access, }; + +/* hooks return 0 or 1 */ +BTF_SET_START(bool_lsm_hooks) +#ifdef CONFIG_SECURITY_NETWORK_XFRM +BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match) +#endif +#ifdef CONFIG_AUDIT +BTF_ID(func, bpf_lsm_audit_rule_known) +#endif +BTF_SET_END(bool_lsm_hooks) + +int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *retval_range) +{ + /* no return value range for void hooks */ + if (!prog->aux->attach_func_proto->type) + return -EINVAL; + + if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) { + retval_range->minval = 0; + retval_range->maxval = 1; + } else { + /* All other available LSM hooks, except task_prctl, return 0 + * on success and negative error code on failure. + * To keep things simple, we only allow bpf progs to return 0 + * or negative errno for task_prctl too. + */ + retval_range->minval = -MAX_ERRNO; + retval_range->maxval = 0; + } + return 0; +} diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7ac62793a753..09f6954c0aff 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5962,8 +5962,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (arg == nr_args) { switch (prog->expected_attach_type) { - case BPF_LSM_CGROUP: case BPF_LSM_MAC: + /* mark we are accessing the return value */ + info->is_retval = true; + fallthrough; + case BPF_LSM_CGROUP: case BPF_TRACE_FEXIT: /* When LSM programs are attached to void LSM hooks * they use FEXIT trampolines and when attached to diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 33b838be156a..314b0b74fe80 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2512,6 +2512,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, __mark_reg_unknown(env, regs + regno); }
+static int __mark_reg_s32_range(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + u32 regno, + s32 s32_min, + s32 s32_max) +{ + struct bpf_reg_state *reg = regs + regno; + + reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); + reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); + + reg->smin_value = max_t(s64, reg->smin_value, s32_min); + reg->smax_value = min_t(s64, reg->smax_value, s32_max); + + reg_bounds_sync(reg); + + return 0; +} + static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { @@ -5557,11 +5576,12 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type, - struct btf **btf, u32 *btf_id) + struct btf **btf, u32 *btf_id, bool *is_retval) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, .log = &env->log, + .is_retval = false, };
if (env->ops->is_valid_access && @@ -5574,6 +5594,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, * type of narrower access. */ *reg_type = info.reg_type; + *is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) { *btf = info.btf; @@ -6691,6 +6712,17 @@ static int check_stack_access_within_bounds( return grow_stack_state(env, state, round_up(-min_off, BPF_REG_SIZE)); }
+static bool get_func_retval_range(struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + if (prog->type == BPF_PROG_TYPE_LSM && + prog->expected_attach_type == BPF_LSM_MAC && + !bpf_lsm_get_retval_range(prog, range)) { + return true; + } + return false; +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -6795,6 +6827,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { + bool is_retval = false; + struct bpf_retval_range range; enum bpf_reg_type reg_type = SCALAR_VALUE; struct btf *btf = NULL; u32 btf_id = 0; @@ -6810,7 +6844,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return err;
err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf, - &btf_id); + &btf_id, &is_retval); if (err) verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { @@ -6819,7 +6853,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); + if (is_retval && get_func_retval_range(env->prog, &range)) { + err = __mark_reg_s32_range(env, regs, value_regno, + range.minval, range.maxval); + if (err) + return err; + } else { + mark_reg_unknown(env, regs, value_regno); + } } else { mark_reg_known_zero(env, regs, value_regno); @@ -15068,12 +15109,13 @@ static int check_return_code(struct bpf_verifier_env *env)
case BPF_PROG_TYPE_LSM: if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { - /* Regular BPF_PROG_TYPE_LSM programs can return - * any value. - */ - return 0; - } - if (!env->prog->aux->attach_func_proto->type) { + /* no range found, any return value is allowed */ + if (!get_func_retval_range(env->prog, &range)) + return 0; + /* no restricted range, any return value is allowed */ + if (range.minval == S32_MIN && range.maxval == S32_MAX) + return 0; + } else if (!env->prog->aux->attach_func_proto->type) { /* Make sure programs that attach to void * hooks don't try to modify return value. */
From: Xu Kuohai <xukuohai@huawei.com>
mainline inclusion
from mainline-v6.12-rc1
commit 763aa759d3b2c4f95b11855e3d37b860860107e2
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
After adding the lsm hook return range check to the verifier, the test case "test_progs -t test_lsm" failed, and the failure log says:
libbpf: prog 'test_int_hook': BPF program load failed: Invalid argument
libbpf: prog 'test_int_hook': -- BEGIN PROG LOAD LOG --
0: R1=ctx() R10=fp0
; int BPF_PROG(test_int_hook, struct vm_area_struct *vma, @ lsm.c:89
0: (79) r0 = *(u64 *)(r1 +24)         ; R0_w=scalar(smin=smin32=-4095,smax=smax32=0) R1=ctx()
[...]
24: (b4) w0 = -1                      ; R0_w=0xffffffff
; int BPF_PROG(test_int_hook, struct vm_area_struct *vma, @ lsm.c:89
25: (95) exit
At program exit the register R0 has smin=4294967295 smax=4294967295 should have been in [-4095, 0]
It can be seen that the instruction "w0 = -1" zero-extended -1 into the 64-bit register r0, setting both the smin and smax values of r0 to 4294967295. This resulted in a false reject when r0 was checked against the range [-4095, 0].
Given that bpf lsm hooks do not return 64-bit values, this patch fixes it by changing the comparison between r0 and the return range from a 64-bit operation to a 32-bit operation for bpf lsm.
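A minimal sketch of the false reject and of the 32-bit check, using the bounds reported in the verifier log above and a simplified stand-in for retval_range_within(), not the kernel code:

  #include <stdint.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct reg_bounds {
          int64_t smin, smax;             /* 64-bit signed bounds */
          int32_t s32_min, s32_max;       /* 32-bit signed bounds */
  };

  static bool range_within(int32_t minval, int32_t maxval,
                           const struct reg_bounds *r, bool return_32bit)
  {
          if (return_32bit)               /* what this patch enables for bpf lsm */
                  return minval <= r->s32_min && r->s32_max <= maxval;
          return minval <= r->smin && r->smax <= maxval;
  }

  int main(void)
  {
          /* r0 after "w0 = -1": the 32-bit write is zero-extended into 64 bits */
          struct reg_bounds r0 = {
                  .smin = 4294967295LL, .smax = 4294967295LL,
                  .s32_min = -1, .s32_max = -1,
          };

          printf("64-bit check: %d\n", range_within(-4095, 0, &r0, false)); /* 0: false reject */
          printf("32-bit check: %d\n", range_within(-4095, 0, &r0, true));  /* 1: accepted */
          return 0;
  }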
Fixes: 8fa4ecd49b81 ("bpf: enforce exact retval range on subprog/callback exit")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Acked-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Link: https://lore.kernel.org/r/20240719110059.797546-5-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Conflicts:
  kernel/bpf/verifier.c
[Did not backport b9ae0c9dd0ac ("bpf: Add support for custom exception callbacks") and 0ef24c8dfae2 ("bpf: unify async callback and program retval checks")]
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 kernel/bpf/verifier.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 314b0b74fe80..c36a2ff12a8d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9676,9 +9676,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) return is_rbtree_lock_required_kfunc(kfunc_btf_id); }
-static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg) +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg, + bool return_32bit) { - return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; + if (return_32bit) + return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; + else + return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; }
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) @@ -9715,8 +9719,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) if (err) return err;
- /* enforce R0 return value range */ - if (!retval_range_within(callee->callback_ret_range, r0)) { + /* enforce R0 return value range, and bpf_callback_t returns 64bit */ + if (!retval_range_within(callee->callback_ret_range, r0, false)) { verbose_invalid_scalar(env, r0, callee->callback_ret_range, "callback return", "R0"); return -EINVAL; @@ -14999,6 +15003,7 @@ static int check_return_code(struct bpf_verifier_env *env) int err; struct bpf_func_state *frame = env->cur_state->frame[0]; const bool is_subprog = frame->subprogno; + bool return_32bit = false;
/* LSM and struct_ops func-ptr's return type could be "void" */ if (!is_subprog) { @@ -15042,7 +15047,7 @@ static int check_return_code(struct bpf_verifier_env *env) return -EINVAL; }
- if (!retval_range_within(const_0, reg)) { + if (!retval_range_within(const_0, reg, false)) { verbose_invalid_scalar(env, reg, const_0, "async callback", "R0"); return -EINVAL; } @@ -15115,6 +15120,7 @@ static int check_return_code(struct bpf_verifier_env *env) /* no restricted range, any return value is allowed */ if (range.minval == S32_MIN && range.maxval == S32_MAX) return 0; + return_32bit = true; } else if (!env->prog->aux->attach_func_proto->type) { /* Make sure programs that attach to void * hooks don't try to modify return value. @@ -15140,7 +15146,7 @@ static int check_return_code(struct bpf_verifier_env *env) return -EINVAL; }
- if (!retval_range_within(range, reg)) { + if (!retval_range_within(range, reg, return_32bit)) { verbose_invalid_scalar(env, reg, range, "program exit", "R0"); if (prog->expected_attach_type == BPF_LSM_CGROUP && prog_type == BPF_PROG_TYPE_LSM &&
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
--------------------------------
After backporting commit b7105bb87cfd ("bpf: enforce exact retval range on subprog/callback exit"), struct `tnum` was replaced with struct `bpf_retval_range`, which shrinks the field by 8 bytes and results in a kabi breakage.
Fix this breakage by wrapping the field in KABI_REPLACE.
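For reference, a quick standalone check of the size delta, using local copies of the two structs (assuming the mainline definitions of two u64 vs. two s32):

  #include <stdint.h>
  #include <assert.h>

  struct tnum {                   /* as in include/linux/tnum.h */
          uint64_t value;
          uint64_t mask;
  };

  struct bpf_retval_range {       /* as introduced by this series */
          int32_t minval;
          int32_t maxval;
  };

  int main(void)
  {
          /* 16 bytes -> 8 bytes: struct bpf_func_state shrinks by 8 bytes,
           * so the field is wrapped in KABI_REPLACE() to preserve the layout.
           */
          static_assert(sizeof(struct tnum) == 16, "two u64");
          static_assert(sizeof(struct bpf_retval_range) == 8, "two s32");
          return 0;
  }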
Fixes: b7105bb87cfd ("bpf: enforce exact retval range on subprog/callback exit")
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 include/linux/bpf_verifier.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index fcea91630903..96eab612745c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -314,7 +314,8 @@ struct bpf_func_state {
 	 */
 	u32 async_entry_cnt;
 	bool in_callback_fn;
-	struct bpf_retval_range callback_ret_range;
+	KABI_REPLACE(struct tnum callback_ret_range,
+		     struct bpf_retval_range callback_ret_range)
 	bool in_async_callback_fn;
 	/* For callback calling functions that limit number of possible
 	 * callback executions (e.g. bpf_loop) keeps track of current
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAYPJF
CVE: CVE-2024-47703
--------------------------------
After backporting commit f7d4945550ef ("bpf, lsm: Add check for BPF LSM return value"), the `is_retval` field was added to struct `bpf_insn_access_aux`, resulting in a kabi breakage.
Fix this breakage by placing the new field in an existing hole with KABI_FILL_HOLE.
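For reference, a standalone sketch of why the bool fits into an existing padding hole, using local stand-in structs rather than the kernel headers (it assumes the union stays 8-byte aligned because it contains a pointer):

  #include <stdint.h>
  #include <stdbool.h>
  #include <stddef.h>
  #include <assert.h>

  struct aux_before {                     /* layout before this fix */
          int reg_type;                   /* enum bpf_reg_type: 4 bytes */
          union {                         /* 8-byte aligned (holds a pointer) */
                  int ctx_field_size;
                  struct { void *btf; uint32_t btf_id; };
          };
  };

  struct aux_after {                      /* bool placed in the 4-byte hole */
          int reg_type;
          bool is_retval;
          union {
                  int ctx_field_size;
                  struct { void *btf; uint32_t btf_id; };
          };
  };

  int main(void)
  {
          /* Same size and same union offset: the layout (and thus the kabi) of
           * struct bpf_insn_access_aux is unchanged by the new field.
           */
          static_assert(sizeof(struct aux_before) == sizeof(struct aux_after), "");
          static_assert(offsetof(struct aux_before, ctx_field_size) ==
                        offsetof(struct aux_after, ctx_field_size), "");
          return 0;
  }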
Fixes: f7d4945550ef ("bpf, lsm: Add check for BPF LSM return value")
Signed-off-by: Tengda Wu <wutengda2@huawei.com>
---
 include/linux/bpf.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 017401c80102..1f0a44a59fe1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -912,7 +912,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
  */
 struct bpf_insn_access_aux {
 	enum bpf_reg_type reg_type;
-	bool is_retval; /* is accessing function return value ? */
+	KABI_FILL_HOLE(bool is_retval) /* is accessing function return value ? */
 	union {
 		int ctx_field_size;
 		struct {
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/12583
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/M...