Hou Tao (1): bpf: Optimize the free of inner map
Tengda Wu (1): Fix kabi breakage in struct bpf_map
include/linux/bpf.h | 3 ++- kernel/bpf/core.c | 4 ++++ kernel/bpf/map_in_map.c | 14 +++++++++----- kernel/bpf/syscall.c | 8 ++++++++ kernel/bpf/verifier.c | 4 +++- 5 files changed, 26 insertions(+), 7 deletions(-)
From: Hou Tao houtao1@huawei.com
stable inclusion from stable-v6.6.35 commit 2ad2f2edb944baf2735b23c7008b3dbe5b8da56c category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAD6H2
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=2ad2f2edb944baf2735b23c7008b3dbe5b8da56c
--------------------------------
[ Upstream commit af66bfd3c8538ed21cf72af18426fc4a408665cf ]
When removing the inner map from the outer map, the inner map will be freed after one RCU grace period and one RCU tasks trace grace period, so it is certain that the bpf program, which may access the inner map, has exited before the inner map is freed.
However there is no need to wait for one RCU tasks trace grace period if the outer map is only accessed by non-sleepable program. So adding sleepable_refcnt in bpf_map and increasing sleepable_refcnt when adding the outer map into env->used_maps for sleepable program. Although the max number of bpf program is INT_MAX - 1, the number of bpf programs which are being loaded may be greater than INT_MAX, so using atomic64_t instead of atomic_t for sleepable_refcnt. When removing the inner map from the outer map, using sleepable_refcnt to decide whether or not a RCU tasks trace grace period is needed before freeing the inner map.
Signed-off-by: Hou Tao houtao1@huawei.com Link: https://lore.kernel.org/r/20231204140425.1480317-6-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov ast@kernel.org Stable-dep-of: 2884dc7d08d9 ("bpf: Fix a potential use-after-free in bpf_link_free()") Signed-off-by: Sasha Levin sashal@kernel.org
Conflicts: include/linux/bpf.h [Bpf related structures have changed in commit 6b6e3a2eac5f ("kabi: reserve space for bpf related structures"), causing conflicts in this patch merge] Signed-off-by: Tengda Wu wutengda2@huawei.com Signed-off-by: Pu Lehui pulehui@huawei.com --- include/linux/bpf.h | 2 ++ kernel/bpf/core.c | 4 ++++ kernel/bpf/map_in_map.c | 14 +++++++++----- kernel/bpf/syscall.c | 8 ++++++++ kernel/bpf/verifier.c | 4 +++- 5 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8c4c2c39a6c1..dd164b2267ec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -302,6 +302,8 @@ struct bpf_map { bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ bool free_after_mult_rcu_gp; + bool free_after_rcu_gp; + atomic64_t sleepable_refcnt; s64 __percpu *elem_count;
KABI_RESERVE(1) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index caea5f14ac99..c68931013533 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2677,12 +2677,16 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, struct bpf_map **used_maps, u32 len) { struct bpf_map *map; + bool sleepable; u32 i;
+ sleepable = aux->sleepable; for (i = 0; i < len; i++) { map = used_maps[i]; if (map->ops->map_poke_untrack) map->ops->map_poke_untrack(map, aux); + if (sleepable) + atomic64_dec(&map->sleepable_refcnt); bpf_map_put(map); } } diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 3248ff5d8161..8ef269e66ba5 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -131,12 +131,16 @@ void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { struct bpf_map *inner_map = ptr;
- /* The inner map may still be used by both non-sleepable and sleepable - * bpf program, so free it after one RCU grace period and one tasks - * trace RCU grace period. + /* Defer the freeing of inner map according to the sleepable attribute + * of bpf program which owns the outer map, so unnecessary waiting for + * RCU tasks trace grace period can be avoided. */ - if (need_defer) - WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true); + if (need_defer) { + if (atomic64_read(&map->sleepable_refcnt)) + WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true); + else + WRITE_ONCE(inner_map->free_after_rcu_gp, true); + } bpf_map_put(inner_map); }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index bd5b5a9adfd4..086dab9c427c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -754,8 +754,11 @@ void bpf_map_put(struct bpf_map *map) /* bpf_map_free_id() must be called first */ bpf_map_free_id(map);
+ WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); if (READ_ONCE(map->free_after_mult_rcu_gp)) call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); + else if (READ_ONCE(map->free_after_rcu_gp)) + call_rcu(&map->rcu, bpf_map_free_rcu_gp); else bpf_map_free_in_work(map); } @@ -5400,6 +5403,11 @@ static int bpf_prog_bind_map(union bpf_attr *attr) goto out_unlock; }
+ /* The bpf program will not access the bpf map, but for the sake of + * simplicity, increase sleepable_refcnt for sleepable program as well. + */ + if (prog->aux->sleepable) + atomic64_inc(&map->sleepable_refcnt); memcpy(used_maps_new, used_maps_old, sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); used_maps_new[prog->aux->used_map_cnt] = map; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d1041517a984..47e22d18730c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -17741,10 +17741,12 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) return -E2BIG; }
+ if (env->prog->aux->sleepable) + atomic64_inc(&map->sleepable_refcnt); /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded - * and all maps are released in free_used_maps() + * and all maps are released in bpf_free_used_maps() */ bpf_map_inc(map);
From: Tengda Wu wutengda2@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IAD6H2
--------------------------------
After backporting LTS commit 4b359df7b2ad ("[Backport] bpf: Optimize the free of inner map"), two fields `free_after_rcu_gp` and `sleepable_refcnt` were introduced into struct bpf_map, which results in a KABI breakage.
Use KABI_FILL_HOLE and KABI_USE to fix kabi breakage in struct bpf_map.
Before: ------ struct bpf_map { <SNIP> /* --- cacheline 3 boundary (192 bytes) --- */ struct mutex freeze_mutex; /* 192 32 */ atomic64_t writecnt; /* 224 8 */ struct { spinlock_t lock; /* 232 4 */ enum bpf_prog_type type; /* 236 4 */ bool jited; /* 240 1 */ bool xdp_has_frags; /* 241 1 */ } owner; /* 232 12 */
/* XXX last struct has 2 bytes of padding */
bool bypass_spec_v1; /* 244 1 */ bool frozen; /* 245 1 */ bool free_after_mult_rcu_gp; /* 246 1 */
/* XXX 1 byte hole, try to pack */
s64 * elem_count; /* 248 8 */ /* --- cacheline 4 boundary (256 bytes) --- */ u64 kabi_reserved1; /* 256 8 */ u64 kabi_reserved2; /* 264 8 */ u64 kabi_reserved3; /* 272 8 */ u64 kabi_reserved4; /* 280 8 */
/* size: 320, cachelines: 5, members: 32 */ /* sum members: 271, holes: 2, sum holes: 17 */ /* padding: 32 */ /* paddings: 1, sum paddings: 2 */ /* forced alignments: 3, forced holes: 1, sum forced holes: 16 */ } __attribute__((__aligned__(64)));
After: ------ struct bpf_map { <SNIP> /* --- cacheline 3 boundary (192 bytes) --- */ struct mutex freeze_mutex; /* 192 32 */ atomic64_t writecnt; /* 224 8 */ struct { spinlock_t lock; /* 232 4 */ enum bpf_prog_type type; /* 236 4 */ bool jited; /* 240 1 */ bool xdp_has_frags; /* 241 1 */ } owner; /* 232 12 */
/* XXX last struct has 2 bytes of padding */
bool bypass_spec_v1; /* 244 1 */ bool frozen; /* 245 1 */ bool free_after_mult_rcu_gp; /* 246 1 */ bool free_after_rcu_gp; /* 247 1 */ s64 * elem_count; /* 248 8 */ /* --- cacheline 4 boundary (256 bytes) --- */ union { atomic64_t sleepable_refcnt; /* 256 8 */ struct { u64 kabi_reserved1; /* 256 8 */ } kabi_hidden_308; /* 256 8 */ union { }; /* 256 0 */ }; /* 256 8 */ u64 kabi_reserved2; /* 264 8 */ u64 kabi_reserved3; /* 272 8 */ u64 kabi_reserved4; /* 280 8 */
/* size: 320, cachelines: 5, members: 33 */ /* sum members: 272, holes: 1, sum holes: 16 */ /* padding: 32 */ /* paddings: 1, sum paddings: 2 */ /* forced alignments: 3, forced holes: 1, sum forced holes: 16 */ } __attribute__((__aligned__(64)));
Fixes: 1e23ac5c8eda ("bpf: Optimize the free of inner map") Signed-off-by: Tengda Wu wutengda2@huawei.com Signed-off-by: Pu Lehui pulehui@huawei.com --- include/linux/bpf.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index dd164b2267ec..abc920234c28 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -302,11 +302,10 @@ struct bpf_map { bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ bool free_after_mult_rcu_gp; - bool free_after_rcu_gp; - atomic64_t sleepable_refcnt; + KABI_FILL_HOLE(bool free_after_rcu_gp) s64 __percpu *elem_count;
- KABI_RESERVE(1) + KABI_USE(1, atomic64_t sleepable_refcnt) KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4)
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/12816 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/12816 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...