mailweb.openeuler.org
Kernel

kernel@openeuler.org

  • 43 participants
  • 19040 discussions
[PATCH OLK-5.10] ksmbd: fix out-of-bound read in deassemble_neg_contexts()
by Li Lingfeng 20 Jul '23

From: Namjae Jeon <linkinjeon(a)kernel.org>

mainline inclusion
from mainline-v6.4-rc6
commit f1a411873c85b642f13b01f21b534c2bab81fc1b
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I7LU2Q
CVE: CVE-2023-38427
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/f…

--------------------------------

The check at the beginning is `clen + sizeof(struct smb2_neg_context) <= len_of_ctxts`, but at the end of the loop `len_of_ctxts` subtracts `((clen + 7) & ~0x7) + sizeof(struct smb2_neg_context)`, which causes an integer underflow once clen is rounded up to 8-byte alignment. We should use `(clen + 7) & ~0x7` in the check to avoid the underflow. Some variables also need to be declared unsigned instead of signed.

[ 11.671070] BUG: KASAN: slab-out-of-bounds in smb2_handle_negotiate+0x799/0x1610
[ 11.671533] Read of size 2 at addr ffff888005e86cf2 by task kworker/0:0/7
...
[ 11.673383] Call Trace:
[ 11.673541] <TASK>
[ 11.673679] dump_stack_lvl+0x33/0x50
[ 11.673913] print_report+0xcc/0x620
[ 11.674671] kasan_report+0xae/0xe0
[ 11.675171] kasan_check_range+0x35/0x1b0
[ 11.675412] smb2_handle_negotiate+0x799/0x1610
[ 11.676217] ksmbd_smb_negotiate_common+0x526/0x770
[ 11.676795] handle_ksmbd_work+0x274/0x810
...

Cc: stable(a)vger.kernel.org
Signed-off-by: Chih-Yen Chang <cc85nod(a)gmail.com>
Tested-by: Chih-Yen Chang <cc85nod(a)gmail.com>
Signed-off-by: Namjae Jeon <linkinjeon(a)kernel.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Conflict: fs/smb/server/smb2pdu.c
Signed-off-by: Li Lingfeng <lilingfeng3(a)huawei.com>
---
 fs/ksmbd/smb2pdu.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 18e176fcddf0..9add135cb5da 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -978,13 +978,13 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
 
 static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
				       struct smb2_negotiate_req *req,
-				       int len_of_smb)
+				       unsigned int len_of_smb)
 {
	/* +4 is to account for the RFC1001 len field */
	struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
	int i = 0, len_of_ctxts;
-	int offset = le32_to_cpu(req->NegotiateContextOffset);
-	int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
+	unsigned int offset = le32_to_cpu(req->NegotiateContextOffset);
+	unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
	__le32 status = STATUS_INVALID_PARAMETER;

	ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
@@ -1002,7 +1002,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
		if (len_of_ctxts == 0)
			break;

-		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+		if (len_of_ctxts < (int)sizeof(struct smb2_neg_context))
			break;

		pctx = (struct smb2_neg_context *)((char *)pctx + offset);
@@ -1053,9 +1053,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
		}

		/* offsets must be 8 byte aligned */
-		clen = (clen + 7) & ~0x7;
-		offset = clen + sizeof(struct smb2_neg_context);
-		len_of_ctxts -= clen + sizeof(struct smb2_neg_context);
+		offset = (clen + sizeof(struct smb2_neg_context) + 7) & ~0x7;
+		len_of_ctxts -= offset;
	}
	return status;
 }
-- 
2.31.1
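For a rough feel of the arithmetic this patch describes, here is a minimal userspace sketch (not ksmbd code; the 8-byte header size and the lengths are made-up example values):

#include <stdio.h>

#define NEG_CTX_HDR_SIZE 8	/* stand-in for sizeof(struct smb2_neg_context) */

int main(void)
{
	int len_of_ctxts = 20;	/* bytes left in the request buffer */
	int clen = 11;		/* context data length taken from the packet */

	/* The old check passes: 11 + 8 <= 20 ... */
	if (clen + NEG_CTX_HDR_SIZE <= len_of_ctxts)
		/* ... but the loop then advances by the 8-byte-aligned size:
		 * ((11 + 7) & ~0x7) + 8 = 16 + 8 = 24, more than the 20 bytes
		 * that are actually left. */
		len_of_ctxts -= ((clen + 7) & ~0x7) + NEG_CTX_HDR_SIZE;

	/* Prints -4: the next iteration would read past the buffer. */
	printf("remaining = %d\n", len_of_ctxts);
	return 0;
}

With unsigned arithmetic the same mistake wraps to a huge value instead of going negative; either way the remaining-length bookkeeping has to match what the entry check allowed, which is what the last hunk above does.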
[PATCH OLK-5.10] ksmbd: fix wrong UserName check in session_user
by Li Lingfeng 20 Jul '23

From: Chih-Yen Chang <cc85nod(a)gmail.com>

mainline inclusion
from mainline-v6.4-rc3
commit f0a96d1aafd8964e1f9955c830a3e5cb3c60a90f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I7LU3D
CVE: CVE-2023-38428
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/f…

--------------------------------

The offset of UserName is relative to the address of the security buffer. To ensure the validity of UserName, we need to compare name_off + name_len with secbuf_len instead of auth_msg_len.

[ 27.096243] ==================================================================
[ 27.096890] BUG: KASAN: slab-out-of-bounds in smb_strndup_from_utf16+0x188/0x350
[ 27.097609] Read of size 2 at addr ffff888005e3b542 by task kworker/0:0/7
...
[ 27.099950] Call Trace:
[ 27.100194] <TASK>
[ 27.100397] dump_stack_lvl+0x33/0x50
[ 27.100752] print_report+0xcc/0x620
[ 27.102305] kasan_report+0xae/0xe0
[ 27.103072] kasan_check_range+0x35/0x1b0
[ 27.103757] smb_strndup_from_utf16+0x188/0x350
[ 27.105474] smb2_sess_setup+0xaf8/0x19c0
[ 27.107935] handle_ksmbd_work+0x274/0x810
[ 27.108315] process_one_work+0x419/0x760
[ 27.108689] worker_thread+0x2a2/0x6f0
[ 27.109385] kthread+0x160/0x190
[ 27.110129] ret_from_fork+0x1f/0x30
[ 27.110454] </TASK>

Cc: stable(a)vger.kernel.org
Signed-off-by: Chih-Yen Chang <cc85nod(a)gmail.com>
Acked-by: Namjae Jeon <linkinjeon(a)kernel.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Li Lingfeng <lilingfeng3(a)huawei.com>
---
 fs/ksmbd/smb2pdu.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 18e176fcddf0..c99b8a25f046 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -1383,7 +1383,7 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
	struct authenticate_message *authblob;
	struct ksmbd_user *user;
	char *name;
-	unsigned int auth_msg_len, name_off, name_len, secbuf_len;
+	unsigned int name_off, name_len, secbuf_len;

	secbuf_len = le16_to_cpu(req->SecurityBufferLength);
	if (secbuf_len < sizeof(struct authenticate_message)) {
@@ -1393,9 +1393,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
	authblob = user_authblob(conn, req);
	name_off = le32_to_cpu(authblob->UserName.BufferOffset);
	name_len = le16_to_cpu(authblob->UserName.Length);
-	auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;

-	if (auth_msg_len < (u64)name_off + name_len)
+	if (secbuf_len < (u64)name_off + name_len)
		return NULL;

	name = smb_strndup_from_utf16((const char *)authblob + name_off,
-- 
2.31.1
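The bound being enforced here is easy to get wrong, so here is a small self-contained sketch of the rule this patch applies (illustrative types only, not the real ksmbd structures):

#include <stdbool.h>
#include <stdint.h>

/*
 * UserName's offset is relative to the start of the security buffer, so the
 * name must fit inside secbuf_len itself.  Bounding it by "security buffer
 * offset + length" (as the old code did) also accepts offsets that point
 * beyond the end of the buffer.
 */
bool username_in_bounds(uint32_t name_off, uint16_t name_len,
			uint16_t secbuf_len)
{
	/* the 64-bit sum cannot wrap before the comparison */
	return (uint64_t)name_off + name_len <= secbuf_len;
}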
[PATCH OLK-5.10] ipv6/addrconf: fix a potential refcount underflow for idev
by Ziyang Xuan 19 Jul '23

mainline inclusion
from mainline-v6.5-rc2
commit 06a0716949c22e2aefb648526580671197151acc
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7M991
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

---------------------------

Currently, addrconf_mod_rs_timer() takes a reference on idev only when rs_timer is not pending, and then modifies the rs_timer timeout. There is a time gap at [1]: if the pending rs_timer becomes not pending in that window, we miss holding idev even though rs_timer is re-armed. The timer callback addrconf_rs_timer() will then run and put idev without a corresponding hold, causing a refcount underflow for idev.

	if (!timer_pending(&idev->rs_timer))
		in6_dev_hold(idev);	<--------------[1]
	mod_timer(&idev->rs_timer, jiffies + when);

To fix the issue, hold idev if mod_timer() returns 0.

Fixes: b7b1bfce0bb6 ("ipv6: split duplicate address detection and router solicitation timer")
Suggested-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/ipv6/addrconf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 31a0b28e4167..a2bdefcd2b30 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -314,9 +314,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
				   unsigned long when)
 {
-	if (!timer_pending(&idev->rs_timer))
+	if (!mod_timer(&idev->rs_timer, jiffies + when))
		in6_dev_hold(idev);
-	mod_timer(&idev->rs_timer, jiffies + when);
 }

 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
-- 
2.25.1
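The fix leans on mod_timer()'s return value: it re-arms the timer and reports in the same step whether the timer was already pending. A stand-alone toy model of the fixed pattern (simplified types, not the kernel API):

#include <stdbool.h>

struct toy_timer { bool pending; };
struct toy_dev { int refcnt; struct toy_timer rs_timer; };

/* Stand-in for mod_timer(): re-arms the timer and returns 1 if it was already
 * pending, 0 if it was idle -- the contract the patch relies on. */
int toy_mod_timer(struct toy_timer *t)
{
	int was_pending = t->pending;

	t->pending = true;
	return was_pending;
}

/* Checking and re-arming happen in one call, so a reference is taken exactly
 * when the timer goes from idle to armed.  There is no window in which an
 * old pending timer can fire (and drop a reference) after we decided not to
 * take one. */
void toy_mod_rs_timer(struct toy_dev *dev)
{
	if (!toy_mod_timer(&dev->rs_timer))
		dev->refcnt++;		/* stands in for in6_dev_hold(idev) */
}

int main(void)
{
	struct toy_dev dev = { .refcnt = 1 };

	toy_mod_rs_timer(&dev);		/* idle -> armed: takes a reference */
	toy_mod_rs_timer(&dev);		/* already pending: no extra hold */
	return dev.refcnt == 2 ? 0 : 1;
}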
[PATCH openEuler-1.0-LTS] ipv6/addrconf: fix a potential refcount underflow for idev
by Ziyang Xuan 19 Jul '23

mainline inclusion
from mainline-v6.5-rc2
commit 06a0716949c22e2aefb648526580671197151acc
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7M991
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

---------------------------

Currently, addrconf_mod_rs_timer() takes a reference on idev only when rs_timer is not pending, and then modifies the rs_timer timeout. There is a time gap at [1]: if the pending rs_timer becomes not pending in that window, we miss holding idev even though rs_timer is re-armed. The timer callback addrconf_rs_timer() will then run and put idev without a corresponding hold, causing a refcount underflow for idev.

	if (!timer_pending(&idev->rs_timer))
		in6_dev_hold(idev);	<--------------[1]
	mod_timer(&idev->rs_timer, jiffies + when);

To fix the issue, hold idev if mod_timer() returns 0.

Fixes: b7b1bfce0bb6 ("ipv6: split duplicate address detection and router solicitation timer")
Suggested-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/ipv6/addrconf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9d8b791f63ef..b4f4e751e3a7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -316,9 +316,8 @@ static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
				   unsigned long when)
 {
-	if (!timer_pending(&idev->rs_timer))
+	if (!mod_timer(&idev->rs_timer, jiffies + when))
		in6_dev_hold(idev);
-	mod_timer(&idev->rs_timer, jiffies + when);
 }

 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
-- 
2.25.1
[PATCH OLK-5.10 0/4] fix three CVEs by backport mainline patchs
by ZhaoLong Wang 19 Jul '23

Mainline patch f5c779b7ddbda ("ksmbd: fix racy issue from session setup and logoff") fixes three CVEs: CVE-2023-32250, CVE-2023-32252 and CVE-2023-32257. In order to apply that patch cleanly, the related prerequisite bugfix patches are incorporated as well.

Colin Ian King (1):
  ksmbd: Fix spelling mistake "excceed" -> "exceeded"

Dawei Li (1):
  ksmbd: Implements sess->ksmbd_chann_list as xarray

Namjae Jeon (2):
  ksmbd: limit pdu length size according to connection status
  ksmbd: fix racy issue from session setup and logoff

 fs/ksmbd/connection.c | 27 +++++++---
 fs/ksmbd/connection.h | 39 ++++++++------
 fs/ksmbd/mgmt/user_session.c | 62 +++++++++------------
 fs/ksmbd/mgmt/user_session.h | 4 +-
 fs/ksmbd/server.c | 3 +-
 fs/ksmbd/smb2pdu.c | 101 +++++++++++++++++------------------
 fs/ksmbd/smb2pdu.h | 5 +-
 fs/ksmbd/transport_tcp.c | 2 +-
 8 files changed, 123 insertions(+), 120 deletions(-)

-- 
2.34.3
[PATCH openEuler-1.0-LTS] netfilter: nf_tables: prevent OOB access in nft_byteorder_eval
by Ziyang Xuan 19 Jul '23

From: Thadeu Lima de Souza Cascardo <cascardo(a)canonical.com> mainline inclusion from mainline-v6.5-rc2 commit caf3ef7468f7534771b5c44cd8dbd6f7f87c2cbd category: bugfix bugzilla: https://gitee.com/src-openeuler/kernel/issues/I7ISR1 CVE: CVE-2023-35001 Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?… --------------------------- When evaluating byteorder expressions with size 2, a union with 32-bit and 16-bit members is used. Since the 16-bit members are aligned to 32-bit, the array accesses will be out-of-bounds. It may lead to a stack-out-of-bounds access like the one below: [ 23.095215] ================================================================== [ 23.095625] BUG: KASAN: stack-out-of-bounds in nft_byteorder_eval+0x13c/0x320 [ 23.096020] Read of size 2 at addr ffffc90000007948 by task ping/115 [ 23.096358] [ 23.096456] CPU: 0 PID: 115 Comm: ping Not tainted 6.4.0+ #413 [ 23.096770] Call Trace: [ 23.096910] <IRQ> [ 23.097030] dump_stack_lvl+0x60/0xc0 [ 23.097218] print_report+0xcf/0x630 [ 23.097388] ? nft_byteorder_eval+0x13c/0x320 [ 23.097577] ? kasan_addr_to_slab+0xd/0xc0 [ 23.097760] ? nft_byteorder_eval+0x13c/0x320 [ 23.097949] kasan_report+0xc9/0x110 [ 23.098106] ? nft_byteorder_eval+0x13c/0x320 [ 23.098298] __asan_load2+0x83/0xd0 [ 23.098453] nft_byteorder_eval+0x13c/0x320 [ 23.098659] nft_do_chain+0x1c8/0xc50 [ 23.098852] ? __pfx_nft_do_chain+0x10/0x10 [ 23.099078] ? __kasan_check_read+0x11/0x20 [ 23.099295] ? __pfx___lock_acquire+0x10/0x10 [ 23.099535] ? __pfx___lock_acquire+0x10/0x10 [ 23.099745] ? __kasan_check_read+0x11/0x20 [ 23.099929] nft_do_chain_ipv4+0xfe/0x140 [ 23.100105] ? __pfx_nft_do_chain_ipv4+0x10/0x10 [ 23.100327] ? lock_release+0x204/0x400 [ 23.100515] ? nf_hook.constprop.0+0x340/0x550 [ 23.100779] nf_hook_slow+0x6c/0x100 [ 23.100977] ? __pfx_nft_do_chain_ipv4+0x10/0x10 [ 23.101223] nf_hook.constprop.0+0x334/0x550 [ 23.101443] ? __pfx_ip_local_deliver_finish+0x10/0x10 [ 23.101677] ? __pfx_nf_hook.constprop.0+0x10/0x10 [ 23.101882] ? __pfx_ip_rcv_finish+0x10/0x10 [ 23.102071] ? __pfx_ip_local_deliver_finish+0x10/0x10 [ 23.102291] ? rcu_read_lock_held+0x4b/0x70 [ 23.102481] ip_local_deliver+0xbb/0x110 [ 23.102665] ? __pfx_ip_rcv+0x10/0x10 [ 23.102839] ip_rcv+0x199/0x2a0 [ 23.102980] ? __pfx_ip_rcv+0x10/0x10 [ 23.103140] __netif_receive_skb_one_core+0x13e/0x150 [ 23.103362] ? __pfx___netif_receive_skb_one_core+0x10/0x10 [ 23.103647] ? mark_held_locks+0x48/0xa0 [ 23.103819] ? process_backlog+0x36c/0x380 [ 23.103999] __netif_receive_skb+0x23/0xc0 [ 23.104179] process_backlog+0x91/0x380 [ 23.104350] __napi_poll.constprop.0+0x66/0x360 [ 23.104589] ? net_rx_action+0x1cb/0x610 [ 23.104811] net_rx_action+0x33e/0x610 [ 23.105024] ? _raw_spin_unlock+0x23/0x50 [ 23.105257] ? __pfx_net_rx_action+0x10/0x10 [ 23.105485] ? mark_held_locks+0x48/0xa0 [ 23.105741] __do_softirq+0xfa/0x5ab [ 23.105956] ? __dev_queue_xmit+0x765/0x1c00 [ 23.106193] do_softirq.part.0+0x49/0xc0 [ 23.106423] </IRQ> [ 23.106547] <TASK> [ 23.106670] __local_bh_enable_ip+0xf5/0x120 [ 23.106903] __dev_queue_xmit+0x789/0x1c00 [ 23.107131] ? __pfx___dev_queue_xmit+0x10/0x10 [ 23.107381] ? find_held_lock+0x8e/0xb0 [ 23.107585] ? lock_release+0x204/0x400 [ 23.107798] ? neigh_resolve_output+0x185/0x350 [ 23.108049] ? mark_held_locks+0x48/0xa0 [ 23.108265] ? neigh_resolve_output+0x185/0x350 [ 23.108514] neigh_resolve_output+0x246/0x350 [ 23.108753] ? neigh_resolve_output+0x246/0x350 [ 23.109003] ip_finish_output2+0x3c3/0x10b0 [ 23.109250] ? 
__pfx_ip_finish_output2+0x10/0x10 [ 23.109510] ? __pfx_nf_hook+0x10/0x10 [ 23.109732] __ip_finish_output+0x217/0x390 [ 23.109978] ip_finish_output+0x2f/0x130 [ 23.110207] ip_output+0xc9/0x170 [ 23.110404] ip_push_pending_frames+0x1a0/0x240 [ 23.110652] raw_sendmsg+0x102e/0x19e0 [ 23.110871] ? __pfx_raw_sendmsg+0x10/0x10 [ 23.111093] ? lock_release+0x204/0x400 [ 23.111304] ? __mod_lruvec_page_state+0x148/0x330 [ 23.111567] ? find_held_lock+0x8e/0xb0 [ 23.111777] ? find_held_lock+0x8e/0xb0 [ 23.111993] ? __rcu_read_unlock+0x7c/0x2f0 [ 23.112225] ? aa_sk_perm+0x18a/0x550 [ 23.112431] ? filemap_map_pages+0x4f1/0x900 [ 23.112665] ? __pfx_aa_sk_perm+0x10/0x10 [ 23.112880] ? find_held_lock+0x8e/0xb0 [ 23.113098] inet_sendmsg+0xa0/0xb0 [ 23.113297] ? inet_sendmsg+0xa0/0xb0 [ 23.113500] ? __pfx_inet_sendmsg+0x10/0x10 [ 23.113727] sock_sendmsg+0xf4/0x100 [ 23.113924] ? move_addr_to_kernel.part.0+0x4f/0xa0 [ 23.114190] __sys_sendto+0x1d4/0x290 [ 23.114391] ? __pfx___sys_sendto+0x10/0x10 [ 23.114621] ? __pfx_mark_lock.part.0+0x10/0x10 [ 23.114869] ? lock_release+0x204/0x400 [ 23.115076] ? find_held_lock+0x8e/0xb0 [ 23.115287] ? rcu_is_watching+0x23/0x60 [ 23.115503] ? __rseq_handle_notify_resume+0x6e2/0x860 [ 23.115778] ? __kasan_check_write+0x14/0x30 [ 23.116008] ? blkcg_maybe_throttle_current+0x8d/0x770 [ 23.116285] ? mark_held_locks+0x28/0xa0 [ 23.116503] ? do_syscall_64+0x37/0x90 [ 23.116713] __x64_sys_sendto+0x7f/0xb0 [ 23.116924] do_syscall_64+0x59/0x90 [ 23.117123] ? irqentry_exit_to_user_mode+0x25/0x30 [ 23.117387] ? irqentry_exit+0x77/0xb0 [ 23.117593] ? exc_page_fault+0x92/0x140 [ 23.117806] entry_SYSCALL_64_after_hwframe+0x6e/0xd8 [ 23.118081] RIP: 0033:0x7f744aee2bba [ 23.118282] Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89 [ 23.119237] RSP: 002b:00007ffd04a7c9f8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c [ 23.119644] RAX: ffffffffffffffda RBX: 00007ffd04a7e0a0 RCX: 00007f744aee2bba [ 23.120023] RDX: 0000000000000040 RSI: 000056488e9e6300 RDI: 0000000000000003 [ 23.120413] RBP: 000056488e9e6300 R08: 00007ffd04a80320 R09: 0000000000000010 [ 23.120809] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000040 [ 23.121219] R13: 00007ffd04a7dc38 R14: 00007ffd04a7ca00 R15: 00007ffd04a7e0a0 [ 23.121617] </TASK> [ 23.121749] [ 23.121845] The buggy address belongs to the virtual mapping at [ 23.121845] [ffffc90000000000, ffffc90000009000) created by: [ 23.121845] irq_init_percpu_irqstack+0x1cf/0x270 [ 23.122707] [ 23.122803] The buggy address belongs to the physical page: [ 23.123104] page:0000000072ac19f0 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x24a09 [ 23.123609] flags: 0xfffffc0001000(reserved|node=0|zone=1|lastcpupid=0x1fffff) [ 23.123998] page_type: 0xffffffff() [ 23.124194] raw: 000fffffc0001000 ffffea0000928248 ffffea0000928248 0000000000000000 [ 23.124610] raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000 [ 23.125023] page dumped because: kasan: bad access detected [ 23.125326] [ 23.125421] Memory state around the buggy address: [ 23.125682] ffffc90000007800: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 23.126072] ffffc90000007880: 00 00 00 00 00 f1 f1 f1 f1 f1 f1 00 00 f2 f2 00 [ 23.126455] >ffffc90000007900: 00 00 00 00 00 00 00 00 00 f2 f2 f2 f2 00 00 00 [ 23.126840] ^ [ 23.127138] ffffc90000007980: 00 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 [ 23.127522] ffffc90000007a00: 
f3 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1
[ 23.127906] ==================================================================
[ 23.128324] Disabling lock debugging due to kernel taint

Using simple s16 pointers for the 16-bit accesses fixes the problem. For the 32-bit accesses, src and dst can be used directly.

Fixes: 96518518cc41 ("netfilter: add nftables")
Cc: stable(a)vger.kernel.org
Reported-by: Tanguy DUBROCA (@SidewayRE) from @Synacktiv working with ZDI
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo(a)canonical.com>
Reviewed-by: Florian Westphal <fw(a)strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Signed-off-by: Ziyang Xuan <william.xuanziyang(a)huawei.com>
---
 net/netfilter/nft_byteorder.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index 13d4e421a6b3..46a8f894717c 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -33,11 +33,11 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
	const struct nft_byteorder *priv = nft_expr_priv(expr);
	u32 *src = &regs->data[priv->sreg];
	u32 *dst = &regs->data[priv->dreg];
-	union { u32 u32; u16 u16; } *s, *d;
+	u16 *s16, *d16;
	unsigned int i;

-	s = (void *)src;
-	d = (void *)dst;
+	s16 = (void *)src;
+	d16 = (void *)dst;

	switch (priv->size) {
	case 8: {
@@ -63,11 +63,11 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
		switch (priv->op) {
		case NFT_BYTEORDER_NTOH:
			for (i = 0; i < priv->len / 4; i++)
-				d[i].u32 = ntohl((__force __be32)s[i].u32);
+				dst[i] = ntohl((__force __be32)src[i]);
			break;
		case NFT_BYTEORDER_HTON:
			for (i = 0; i < priv->len / 4; i++)
-				d[i].u32 = (__force __u32)htonl(s[i].u32);
+				dst[i] = (__force __u32)htonl(src[i]);
			break;
		}
		break;
@@ -75,11 +75,11 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
		switch (priv->op) {
		case NFT_BYTEORDER_NTOH:
			for (i = 0; i < priv->len / 2; i++)
-				d[i].u16 = ntohs((__force __be16)s[i].u16);
+				d16[i] = ntohs((__force __be16)s16[i]);
			break;
		case NFT_BYTEORDER_HTON:
			for (i = 0; i < priv->len / 2; i++)
-				d[i].u16 = (__force __u16)htons(s[i].u16);
+				d16[i] = (__force __u16)htons(s16[i]);
			break;
		}
		break;
-- 
2.25.1
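To see why the old union-based indexing walks out of bounds, here is a small userspace sketch (plain stdint types instead of the kernel's u32/u16):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regs[4] = { 0x11112222, 0x33334444, 0x55556666, 0x77778888 };

	/* element stride is sizeof(union) = 4 bytes ... */
	union { uint32_t u32; uint16_t u16; } *s = (void *)regs;
	/* ... while a plain 16-bit pointer strides by 2 bytes */
	uint16_t *s16 = (void *)regs;

	/* a 16-bit byteorder op over 8 bytes of data needs len / 2 = 4 elements */
	for (int i = 0; i < 4; i++)
		printf("s[%d].u16 at byte offset %zu, s16[%d] at byte offset %zu\n",
		       i, i * sizeof(*s), i, i * sizeof(*s16));

	/* s[0..3].u16 touch offsets 0, 4, 8 and 12 -- 16 bytes for an 8-byte
	 * operand, i.e. past the intended data; s16[0..3] stay within
	 * offsets 0..6 as expected. */
	return 0;
}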
[PATCH openEuler-22.03-LTS 0/2] Fix null-ptr-deref while calling getpeername
by Zhong Jinghua 18 Jul '23

Fix null-ptr-deref while calling getpeername.

Mike Christie (1):
  scsi: iscsi: iscsi_tcp: Fix null-ptr-deref while calling getpeername()

Zhong Jinghua (1):
  scsi: iscsi_tcp: Check that sock is valid before iscsi_set_param()

 drivers/scsi/iscsi_tcp.c | 72 ++++++++++++++++++++++++++++------------
 drivers/scsi/iscsi_tcp.h | 2 ++
 2 files changed, 53 insertions(+), 21 deletions(-)

-- 
2.31.1
[PATCH openEuler-23.09] arm64/vmalloc: use module region only for module_alloc() if CONFIG_RANDOMIZE_BASE is set
by Liu Shixin 18 Jul '23

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7LXK8

--------------------------------

After I added a 10GB pmem device, I got the following error message when inserting a module:

insmod: vmalloc error: size 16384, vm_struct allocation failed, mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0

If CONFIG_RANDOMIZE_BASE is set, the module region can be located entirely within the vmalloc region. Although module_alloc() can fall back to a 2GB window if ARM64_MODULE_PLTS is set, the module region is still easily exhausted because it sits at the bottom of the vmalloc region and the vmalloc region is allocated from bottom to top.

Skip the module region if not calling from module_alloc().

Signed-off-by: Liu Shixin <liushixin2(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
[cherry-pick from devel-6.1]
Signed-off-by: Liu Shixin <liushixin2(a)huawei.com>
---
 arch/arm64/include/asm/vmalloc.h | 26 ++++++++++++++++++++++++++
 include/linux/vmalloc.h | 9 +++++++++
 mm/vmalloc.c | 4 ++++
 3 files changed, 39 insertions(+)

diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 38fafffe699f..4feff546b11b 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -31,4 +31,30 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
	return pgprot_tagged(prot);
 }

+#ifdef CONFIG_RANDOMIZE_BASE
+extern u64 module_alloc_base;
+#define arch_vmap_skip_module_region arch_vmap_skip_module_region
+static inline void arch_vmap_skip_module_region(unsigned long *addr,
+						unsigned long vstart,
+						unsigned long size,
+						unsigned long align)
+{
+	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
+
+	if (vstart == module_alloc_base)
+		return;
+
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		/* don't exceed the static module region - see module_alloc() */
+		module_alloc_end = MODULES_END;
+
+	if ((module_alloc_base >= *addr + size) ||
+	    (module_alloc_end <= *addr))
+		return;
+
+	*addr = ALIGN(module_alloc_end, align);
+}
+#endif
+
 #endif /* _ASM_ARM64_VMALLOC_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c720be70c8dd..a7b6c2183d55 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -124,6 +124,15 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
 }
 #endif

+#ifndef arch_vmap_skip_module_region
+static inline void arch_vmap_skip_module_region(unsigned long *addr,
+						unsigned long vstart,
+						unsigned long size,
+						unsigned long align)
+{
+}
+#endif
+
 /*
  * Highlevel APIs for driver use
  */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1d13d71687d7..afab3c7295b4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1230,6 +1230,8 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
	else
		nva_start_addr = ALIGN(vstart, align);

+	arch_vmap_skip_module_region(&nva_start_addr, vstart, size, align);
+
	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
	    nva_start_addr < vstart)
@@ -1517,6 +1519,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
	else
		nva_start_addr = ALIGN(vstart, align);

+	arch_vmap_skip_module_region(&nva_start_addr, vstart, size, align);
+
	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;
-- 
2.25.1
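The core of the change is the address-skipping helper: if a candidate vmalloc range overlaps the module window, the search restarts just past that window. A userspace sketch of that idea (the window bounds here are made-up constants, not the real module_alloc_base/MODULES_VSIZE):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Push *addr past [mod_start, mod_end) when [*addr, *addr + size) overlaps it. */
void skip_module_region(unsigned long *addr, unsigned long size,
			unsigned long align,
			unsigned long mod_start, unsigned long mod_end)
{
	/* no overlap: candidate ends before the window or starts after it */
	if (mod_start >= *addr + size || mod_end <= *addr)
		return;

	*addr = ALIGN_UP(mod_end, align);
}

int main(void)
{
	unsigned long addr = 0x1000;

	/* a 0x4000-byte candidate at 0x1000 overlaps a module window at
	 * 0x2000..0x4000, so the search continues from 0x4000 */
	skip_module_region(&addr, 0x4000, 0x1000, 0x2000, 0x4000);
	printf("new start: %#lx\n", addr);
	return 0;
}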
[PATCH openEuler-23.09 0/1] Fix module region exhausted
by Liu Shixin 18 Jul '23

Liu Shixin (1):
  arm64/vmalloc: use module region only for module_alloc() if CONFIG_RANDOMIZE_BASE is set

 arch/arm64/include/asm/vmalloc.h | 26 ++++++++++++++++++++++++++
 include/linux/vmalloc.h | 9 +++++++++
 mm/vmalloc.c | 4 ++++
 3 files changed, 39 insertions(+)

-- 
2.25.1
[PATCH OLK-5.10 1/6] workqueue: Rename "delayed" (delayed by active management) to "inactive"
by Zeng Heng 18 Jul '23

From: Lai Jiangshan <laijs(a)linux.alibaba.com>

mainline inclusion
from mainline-v5.15-rc1
commit f97a4a1a3f8769e3452885967955e21c88f3f263
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7LRJF
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

---------------------------

There are two kinds of "delayed" work items in the workqueue subsystem. One is timer-delayed work items, which are visible to workqueue users. The other is work items delayed by active management, which are not directly visible to workqueue users. Using the word "delayed" for both kinds caused some ambiguity.

This patch renames the latter (delayed by active management) to "inactive", because it is used for workqueue active management and most of its related symbols are named with "active" or "activate".

All "delayed" and "DELAYED" are carefully checked and renamed one by one to avoid accidentally changing the name of the other kind, timer-delayed.

No functional change intended.

Signed-off-by: Lai Jiangshan <laijs(a)linux.alibaba.com>
Signed-off-by: Tejun Heo <tj(a)kernel.org>
Signed-off-by: Zeng Heng <zengheng4(a)huawei.com>
---
 include/linux/workqueue.h | 4 +--
 kernel/workqueue.c | 58 +++++++++++++++++++--------------------
 2 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4ecfecb1202f..8067a36c9fa7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -30,7 +30,7 @@ void delayed_work_timer_fn(struct timer_list *t);

 enum {
	WORK_STRUCT_PENDING_BIT = 0,	/* work item is pending execution */
-	WORK_STRUCT_DELAYED_BIT = 1,	/* work item is delayed */
+	WORK_STRUCT_INACTIVE_BIT= 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -43,7 +43,7 @@ enum {
	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
-	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
+	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9db32d9739f5..670bd33d8980 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -208,7 +208,7 @@ struct pool_workqueue {
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
-	struct list_head	delayed_works;	/* L: delayed works */
+	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */
@@ -1146,7 +1146,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
	}
 }

-static void pwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_inactive_work(struct work_struct *work)
 {
	struct pool_workqueue *pwq = get_work_pwq(work);

@@ -1154,16 +1154,16 @@ static void pwq_activate_delayed_work(struct work_struct *work)
	if (list_empty(&pwq->pool->worklist))
		pwq->pool->watchdog_ts = jiffies;
	move_linked_works(work, &pwq->pool->worklist, NULL);
-	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
	pwq->nr_active++;
 }

-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct
pool_workqueue *pwq) { - struct work_struct *work = list_first_entry(&pwq->delayed_works, + struct work_struct *work = list_first_entry(&pwq->inactive_works, struct work_struct, entry); - pwq_activate_delayed_work(work); + pwq_activate_inactive_work(work); } /** @@ -1186,10 +1186,10 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) pwq->nr_in_flight[color]--; pwq->nr_active--; - if (!list_empty(&pwq->delayed_works)) { - /* one down, submit a delayed one */ + if (!list_empty(&pwq->inactive_works)) { + /* one down, submit an inactive one */ if (pwq->nr_active < pwq->max_active) - pwq_activate_first_delayed(pwq); + pwq_activate_first_inactive(pwq); } /* is flush in progress and are we at the flushing tip? */ @@ -1291,14 +1291,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, debug_work_deactivate(work); /* - * A delayed work item cannot be grabbed directly because + * An inactive work item cannot be grabbed directly because * it might have linked NO_COLOR work items which, if left - * on the delayed_list, will confuse pwq->nr_active + * on the inactive_works list, will confuse pwq->nr_active * management later on and cause stall. Make sure the work * item is activated before grabbing. */ - if (*work_data_bits(work) & WORK_STRUCT_DELAYED) - pwq_activate_delayed_work(work); + if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) + pwq_activate_inactive_work(work); list_del_init(&work->entry); pwq_dec_nr_in_flight(pwq, get_work_color(work)); @@ -1497,8 +1497,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, if (list_empty(worklist)) pwq->pool->watchdog_ts = jiffies; } else { - work_flags |= WORK_STRUCT_DELAYED; - worklist = &pwq->delayed_works; + work_flags |= WORK_STRUCT_INACTIVE; + worklist = &pwq->inactive_works; } debug_work_activate(work); @@ -2535,7 +2535,7 @@ static int rescuer_thread(void *__rescuer) /* * The above execution of rescued work items could * have created more to rescue through - * pwq_activate_first_delayed() or chained + * pwq_activate_first_inactive() or chained * queueing. Let's put @pwq back on mayday list so * that such back-to-back work items, which may be * being used to relieve memory pressure, don't @@ -2961,7 +2961,7 @@ void drain_workqueue(struct workqueue_struct *wq) bool drained; raw_spin_lock_irq(&pwq->pool->lock); - drained = !pwq->nr_active && list_empty(&pwq->delayed_works); + drained = !pwq->nr_active && list_empty(&pwq->inactive_works); raw_spin_unlock_irq(&pwq->pool->lock); if (drained) @@ -3717,7 +3717,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work) * @pwq: target pool_workqueue * * If @pwq isn't freezing, set @pwq->max_active to the associated - * workqueue's saved_max_active and activate delayed work items + * workqueue's saved_max_active and activate inactive work items * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 
*/ static void pwq_adjust_max_active(struct pool_workqueue *pwq) @@ -3746,9 +3746,9 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) pwq->max_active = wq->saved_max_active; - while (!list_empty(&pwq->delayed_works) && + while (!list_empty(&pwq->inactive_works) && pwq->nr_active < pwq->max_active) { - pwq_activate_first_delayed(pwq); + pwq_activate_first_inactive(pwq); kick = true; } @@ -3779,7 +3779,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, pwq->wq = wq; pwq->flush_color = -1; pwq->refcnt = 1; - INIT_LIST_HEAD(&pwq->delayed_works); + INIT_LIST_HEAD(&pwq->inactive_works); INIT_LIST_HEAD(&pwq->pwqs_node); INIT_LIST_HEAD(&pwq->mayday_node); INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); @@ -4372,7 +4372,7 @@ static bool pwq_busy(struct pool_workqueue *pwq) if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) return true; - if (pwq->nr_active || !list_empty(&pwq->delayed_works)) + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) return true; return false; @@ -4568,7 +4568,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) else pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); - ret = !list_empty(&pwq->delayed_works); + ret = !list_empty(&pwq->inactive_works); preempt_enable(); rcu_read_unlock(); @@ -4764,11 +4764,11 @@ static void show_pwq(struct pool_workqueue *pwq) pr_cont("\n"); } - if (!list_empty(&pwq->delayed_works)) { + if (!list_empty(&pwq->inactive_works)) { bool comma = false; - pr_info(" delayed:"); - list_for_each_entry(work, &pwq->delayed_works, entry) { + pr_info(" inactive:"); + list_for_each_entry(work, &pwq->inactive_works, entry) { pr_cont_work(comma, work); comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); } @@ -4798,7 +4798,7 @@ void show_workqueue_state(void) bool idle = true; for_each_pwq(pwq, wq) { - if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { idle = false; break; } @@ -4810,7 +4810,7 @@ void show_workqueue_state(void) for_each_pwq(pwq, wq) { raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { /* * Defer printing to avoid deadlocks in console * drivers that queue work while holding locks @@ -5206,7 +5206,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe); * freeze_workqueues_begin - begin freezing workqueues * * Start freezing workqueues. After this function returns, all freezable - * workqueues will queue new works to their delayed_works list instead of + * workqueues will queue new works to their inactive_works list instead of * pool->worklist. * * CONTEXT: -- 2.25.1
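To make the distinction in the commit message concrete, here is a toy sketch of the "delayed by active management" path that this series renames to "inactive" (simplified types, not the real workqueue structures):

struct toy_work { struct toy_work *next; };

struct toy_pwq {
	int nr_active;
	int max_active;
	struct toy_work *worklist;		/* runnable right now */
	struct toy_work *inactive_works;	/* held back by max_active */
};

/* Mirrors the idea in __queue_work(): under the max_active limit the item
 * becomes active immediately; above it, the item is parked -- not because of
 * any timer, but because of active management.  That parked state is what the
 * series renames from "delayed" to "inactive". */
void toy_queue(struct toy_pwq *pwq, struct toy_work *w)
{
	if (pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;
		w->next = pwq->worklist;
		pwq->worklist = w;
	} else {
		w->next = pwq->inactive_works;
		pwq->inactive_works = w;
	}
}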