Kernel

kernel@openeuler.org

September 2025

  • 4 participants
  • 8 discussions
[PATCH OLK-6.6] sdei_watchdog: add percpu flag to fix sdei watchdog state in lpi mode
by Bowen You 01 Sep '25

From: youbowen <youbowen2(a)huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC
CVE: NA

-------------------------------------------------

The /proc/sys/kernel/nmi_watchdog interface is designed to turn the NMI
watchdog off, but it currently does not take effect in low-power
scenarios, because the PM callback enables/disables the SDEI event
whenever the power state changes. This commit adds a percpu flag so the
sysctl setting stays compatible with the PM callback.

Fixes: f022c4cac9c1 ("sdei_watchdog: use lockup_detector_retry_init() to init sdei watchdog")
Signed-off-by: Bowen You <youbowen2(a)huawei.com>
---
 arch/arm64/kernel/watchdog_sdei.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c
index 19f47e24fa59..87d97aace5b3 100644
--- a/arch/arm64/kernel/watchdog_sdei.c
+++ b/arch/arm64/kernel/watchdog_sdei.c
@@ -25,6 +25,7 @@ static int sdei_watchdog_event_num;
 bool disable_sdei_nmi_watchdog;
 static bool sdei_watchdog_registered;
 static DEFINE_PER_CPU(ktime_t, last_check_time);
+static DEFINE_PER_CPU(bool, sdei_usr_en);
 
 void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 {
@@ -44,6 +45,7 @@ void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 		pr_err("Enable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
 	}
+	__this_cpu_write(sdei_usr_en, 1);
 }
 
 void sdei_watchdog_hardlockup_disable(unsigned int cpu)
@@ -54,6 +56,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 		return;
 
 	ret = sdei_api_event_disable(sdei_watchdog_event_num);
+	__this_cpu_write(sdei_usr_en, 0);
 	if (ret)
 		pr_err("Disable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
@@ -116,10 +119,12 @@ static int sdei_watchdog_pm_notifier(struct notifier_block *nb,
 
 	switch (action) {
 	case CPU_PM_ENTER:
-		rv = sdei_api_event_disable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_disable(sdei_watchdog_event_num);
 		break;
 	case CPU_PM_EXIT:
-		rv = sdei_api_event_enable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_enable(sdei_watchdog_event_num);
 		break;
 	default:
 		return NOTIFY_DONE;
-- 
2.34.1
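For readers less familiar with the CPU PM notifier flow, the behaviour being fixed can be modelled in a few lines of ordinary C. This is a simplified, hypothetical sketch: plain variables stand in for the per-CPU flag, the firmware event state, and the CPU_PM_ENTER/CPU_PM_EXIT notifier cases; it is not the kernel code itself.

/* Simplified model of the fix: the PM path consults a "user enabled"
 * flag instead of unconditionally re-enabling the SDEI event, so
 * `echo 0 > /proc/sys/kernel/nmi_watchdog` keeps the watchdog off
 * across low-power enter/exit cycles. Names are stand-ins, not the
 * real kernel API. */
#include <stdbool.h>
#include <stdio.h>

static bool sdei_usr_en;          /* models the per-CPU flag          */
static bool sdei_event_enabled;   /* models the firmware event state  */

static void user_enable(void)   { sdei_event_enabled = true;  sdei_usr_en = true;  }
static void user_disable(void)  { sdei_event_enabled = false; sdei_usr_en = false; }

static void cpu_pm_enter(void)
{
	if (sdei_usr_en)                /* only touch the event if the user wants it */
		sdei_event_enabled = false;
}

static void cpu_pm_exit(void)
{
	if (sdei_usr_en)                /* without the flag, this re-armed a disabled watchdog */
		sdei_event_enabled = true;
}

int main(void)
{
	user_enable();
	user_disable();                 /* echo 0 > /proc/sys/kernel/nmi_watchdog */
	cpu_pm_enter();
	cpu_pm_exit();
	printf("event enabled after idle cycle: %d\n", sdei_event_enabled); /* prints 0 */
	return 0;
}

Without the sdei_usr_en check, the exit path would re-enable the event even though the user had turned the watchdog off, which is the state mismatch the commit message describes.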
[PATCH OLK-6.6] sdei_watchdog: add percpu flag to fix sdei watchdog state in lpi mode
by Bowen You 01 Sep '25

From: youbowen <youbowen2(a)huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC
CVE: NA

-------------------------------------------------

The /proc/sys/kernel/nmi_watchdog interface is designed to turn the NMI
watchdog off, but it currently does not take effect in low-power
scenarios, because the PM callback enables/disables the SDEI event
whenever the power state changes. This commit adds a percpu flag so the
sysctl setting stays compatible with the PM callback.

Fixes: f022c4cac9c1 ("sdei_watchdog: use lockup_detector_retry_init() to init sdei watchdog")
Signed-off-by: Bowen You <youbowen2(a)huawei.com>
---
 arch/arm64/kernel/watchdog_sdei.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c
index 19f47e24fa59..da1d80743996 100644
--- a/arch/arm64/kernel/watchdog_sdei.c
+++ b/arch/arm64/kernel/watchdog_sdei.c
@@ -25,6 +25,7 @@ static int sdei_watchdog_event_num;
 bool disable_sdei_nmi_watchdog;
 static bool sdei_watchdog_registered;
 static DEFINE_PER_CPU(ktime_t, last_check_time);
+static DEFINE_PER_CPU(bool, sdei_usr_en);
 
 void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 {
@@ -44,6 +45,7 @@ void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 		pr_err("Enable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
 	}
+	__this_cpu_write(sdei_usr_en, 1);
 }
 
 void sdei_watchdog_hardlockup_disable(unsigned int cpu)
@@ -54,6 +56,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 		return;
 
 	ret = sdei_api_event_disable(sdei_watchdog_event_num);
+	__this_cpu_write(sdei_usr_en, 0);
 	if (ret)
 		pr_err("Disable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
@@ -62,6 +65,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 
 static int sdei_watchdog_callback(u32 event, struct pt_regs *regs, void *arg)
 {
+	pr_err("sdei watchdog callback on cpu %d\n", smp_processor_id());
 	ktime_t delta, now = ktime_get_mono_fast_ns();
 
 	delta = now - __this_cpu_read(last_check_time);
@@ -116,10 +120,12 @@ static int sdei_watchdog_pm_notifier(struct notifier_block *nb,
 
 	switch (action) {
 	case CPU_PM_ENTER:
-		rv = sdei_api_event_disable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_disable(sdei_watchdog_event_num);
 		break;
 	case CPU_PM_EXIT:
-		rv = sdei_api_event_enable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_enable(sdei_watchdog_event_num);
 		break;
 	default:
 		return NOTIFY_DONE;
-- 
2.34.1
[PATCH OLK-6.6] perf/aux: Fix pending disable flow when the AUX ring buffer overruns
by Pu Lehui 01 Sep '25

From: Leo Yan <leo.yan(a)arm.com>

mainline inclusion
from mainline-v6.16-rc4
commit 1476b218327b89bbb64c14619a2d34f0c320f2c3
category: bugfix
bugzilla: 190743
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

If an AUX event overruns, the event core layer intends to disable the
event by setting the 'pending_disable' flag. Unfortunately, the event
is not actually disabled afterwards.

In commit:

  ca6c21327c6a ("perf: Fix missing SIGTRAPs")

the 'pending_disable' flag was changed to a boolean. However, the AUX
event code was not updated accordingly. The flag ends up holding a CPU
number. If this number is zero, the flag is taken as false and the IRQ
work is never triggered.

Later, with commit:

  2b84def990d3 ("perf: Split __perf_pending_irq() out of perf_pending_irq()")

a new IRQ work 'pending_disable_irq' was introduced to handle event
disabling. The AUX event path was not updated to kick off the work queue.

To fix this bug, when an AUX ring buffer overrun is detected, call
perf_event_disable_inatomic() to initiate the pending disable flow.

Also update the outdated comment for setting the flag, to reflect the
boolean values (0 or 1).

Fixes: 2b84def990d3 ("perf: Split __perf_pending_irq() out of perf_pending_irq()")
Fixes: ca6c21327c6a ("perf: Fix missing SIGTRAPs")
Signed-off-by: Leo Yan <leo.yan(a)arm.com>
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Reviewed-by: James Clark <james.clark(a)linaro.org>
Reviewed-by: Yeoreum Yun <yeoreum.yun(a)arm.com>
Cc: Adrian Hunter <adrian.hunter(a)intel.com>
Cc: Alexander Shishkin <alexander.shishkin(a)linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme(a)kernel.org>
Cc: Ian Rogers <irogers(a)google.com>
Cc: Jiri Olsa <jolsa(a)redhat.com>
Cc: Liang Kan <kan.liang(a)linux.intel.com>
Cc: Marco Elver <elver(a)google.com>
Cc: Mark Rutland <mark.rutland(a)arm.com>
Cc: Namhyung Kim <namhyung(a)kernel.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
Cc: linux-perf-users(a)vger.kernel.org
Link: https://lore.kernel.org/r/20250625170737.2918295-1-leo.yan@arm.com
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
 kernel/events/core.c        | 6 +++---
 kernel/events/ring_buffer.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1ee8084e343d..ec89dc39ae16 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6807,15 +6807,15 @@ static void __perf_pending_irq(struct perf_event *event)
	 * CPU-A			CPU-B
	 *
	 * perf_event_disable_inatomic()
-	 *   @pending_disable = CPU-A;
+	 *   @pending_disable = 1;
	 *   irq_work_queue();
	 *
	 * sched-out
-	 *   @pending_disable = -1;
+	 *   @pending_disable = 0;
	 *
	 * sched-in
	 * perf_event_disable_inatomic()
-	 *   @pending_disable = CPU-B;
+	 *   @pending_disable = 1;
	 *   irq_work_queue(); // FAILS
	 *
	 * irq_work_run()
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cfd9448ce28f..610e9b22f0c4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -437,7 +437,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
	 * store that will be enabled on successful return
	 */
	if (!handle->size) { /* A, matches D */
-		event->pending_disable = smp_processor_id();
+		perf_event_disable_inatomic(handle->event);
		perf_output_wakeup(handle);
		WRITE_ONCE(rb->aux_nest, 0);
		goto err_put;
@@ -522,7 +522,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-			handle->event->pending_disable = smp_processor_id();
+			perf_event_disable_inatomic(handle->event);
		perf_output_wakeup(handle);
	}
-- 
2.34.1
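The root cause is easy to reproduce in isolation: a field that the rest of the code now treats as a boolean was still being assigned a CPU number, and on CPU 0 that value is 0, i.e. "nothing pending". The sketch below is a hypothetical, self-contained illustration using stand-in names; it is not the real struct perf_event.

/* Hypothetical illustration of the bug class fixed above: a field with
 * boolean semantics is assigned a CPU number. When the overrun is
 * handled on CPU 0, the stored value is 0, so the "is a disable
 * pending?" test never fires. */
#include <stdio.h>

struct fake_event {
	int pending_disable;     /* expected values after the API change: 0 or 1 */
};

static void old_aux_overflow_path(struct fake_event *e, int cpu)
{
	e->pending_disable = cpu;        /* pre-fix behaviour: stores a CPU number */
}

static void irq_work(struct fake_event *e)
{
	if (e->pending_disable)          /* treated as a boolean by the core layer */
		printf("event disabled\n");
	else
		printf("disable silently skipped\n");
}

int main(void)
{
	struct fake_event e = { 0 };

	old_aux_overflow_path(&e, 0);    /* AUX overrun handled on CPU 0 */
	irq_work(&e);                    /* prints "disable silently skipped" */
	return 0;
}

Calling perf_event_disable_inatomic() instead of writing the field directly sidesteps the ambiguity entirely, which is what the patch does.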
[PATCH OLK-6.6] sdei_watchdog: add percpu flag to fix sdei watchdog state in lpi mode
by Bowen You 01 Sep '25

From: youbowen <youbowen2(a)huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC
CVE: NA

-------------------------------------------------

The /proc/sys/kernel/nmi_watchdog interface is designed to turn the NMI
watchdog off, but it currently does not take effect in low-power
scenarios, because the PM callback enables/disables the SDEI event
whenever the power state changes. This commit adds a percpu flag so the
sysctl setting stays compatible with the PM callback.

Signed-off-by: Bowen You <youbowen2(a)huawei.com>
---
 arch/arm64/kernel/watchdog_sdei.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c
index 19f47e24fa59..da1d80743996 100644
--- a/arch/arm64/kernel/watchdog_sdei.c
+++ b/arch/arm64/kernel/watchdog_sdei.c
@@ -25,6 +25,7 @@ static int sdei_watchdog_event_num;
 bool disable_sdei_nmi_watchdog;
 static bool sdei_watchdog_registered;
 static DEFINE_PER_CPU(ktime_t, last_check_time);
+static DEFINE_PER_CPU(bool, sdei_usr_en);
 
 void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 {
@@ -44,6 +45,7 @@ void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 		pr_err("Enable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
 	}
+	__this_cpu_write(sdei_usr_en, 1);
 }
 
 void sdei_watchdog_hardlockup_disable(unsigned int cpu)
@@ -54,6 +56,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 		return;
 
 	ret = sdei_api_event_disable(sdei_watchdog_event_num);
+	__this_cpu_write(sdei_usr_en, 0);
 	if (ret)
 		pr_err("Disable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
@@ -62,6 +65,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 
 static int sdei_watchdog_callback(u32 event, struct pt_regs *regs, void *arg)
 {
+	pr_err("sdei watchdog callback on cpu %d\n", smp_processor_id());
 	ktime_t delta, now = ktime_get_mono_fast_ns();
 
 	delta = now - __this_cpu_read(last_check_time);
@@ -116,10 +120,12 @@ static int sdei_watchdog_pm_notifier(struct notifier_block *nb,
 
 	switch (action) {
 	case CPU_PM_ENTER:
-		rv = sdei_api_event_disable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_disable(sdei_watchdog_event_num);
 		break;
 	case CPU_PM_EXIT:
-		rv = sdei_api_event_enable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_enable(sdei_watchdog_event_num);
 		break;
 	default:
 		return NOTIFY_DONE;
-- 
2.34.1
[PATCH openEuler-1.0-LTS] ipv6: reject malicious packets in ipv6_gso_segment()
by Wang Liang 01 Sep '25

From: Eric Dumazet <edumazet(a)google.com>

mainline inclusion
from mainline-v6.17-rc1
commit d45cf1e7d7180256e17c9ce88e32e8061a7887fe
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICU6G7
CVE: CVE-2025-38572
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

syzbot was able to craft a packet with very long IPv6 extension headers
leading to an overflow of skb->transport_header. This 16bit field has a
limited range.

Add skb_reset_transport_header_careful() helper and use it from
ipv6_gso_segment()

WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
Modules linked in:
CPU: 0 UID: 0 PID: 5871 Comm: syz-executor211 Not tainted 6.16.0-rc6-syzkaller-g7abc678e3084 #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
RIP: 0010:skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
RIP: 0010:ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
Call Trace:
 <TASK>
 skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
 nsh_gso_segment+0x54a/0xe10 net/nsh/nsh.c:110
 skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
 __skb_gso_segment+0x342/0x510 net/core/gso.c:124
 skb_gso_segment include/net/gso.h:83 [inline]
 validate_xmit_skb+0x857/0x11b0 net/core/dev.c:3950
 validate_xmit_skb_list+0x84/0x120 net/core/dev.c:4000
 sch_direct_xmit+0xd3/0x4b0 net/sched/sch_generic.c:329
 __dev_xmit_skb net/core/dev.c:4102 [inline]
 __dev_queue_xmit+0x17b6/0x3a70 net/core/dev.c:4679

Fixes: d1da932ed4ec ("ipv6: Separate ipv6 offload support")
Reported-by: syzbot+af43e647fd835acc02df(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/netdev/688a1a05.050a0220.5d226.0008.GAE@google.com/…
Signed-off-by: Eric Dumazet <edumazet(a)google.com>
Reviewed-by: Dawid Osuchowski <dawid.osuchowski(a)linux.intel.com>
Reviewed-by: Willem de Bruijn <willemb(a)google.com>
Link: https://patch.msgid.link/20250730131738.3385939-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Wang Liang <wangliang74(a)huawei.com>
---
 include/linux/skbuff.h | 23 +++++++++++++++++++++++
 net/ipv6/ip6_offload.c |  4 +++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c7d1d8b5f41b..2e2d504c7902 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2374,6 +2374,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
 	skb->transport_header = skb->data - skb->head;
 }
 
+/**
+ * skb_reset_transport_header_careful - conditionally reset transport header
+ * @skb: buffer to alter
+ *
+ * Hardened version of skb_reset_transport_header().
+ *
+ * Returns: true if the operation was a success.
+ */
+static inline bool __must_check
+skb_reset_transport_header_careful(struct sk_buff *skb)
+{
+	long offset = skb->data - skb->head;
+
+	if (unlikely(offset != (typeof(skb->transport_header))offset))
+		return false;
+
+	if (unlikely(offset == (typeof(skb->transport_header))~0U))
+		return false;
+
+	skb->transport_header = offset;
+	return true;
+}
+
 static inline void skb_set_transport_header(struct sk_buff *skb,
 					    const int offset)
 {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 6c47cd0ef240..81227ca2a883 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -96,7 +96,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment)) {
-		skb_reset_transport_header(skb);
+		if (!skb_reset_transport_header_careful(skb))
+			goto out;
+
 		segs = ops->callbacks.gso_segment(skb, features);
 		if (!segs)
 			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
-- 
2.34.1
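The new helper boils down to a truncation check before storing a long offset into a 16-bit field. The following standalone sketch mirrors that check with stand-in types (hdr_off_t plays the role of skb->transport_header); it is an illustration, not the kernel helper itself.

/* Standalone sketch of the "careful" reset: refuse to store an offset
 * that would be truncated by a 16-bit field, or that collides with the
 * all-ones "unset" sentinel. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t hdr_off_t;          /* same width as skb->transport_header */

static bool set_header_offset_careful(hdr_off_t *field, long offset)
{
	if (offset != (hdr_off_t)offset)     /* would be truncated */
		return false;
	if (offset == (hdr_off_t)~0U)        /* reserved "unset" value */
		return false;

	*field = (hdr_off_t)offset;
	return true;
}

int main(void)
{
	hdr_off_t off;

	printf("%d\n", set_header_offset_careful(&off, 1000));    /* 1: fits       */
	printf("%d\n", set_header_offset_careful(&off, 70000));   /* 0: > 65535    */
	printf("%d\n", set_header_offset_careful(&off, 0xffff));  /* 0: sentinel   */
	return 0;
}

With extension headers long enough to push the offset past 65535, the old unconditional reset silently wrapped the value; the careful variant makes the caller bail out instead.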
[PATCH openEuler-1.0-LTS] vsock: Do not allow binding to VMADDR_PORT_ANY
by Wang Liang 01 Sep '25

From: Budimir Markovic <markovicbudimir(a)gmail.com>

mainline inclusion
from mainline-v6.17-rc2
commit aba0c94f61ec05315fa7815d21aefa4c87f6a9f4
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICUC4U
CVE: CVE-2025-38618
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

It is possible for a vsock to autobind to VMADDR_PORT_ANY. This can
cause a use-after-free when a connection is made to the bound socket.
The socket returned by accept() also has port VMADDR_PORT_ANY but is
not on the list of unbound sockets. Binding it will result in an extra
refcount decrement similar to the one fixed in fcdd2242c023 (vsock:
Keep the binding until socket destruction).

Modify the check in __vsock_bind_connectible() to also prevent binding
to VMADDR_PORT_ANY.

Fixes: d021c344051a ("VSOCK: Introduce VM Sockets")
Reported-by: Budimir Markovic <markovicbudimir(a)gmail.com>
Signed-off-by: Budimir Markovic <markovicbudimir(a)gmail.com>
Reviewed-by: Stefano Garzarella <sgarzare(a)redhat.com>
Link: https://patch.msgid.link/20250807041811.678-1-markovicbudimir@gmail.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Wang Liang <wangliang74(a)huawei.com>
---
 net/vmw_vsock/af_vsock.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 00374327bf9a..51558c689ab9 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -495,7 +495,8 @@ static int __vsock_bind_stream(struct vsock_sock *vsk,
 	unsigned int i;
 
 	for (i = 0; i < MAX_PORT_RETRIES; i++) {
-		if (port <= LAST_RESERVED_PORT)
+		if (port == VMADDR_PORT_ANY ||
+		    port <= LAST_RESERVED_PORT)
 			port = LAST_RESERVED_PORT + 1;
 
 		new_addr.svm_port = port++;
-- 
2.34.1
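The fix itself is a one-line tightening of the port-selection guard. A small self-contained sketch of that guard is below; the constant values are the commonly documented vsock ones and are assumptions for the sake of a compilable example, not taken from this patch.

/* Sketch of the tightened guard: a socket that was autobound may still
 * carry the VMADDR_PORT_ANY sentinel here, so treat it like a reserved
 * port and never bind to the sentinel itself. */
#include <stdio.h>

#define VMADDR_PORT_ANY    ((unsigned int)-1)   /* assumed value */
#define LAST_RESERVED_PORT 1023U                /* assumed value */

static unsigned int next_bind_port(unsigned int port)
{
	if (port == VMADDR_PORT_ANY || port <= LAST_RESERVED_PORT)
		port = LAST_RESERVED_PORT + 1;
	return port;
}

int main(void)
{
	printf("%u\n", next_bind_port(VMADDR_PORT_ANY)); /* 1024, not 4294967295 */
	printf("%u\n", next_bind_port(80));               /* 1024 */
	printf("%u\n", next_bind_port(5000));             /* 5000 */
	return 0;
}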
[PATCH openEuler-1.0-LTS] net/packet: fix a race in packet_set_ring() and packet_notifier()
by Wang Liang 01 Sep '25

From: Quang Le <quanglex97(a)gmail.com>

mainline inclusion
from mainline-v6.17-rc1
commit 01d3c8417b9c1b884a8a981a3b886da556512f36
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICUC4S
CVE: CVE-2025-38617
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

When packet_set_ring() releases po->bind_lock, another thread can run
packet_notifier() and process an NETDEV_UP event.

This race and the fix are both similar to that of commit 15fe076edea7
("net/packet: fix a race in packet_bind() and packet_notifier()").

There too the packet_notifier NETDEV_UP event managed to run while a
po->bind_lock critical section had to be temporarily released. And the
fix was similarly to temporarily set po->num to zero to keep the socket
unhooked until the lock is retaken.

The po->bind_lock in packet_set_ring and packet_notifier precede the
introduction of git history.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable(a)vger.kernel.org
Signed-off-by: Quang Le <quanglex97(a)gmail.com>
Signed-off-by: Willem de Bruijn <willemb(a)google.com>
Link: https://patch.msgid.link/20250801175423.2970334-1-willemdebruijn.kernel@gma…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Wang Liang <wangliang74(a)huawei.com>
---
 net/packet/af_packet.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 98b9e4f19778..3938c9df92da 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4411,10 +4411,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	spin_lock(&po->bind_lock);
 	was_running = po->running;
 	num = po->num;
-	if (was_running) {
-		WRITE_ONCE(po->num, 0);
+	WRITE_ONCE(po->num, 0);
+	if (was_running)
 		__unregister_prot_hook(sk, false);
-	}
+
 	spin_unlock(&po->bind_lock);
 
 	synchronize_net();
@@ -4446,10 +4446,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	mutex_unlock(&po->pg_vec_lock);
 
 	spin_lock(&po->bind_lock);
-	if (was_running) {
-		WRITE_ONCE(po->num, num);
+	WRITE_ONCE(po->num, num);
+	if (was_running)
 		register_prot_hook(sk);
-	}
+
 	spin_unlock(&po->bind_lock);
 	if (pg_vec && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
-- 
2.34.1
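The invariant the patch restores, that po->num reads as zero for the entire window in which bind_lock is dropped, can be modelled sequentially. The sketch below uses stand-in types and a direct function call in place of the real notifier chain; it is illustrative only, not the af_packet code.

/* Sequential model of the fixed ordering: clear the protocol number
 * unconditionally before the lock is dropped, so a NETDEV_UP delivered
 * in that window cannot rehook the socket onto the old ring. */
#include <stdbool.h>
#include <stdio.h>

struct fake_po {
	unsigned short num;   /* protocol number; 0 means "do not hook" */
	bool running;         /* is the prot hook currently registered? */
};

/* What packet_notifier() does on NETDEV_UP for a bound socket. */
static void netdev_up(struct fake_po *po)
{
	if (po->num && !po->running)
		po->running = true;     /* models register_prot_hook() */
}

int main(void)
{
	struct fake_po po = { .num = 0x0800 /* ETH_P_IP */, .running = true };
	unsigned short saved = po.num;
	bool was_running = po.running;

	/* packet_set_ring(), with the fix: always publish num = 0 first. */
	po.num = 0;
	if (was_running)
		po.running = false;     /* models __unregister_prot_hook() */

	/* ...bind_lock dropped here; a NETDEV_UP may arrive... */
	netdev_up(&po);                 /* with num == 0 this is a no-op */
	printf("rehooked while unlocked: %d\n", po.running);   /* prints 0 */

	/* lock retaken: restore the protocol number and the hook. */
	po.num = saved;
	if (was_running)
		po.running = true;
	return 0;
}

With the pre-fix ordering, num stayed nonzero whenever was_running was false at entry, so the notifier could hook the socket while the ring was being swapped out underneath it.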
[PATCH OLK-6.6] sdei_watchdog: add percpu flag to fix sdei watchdog state in lpi mode
by Bowen You 01 Sep '25

From: youbowen <youbowen2(a)huawei.com>

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC
CVE: NA

-------------------------------------------------

The /proc/sys/kernel/nmi_watchdog interface is designed to turn the NMI
watchdog off, but it currently does not take effect in low-power
scenarios, because the PM callback enables/disables the SDEI event
whenever the power state changes. This commit adds a percpu flag so the
sysctl setting stays compatible with the PM callback.

Signed-off-by: Bowen You <youbowen2(a)huawei.com>
---
 arch/arm64/kernel/watchdog_sdei.c | 10 ++++++++--
 include/linux/nmi.h               |  1 +
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c
index 19f47e24fa59..13a9341bd5ca 100644
--- a/arch/arm64/kernel/watchdog_sdei.c
+++ b/arch/arm64/kernel/watchdog_sdei.c
@@ -25,6 +25,7 @@ static int sdei_watchdog_event_num;
 bool disable_sdei_nmi_watchdog;
 static bool sdei_watchdog_registered;
 static DEFINE_PER_CPU(ktime_t, last_check_time);
+DEFINE_PER_CPU(bool, sdei_usr_en);
 
 void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 {
@@ -44,6 +45,7 @@ void sdei_watchdog_hardlockup_enable(unsigned int cpu)
 		pr_err("Enable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
 	}
+	__this_cpu_write(sdei_usr_en, 1);
 }
 
 void sdei_watchdog_hardlockup_disable(unsigned int cpu)
@@ -54,6 +56,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 		return;
 
 	ret = sdei_api_event_disable(sdei_watchdog_event_num);
+	__this_cpu_write(sdei_usr_en, 0);
 	if (ret)
 		pr_err("Disable NMI Watchdog failed on cpu%d\n",
 		       smp_processor_id());
@@ -62,6 +65,7 @@ void sdei_watchdog_hardlockup_disable(unsigned int cpu)
 
 static int sdei_watchdog_callback(u32 event, struct pt_regs *regs, void *arg)
 {
+	pr_err("sdei watchdog callback on cpu %d\n", smp_processor_id());
 	ktime_t delta, now = ktime_get_mono_fast_ns();
 
 	delta = now - __this_cpu_read(last_check_time);
@@ -116,10 +120,12 @@ static int sdei_watchdog_pm_notifier(struct notifier_block *nb,
 
 	switch (action) {
 	case CPU_PM_ENTER:
-		rv = sdei_api_event_disable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_disable(sdei_watchdog_event_num);
 		break;
 	case CPU_PM_EXIT:
-		rv = sdei_api_event_enable(sdei_watchdog_event_num);
+		if (per_cpu(sdei_usr_en, smp_processor_id()))
+			rv = sdei_api_event_enable(sdei_watchdog_event_num);
 		break;
 	default:
 		return NOTIFY_DONE;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 25fe091e1d19..0d32046d4b01 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -238,6 +238,7 @@ void sdei_watchdog_hardlockup_enable(unsigned int cpu);
 void sdei_watchdog_hardlockup_disable(unsigned int cpu);
 void sdei_watchdog_clear_eoi(void);
 int sdei_watchdog_hardlockup_probe(void);
+DECLARE_PER_CPU(bool, sdei_usr_en);
 extern bool disable_sdei_nmi_watchdog;
 #else
 static inline void sdei_watchdog_hardlockup_enable(unsigned int cpu) { }
-- 
2.34.1
