mailweb.openeuler.org

Kernel

kernel@openeuler.org

  • 53 participants
  • 22572 discussions
[PATCH OLK-6.6] net/venetcls: Fix flow table init for rps_policy
by Yue Haibing, 26 Jan '26
hulk inclusion
category: bugfix
Link: https://gitee.com/openeuler/kernel/issues/ICBFCS
CVE: NA

--------------------------------

Initialize the flow table when rps_policy is enabled, and add a
vecls_flow_enabled static key to optimize performance.

Signed-off-by: Yue Haibing <yuehaibing(a)huawei.com>
---
 include/linux/venetcls.h     |  3 +--
 net/core/dev.c               | 14 ++++++++++----
 net/venetcls/venetcls_flow.c | 22 +++++++++++++---------
 net/venetcls/venetcls_main.c |  4 ++--
 4 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/include/linux/venetcls.h b/include/linux/venetcls.h
index acbffdb91ee8..fdafe47e8f9f 100644
--- a/include/linux/venetcls.h
+++ b/include/linux/venetcls.h
@@ -16,6 +16,7 @@ struct vecls_hook_ops {
 typedef int (*enqueue_f)(struct sk_buff *skb, int cpu, unsigned int *qtail);
 extern const struct vecls_hook_ops __rcu *vecls_ops;
 extern struct static_key_false vecls_localrps_needed;
+extern struct static_key_false vecls_flow_enabled;
 
 static inline void venetcls_cfg_rxcls(struct sock *sk, int is_del)
 {
@@ -75,8 +76,6 @@ venetcls_skb_set_localcpu(struct sk_buff *skb, enqueue_f enq_func, int *ret)
         struct net_device *dev = skb->dev;
         bool result = false;
 
-        if (!static_branch_unlikely(&vecls_localrps_needed))
-                return result;
         if (!dev || !(dev->type == ARPHRD_LOOPBACK && dev->flags & IFF_LOOPBACK))
                 return result;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index b62fcd0a6daf..10445e98c8a4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -166,6 +166,8 @@ const struct vecls_hook_ops __rcu *vecls_ops __read_mostly;
 EXPORT_SYMBOL_GPL(vecls_ops);
 struct static_key_false vecls_localrps_needed __read_mostly;
 EXPORT_SYMBOL(vecls_localrps_needed);
+struct static_key_false vecls_flow_enabled __read_mostly;
+EXPORT_SYMBOL(vecls_flow_enabled);
 #endif
 
 static DEFINE_SPINLOCK(ptype_lock);
@@ -5889,9 +5891,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
         rcu_read_lock();
 #if IS_ENABLED(CONFIG_VENETCLS)
-        if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
-                rcu_read_unlock();
-                return ret;
+        if (static_branch_unlikely(&vecls_flow_enabled)) {
+                if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
+                        rcu_read_unlock();
+                        return ret;
+                }
         }
 #endif
 #ifdef CONFIG_RPS
@@ -5927,7 +5931,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
         rcu_read_lock();
 #if IS_ENABLED(CONFIG_VENETCLS)
-        venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+        if (static_branch_unlikely(&vecls_flow_enabled)) {
+                venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+        }
 #endif
 #ifdef CONFIG_RPS
         if (static_branch_unlikely(&rps_needed)) {
diff --git a/net/venetcls/venetcls_flow.c b/net/venetcls/venetcls_flow.c
index 758067a7c6f1..9562dc9ae03c 100644
--- a/net/venetcls/venetcls_flow.c
+++ b/net/venetcls/venetcls_flow.c
@@ -122,13 +122,13 @@ void _vecls_flow_update(struct sock *sk, struct sk_buff *skb)
         rcu_read_unlock();
 }
 
-static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb)
+static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb, u32 hash)
 {
         struct vecls_numa_bound_dev_info *bound_dev = NULL;
         struct vecls_netdev_info *vecls_dev;
         struct vecls_numa_info *numa_info;
         int i, devid, rxq_num, rxq_id;
-        u32 hash, index;
+        u32 index;
 
         numa_info = get_vecls_numa_info(nid);
         if (!numa_info)
@@ -154,7 +154,6 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
         }
         if (rxq_num == 0)
                 return -1;
-        hash = skb_get_hash(skb);
         index = hash % rxq_num;
 
         i = 0;
@@ -170,19 +169,19 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
 }
 
 static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
-                          struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu)
+                          struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu, u32 hash)
 {
         struct netdev_rx_queue *rxqueue;
         struct vecls_dev_flow_table *dtb;
         struct vecls_dev_flow *rflow;
-        u32 flow_id, hash;
         int rxq_index, rc;
+        u32 flow_id;
 
         if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
             !(dev->features & NETIF_F_NTUPLE))
                 return;
 
-        rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb);
+        rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb, hash);
         if (rxq_index == skb_get_rx_queue(skb) || rxq_index < 0) {
                 vecls_debug("%s skb:%p, old_rxq:%d, next_cpu:%d new_rxq:%d\n",
                             __func__, skb, old_rxq_id, next_cpu, rxq_index);
@@ -194,7 +193,6 @@ static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
         if (!dtb)
                 return;
 
-        hash = skb_get_hash(skb);
         flow_id = hash & dtb->mask;
         rflow = &dtb->flows[flow_id];
 
@@ -245,7 +243,6 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
         u32 last_recv_cpu, hash, val, cpu, tcpu, newcpu;
         struct vecls_dev_flow *rflow;
 
-        cpu = raw_smp_processor_id();
         skb_reset_network_header(skb);
         hash = skb_get_hash(skb);
         if (!hash)
@@ -260,17 +257,22 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
                 return;
 
         newcpu = get_rps_cpu(last_recv_cpu, hash, rps_policy);
+        if (rps_policy)
+                *rcpu = newcpu;
+        vecls_debug("last:%u curcpu:%d newcpu:%d rcpu:%d\n",
+                    last_recv_cpu, raw_smp_processor_id(), newcpu, *rcpu);
         if (rflow->isvalid && cpu_to_node(rflow->cpu) == cpu_to_node(newcpu)) {
                 rflow->timeout = jiffies;
                 return;
         }
 
+        cpu = raw_smp_processor_id();
         if (cpu_to_node(cpu) == cpu_to_node(newcpu))
                 return;
 
         if (tcpu >= nr_cpu_ids)
-                set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu);
+                set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu, hash);
 }
 
 static inline void do_loopback_rps(struct sk_buff *skb, int *rcpu)
@@ -618,6 +620,7 @@ int vecls_flow_res_init(void)
         if (mode != 0) //for lo rps
                 RCU_INIT_POINTER(vecls_ops, &vecls_flow_ops);
         synchronize_rcu();
+        static_branch_inc(&vecls_flow_enabled);
 
         return 0;
 clean:
@@ -627,6 +630,7 @@ int vecls_flow_res_init(void)
 
 void vecls_flow_res_clean(void)
 {
+        static_branch_dec(&vecls_flow_enabled);
         RCU_INIT_POINTER(vecls_ops, NULL);
         synchronize_rcu();
         vecls_sock_flow_table_release();
diff --git a/net/venetcls/venetcls_main.c b/net/venetcls/venetcls_main.c
index 00ec0b0e2498..d75f1fb9fff7 100644
--- a/net/venetcls/venetcls_main.c
+++ b/net/venetcls/venetcls_main.c
@@ -1125,7 +1125,7 @@ static __init int vecls_init(void)
                 err = vecls_ntuple_res_init();
                 if (err)
                         goto clean_rxq;
-                if (lo_rps_policy)
+                if (lo_rps_policy || rps_policy)
                         err = vecls_flow_res_init();
         } else {
                 err = vecls_flow_res_init();
@@ -1163,7 +1163,7 @@ static __exit void vecls_exit(void)
 #endif
         if (mode == 0) {
                 vecls_ntuple_res_clean();
-                if (lo_rps_policy)
+                if (lo_rps_policy || rps_policy)
                         vecls_flow_res_clean();
         } else {
                 vecls_flow_res_clean();
-- 
2.34.1
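
For readers new to jump labels, the gating pattern this patch relies on
looks roughly like the sketch below. This is a kernel-context sketch only:
the jump-label helpers are real kernel APIs, but every other name is a
hypothetical stand-in. A disabled static key compiles the hot-path test
down to a patched-out branch, and the control path flips every call site
at once when the feature is enabled.

/* Minimal sketch of static-key gating; not a standalone program. */
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_feature_enabled);

/* Hot path: costs a patched NOP while the key is disabled. */
static int example_rx_hook(void)
{
        if (static_branch_unlikely(&example_feature_enabled))
                return 1;       /* feature work runs only when enabled */
        return 0;
}

/* Control path: flips all patched branch sites at once. */
static void example_feature_set(bool on)
{
        if (on)
                static_branch_inc(&example_feature_enabled);
        else
                static_branch_dec(&example_feature_enabled);
}

This is why the patch pairs static_branch_inc() with flow-table setup and
static_branch_dec() with teardown: receivers never call into venetcls
unless the table actually exists.
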
[PATCH openEuler-1.0-LTS] e1000: fix OOB in e1000_tbi_should_accept()
by Pu Lehui, 26 Jan '26
From: Guangshuo Li <lgs201920130244(a)gmail.com>

mainline inclusion
from mainline-v6.19-rc4
commit 9c72a5182ed92904d01057f208c390a303f00a0f
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13404
CVE: CVE-2025-71093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

In e1000_tbi_should_accept() we read the last byte of the frame via
'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
reported length is zero or larger than the actual RX buffer size, this
read goes out of bounds and can hit unrelated slab objects.

The issue is observed from the NAPI receive path (e1000_clean_rx_irq):

==================================================================
BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
Read of size 1 at addr ffff888014114e54 by task sshd/363

CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
 <IRQ>
 dump_stack_lvl+0x5a/0x74
 print_address_description+0x7b/0x440
 print_report+0x101/0x200
 kasan_report+0xc1/0xf0
 e1000_tbi_should_accept+0x610/0x790
 e1000_clean_rx_irq+0xa8c/0x1110
 e1000_clean+0xde2/0x3c10
 __napi_poll+0x98/0x380
 net_rx_action+0x491/0xa20
 __do_softirq+0x2c9/0x61d
 do_softirq+0xd1/0x120
 </IRQ>
 <TASK>
 __local_bh_enable_ip+0xfe/0x130
 ip_finish_output2+0x7d5/0xb00
 __ip_queue_xmit+0xe24/0x1ab0
 __tcp_transmit_skb+0x1bcb/0x3340
 tcp_write_xmit+0x175d/0x6bd0
 __tcp_push_pending_frames+0x7b/0x280
 tcp_sendmsg_locked+0x2e4f/0x32d0
 tcp_sendmsg+0x24/0x40
 sock_write_iter+0x322/0x430
 vfs_write+0x56c/0xa60
 ksys_write+0xd1/0x190
 do_syscall_64+0x43/0x90
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f511b476b10
Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
 </TASK>

Allocated by task 1:
 __kasan_krealloc+0x131/0x1c0
 krealloc+0x90/0xc0
 add_sysfs_param+0xcb/0x8a0
 kernel_add_sysfs_param+0x81/0xd4
 param_sysfs_builtin+0x138/0x1a6
 param_sysfs_init+0x57/0x5b
 do_one_initcall+0x104/0x250
 do_initcall_level+0x102/0x132
 do_initcalls+0x46/0x74
 kernel_init_freeable+0x28f/0x393
 kernel_init+0x14/0x1a0
 ret_from_fork+0x22/0x30

The buggy address belongs to the object at ffff888014114000
 which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 1620 bytes to the right of
 2048-byte region [ffff888014114000, ffff888014114800]

The buggy address belongs to the physical page:
page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
flags: 0x100000000010200(slab|head|node=0|zone=1)
raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
==================================================================

This happens because the TBI check unconditionally dereferences the
last byte without validating the reported length first:

        u8 last_byte = *(data + length - 1);

Fix by rejecting the frame early if the length is zero, or if it
exceeds adapter->rx_buffer_len. This preserves the TBI workaround
semantics for valid frames and prevents touching memory beyond the RX
buffer.

Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Guangshuo Li <lgs201920130244(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
 drivers/net/ethernet/intel/e1000/e1000_main.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 18b61be9e0b9..4da32f0b99e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4100,7 +4100,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
                                     u32 length, const u8 *data)
 {
         struct e1000_hw *hw = &adapter->hw;
-        u8 last_byte = *(data + length - 1);
+        u8 last_byte;
+
+        /* Guard against OOB on data[length - 1] */
+        if (unlikely(!length))
+                return false;
+        /* Upper bound: length must not exceed rx_buffer_len */
+        if (unlikely(length > adapter->rx_buffer_len))
+                return false;
+        last_byte = *(data + length - 1);
 
         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
                 unsigned long irq_flags;
-- 
2.34.1
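
The guard added above is easy to state outside the driver. A minimal
userspace C sketch of the same length check, with buf_len standing in
for adapter->rx_buffer_len (all names here are illustrative, not the
driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool tbi_length_ok(uint32_t length, uint32_t buf_len)
{
        if (length == 0)        /* data[length - 1] would wrap to a huge offset */
                return false;
        if (length > buf_len)   /* last byte would sit past the RX buffer */
                return false;
        return true;            /* data[length - 1] is provably in bounds */
}

int main(void)
{
        printf("%d %d %d\n",
               tbi_length_ok(0, 2048),     /* 0: zero length rejected */
               tbi_length_ok(4096, 2048),  /* 0: overlong frame rejected */
               tbi_length_ok(64, 2048));   /* 1: normal frame accepted */
        return 0;
}

Note the order: both checks happen before any byte of the frame is
touched, which is exactly what the patch enforces in the driver.
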
[PATCH OLK-5.10] iavf: fix off-by-one issues in iavf_config_rss_reg()
by Pu Lehui, 26 Jan '26
From: Kohei Enju <enjuk(a)amazon.com>

stable inclusion
from stable-v5.10.248
commit ceb8459df28d22c225a82d74c0f725f2a935d194
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13398
CVE: CVE-2025-71087
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit 6daa2893f323981c7894c68440823326e93a7d61 ]

There are off-by-one bugs when configuring RSS hash key and lookup
table, causing out-of-bounds reads to memory [1] and out-of-bounds
writes to device registers.

Before commit 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS"),
the loop upper bounds were:

        i <= I40E_VFQF_{HKEY,HLUT}_MAX_INDEX

which is safe since the value is the last valid index.

That commit changed the bounds to:

        i <= adapter->rss_{key,lut}_size / 4

where `rss_{key,lut}_size / 4` is the number of dwords, so the last
valid index is `(rss_{key,lut}_size / 4) - 1`. Therefore, using `<=`
accesses one element past the end.

Fix the issues by using `<` instead of `<=`, ensuring we do not exceed
the bounds.

[1] KASAN splat about rss_key_size off-by-one

BUG: KASAN: slab-out-of-bounds in iavf_config_rss+0x619/0x800
Read of size 4 at addr ffff888102c50134 by task kworker/u8:6/63

CPU: 0 UID: 0 PID: 63 Comm: kworker/u8:6 Not tainted 6.18.0-rc2-enjuk-tnguy-00378-g3005f5b77652-dirty #156 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
Workqueue: iavf iavf_watchdog_task
Call Trace:
 <TASK>
 dump_stack_lvl+0x6f/0xb0
 print_report+0x170/0x4f3
 kasan_report+0xe1/0x1a0
 iavf_config_rss+0x619/0x800
 iavf_watchdog_task+0x2be7/0x3230
 process_one_work+0x7fd/0x1420
 worker_thread+0x4d1/0xd40
 kthread+0x344/0x660
 ret_from_fork+0x249/0x320
 ret_from_fork_asm+0x1a/0x30
 </TASK>

Allocated by task 63:
 kasan_save_stack+0x30/0x50
 kasan_save_track+0x14/0x30
 __kasan_kmalloc+0x7f/0x90
 __kmalloc_noprof+0x246/0x6f0
 iavf_watchdog_task+0x28fc/0x3230
 process_one_work+0x7fd/0x1420
 worker_thread+0x4d1/0xd40
 kthread+0x344/0x660
 ret_from_fork+0x249/0x320
 ret_from_fork_asm+0x1a/0x30

The buggy address belongs to the object at ffff888102c50100
 which belongs to the cache kmalloc-64 of size 64
The buggy address is located 0 bytes to the right of
 allocated 52-byte region [ffff888102c50100, ffff888102c50134)

The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x102c50
flags: 0x200000000000000(node=0|zone=2)
page_type: f5(slab)
raw: 0200000000000000 ffff8881000418c0 dead000000000122 0000000000000000
raw: 0000000000000000 0000000080200020 00000000f5000000 0000000000000000
page dumped because: kasan: bad access detected

Memory state around the buggy address:
 ffff888102c50000: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
 ffff888102c50080: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
>ffff888102c50100: 00 00 00 00 00 00 04 fc fc fc fc fc fc fc fc fc
                                     ^
 ffff888102c50180: 00 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc
 ffff888102c50200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc

Fixes: 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS")
Signed-off-by: Kohei Enju <enjuk(a)amazon.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel(a)intel.com>
Tested-by: Rafal Romanowski <rafal.romanowski(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
 drivers/net/ethernet/intel/iavf/iavf_main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 65259722a572..4ed93c7f81d2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1262,11 +1262,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter)
         u16 i;
 
         dw = (u32 *)adapter->rss_key;
-        for (i = 0; i <= adapter->rss_key_size / 4; i++)
+        for (i = 0; i < adapter->rss_key_size / 4; i++)
                 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
 
         dw = (u32 *)adapter->rss_lut;
-        for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+        for (i = 0; i < adapter->rss_lut_size / 4; i++)
                 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
 
         iavf_flush(hw);
-- 
2.34.1
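
The bound itself can be shown in a few lines. A standalone C sketch
(the sizes mirror the 52-byte key from the splat; all names are
illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dw[13] = {0};                   /* 52-byte RSS key = 13 dwords */
        uint32_t n = sizeof(dw) / sizeof(dw[0]); /* valid indices: 0..12 */
        uint32_t i;

        /* '<' visits exactly n elements; '<=' would also read dw[13],
         * one dword past the end -- the access KASAN flagged above. */
        for (i = 0; i < n; i++)
                printf("HKEY(%u) <- %#x\n", i, dw[i]);
        return 0;
}
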
[PATCH OLK-5.10] e1000: fix OOB in e1000_tbi_should_accept()
by Pu Lehui, 26 Jan '26
From: Guangshuo Li <lgs201920130244(a)gmail.com>

stable inclusion
from stable-v5.10.248
commit 4ccfa56f272241e8d8e2c38191fdbb03df489d80
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13404
CVE: CVE-2025-71093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

commit 9c72a5182ed92904d01057f208c390a303f00a0f upstream.

In e1000_tbi_should_accept() we read the last byte of the frame via
'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
reported length is zero or larger than the actual RX buffer size, this
read goes out of bounds and can hit unrelated slab objects.

The issue is observed from the NAPI receive path (e1000_clean_rx_irq):

==================================================================
BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
Read of size 1 at addr ffff888014114e54 by task sshd/363

CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
 <IRQ>
 dump_stack_lvl+0x5a/0x74
 print_address_description+0x7b/0x440
 print_report+0x101/0x200
 kasan_report+0xc1/0xf0
 e1000_tbi_should_accept+0x610/0x790
 e1000_clean_rx_irq+0xa8c/0x1110
 e1000_clean+0xde2/0x3c10
 __napi_poll+0x98/0x380
 net_rx_action+0x491/0xa20
 __do_softirq+0x2c9/0x61d
 do_softirq+0xd1/0x120
 </IRQ>
 <TASK>
 __local_bh_enable_ip+0xfe/0x130
 ip_finish_output2+0x7d5/0xb00
 __ip_queue_xmit+0xe24/0x1ab0
 __tcp_transmit_skb+0x1bcb/0x3340
 tcp_write_xmit+0x175d/0x6bd0
 __tcp_push_pending_frames+0x7b/0x280
 tcp_sendmsg_locked+0x2e4f/0x32d0
 tcp_sendmsg+0x24/0x40
 sock_write_iter+0x322/0x430
 vfs_write+0x56c/0xa60
 ksys_write+0xd1/0x190
 do_syscall_64+0x43/0x90
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f511b476b10
Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
 </TASK>

Allocated by task 1:
 __kasan_krealloc+0x131/0x1c0
 krealloc+0x90/0xc0
 add_sysfs_param+0xcb/0x8a0
 kernel_add_sysfs_param+0x81/0xd4
 param_sysfs_builtin+0x138/0x1a6
 param_sysfs_init+0x57/0x5b
 do_one_initcall+0x104/0x250
 do_initcall_level+0x102/0x132
 do_initcalls+0x46/0x74
 kernel_init_freeable+0x28f/0x393
 kernel_init+0x14/0x1a0
 ret_from_fork+0x22/0x30

The buggy address belongs to the object at ffff888014114000
 which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 1620 bytes to the right of
 2048-byte region [ffff888014114000, ffff888014114800]

The buggy address belongs to the physical page:
page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
flags: 0x100000000010200(slab|head|node=0|zone=1)
raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
==================================================================

This happens because the TBI check unconditionally dereferences the
last byte without validating the reported length first:

        u8 last_byte = *(data + length - 1);

Fix by rejecting the frame early if the length is zero, or if it
exceeds adapter->rx_buffer_len. This preserves the TBI workaround
semantics for valid frames and prevents touching memory beyond the RX
buffer.

Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Guangshuo Li <lgs201920130244(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
 drivers/net/ethernet/intel/e1000/e1000_main.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5e28cf4fa2cd..0b7502902913 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4090,7 +4090,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
                                     u32 length, const u8 *data)
 {
         struct e1000_hw *hw = &adapter->hw;
-        u8 last_byte = *(data + length - 1);
+        u8 last_byte;
+
+        /* Guard against OOB on data[length - 1] */
+        if (unlikely(!length))
+                return false;
+        /* Upper bound: length must not exceed rx_buffer_len */
+        if (unlikely(length > adapter->rx_buffer_len))
+                return false;
+        last_byte = *(data + length - 1);
 
         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
                 unsigned long irq_flags;
-- 
2.34.1
[PATCH OLK-6.6] [Backport] ASoC: SOF: Intel: hda-dai: Ensure DAI widget is valid during params
by Lin Ruifeng, 26 Jan '26
From: Bard Liao <yung-chuan.liao(a)linux.intel.com>

stable inclusion
from stable-v6.12.14
commit e012a77e4d7632cf615ba9625b1600ed8985c3b5
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/30
CVE: CVE-2024-58012
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

[ Upstream commit 569922b82ca660f8b24e705f6cf674e6b1f99cc7 ]

Each cpu DAI should associate with a widget. However, the topology
might not create the right number of DAI widgets for aggregated amps,
and it will cause a NULL pointer dereference.

Check that the DAI widget associated with the CPU DAI is valid to
prevent a NULL pointer dereference due to missing DAI widgets in
topologies with aggregated amps.

Signed-off-by: Bard Liao <yung-chuan.liao(a)linux.intel.com>
Reviewed-by: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
Reviewed-by: Péter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Reviewed-by: Liam Girdwood <liam.r.girdwood(a)intel.com>
Link: https://patch.msgid.link/20241203104853.56956-1-yung-chuan.liao@linux.intel…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>

Conflicts:
        sound/soc/sof/intel/hda-dai.c
        sound/soc/sof/intel/hda.c

Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
 sound/soc/sof/intel/hda-dai.c | 6 ++++++
 sound/soc/sof/intel/hda.c     | 5 +++++
 2 files changed, 11 insertions(+)

diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 19ec1a45737e..cebd9db94215 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -439,6 +439,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
         struct snd_sof_dev *sdev;
         int ret;
 
+        if (!w) {
+                dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+                        cpu_dai->name);
+                return -EINVAL;
+        }
+
         ret = non_hda_dai_hw_params(substream, params, cpu_dai);
         if (ret < 0) {
                 dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 15e6779efaa3..aa3f9c961173 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -102,6 +102,11 @@ static int sdw_params_stream(struct device *dev,
         struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d,
                                         params_data->substream->stream);
         struct snd_sof_dai_config_data data = { 0 };
 
+        if (!w) {
+                dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+                        d->name);
+                return -EINVAL;
+        }
         data.dai_index = (params_data->link_id << 8) | d->id;
         data.dai_data = params_data->alh_stream_id;
-- 
2.43.0
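
The shape of the fix is a plain validate-before-dereference guard. A
standalone C sketch of the same pattern, with hypothetical stand-ins
for the SOF structures (none of these names are the driver's):

#include <stdio.h>

struct widget { const char *name; };

/* Stand-in for the widget lookup; returns NULL to model a topology
 * that created too few DAI widgets for aggregated amps. */
static struct widget *lookup_widget(int stream)
{
        (void)stream;
        return NULL;
}

static int hw_params(int stream)
{
        struct widget *w = lookup_widget(stream);

        if (!w) {       /* before the fix this was a NULL dereference */
                fprintf(stderr, "widget not found, check amp link num\n");
                return -22;     /* -EINVAL */
        }
        printf("configuring %s\n", w->name);
        return 0;
}

int main(void)
{
        return hw_params(0) ? 1 : 0;
}
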
[PATCH v2] arm64: kexec: Add support for crashkernel CMA reservation
by Jinjie Ruan, 26 Jan '26
Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
crashkernel= command line option") and commit ab475510e042 ("kdump:
implement reserve_crashkernel_cma") added CMA support for kdump
crashkernel reservation.

Crash kernel memory reservation wastes production resources if too
large, risks kdump failure if too small, and faces allocation
difficulties on fragmented systems due to contiguous block constraints.

The new CMA-based crashkernel reservation scheme splits the "large
fixed reservation" into a "small fixed region + large CMA dynamic
region": the CMA memory is available to userspace during normal
operation to avoid waste, and is reclaimed for kdump upon crash,
saving memory while improving reliability.

So extend crashkernel CMA reservation support to arm64. The following
changes are made to enable CMA reservation:

- Parse and obtain the CMA reservation size along with other
  crashkernel parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges for the kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
  prevent them from being exported through /proc/vmcore.

Update kernel-parameters.txt to document CMA support for crashkernel
on the arm64 architecture.

Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
v2:
- Free cmem in prepare_elf_headers()
- Add the motivation.
---
 Documentation/admin-guide/kernel-parameters.txt |  2 +-
 arch/arm64/kernel/machine_kexec_file.c          | 15 ++++++++++++++-
 arch/arm64/mm/init.c                            |  5 +++--
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1058f2a6d6a8..36bb642a7edd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1119,7 +1119,7 @@ Kernel parameters
                         It will be ignored when crashkernel=X,high is not used
                         or memory reserved is below 4G.
         crashkernel=size[KMG],cma
-                        [KNL, X86, ppc] Reserve additional crash kernel memory from
+                        [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
                         CMA. This reservation is usable by the first system's
                         userspace memory and kernel movable allocations (memory
                         balloon, zswap). Pages allocated from this memory range
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 410060ebd86d..ef6ce9aaba80 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
         u64 i;
         phys_addr_t start, end;
 
-        nr_ranges = 2; /* for exclusion of crashkernel region */
+        nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
         for_each_mem_range(i, &start, &end)
                 nr_ranges++;
 
@@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
                 cmem->nr_ranges++;
         }
 
+        for (i = 0; i < crashk_cma_cnt; i++) {
+                cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
+                cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
+                cmem->nr_ranges++;
+        }
+
         /* Exclude crashkernel region */
         ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
         if (ret)
@@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
                 goto out;
         }
 
+        for (i = 0; i < crashk_cma_cnt; ++i) {
+                ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+                                              crashk_cma_ranges[i].end);
+                if (ret)
+                        goto out;
+        }
+
         ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
 
 out:
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 524d34a0e921..28165d94af08 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
 
 static void __init arch_reserve_crashkernel(void)
 {
+        unsigned long long crash_base, crash_size, cma_size = 0;
         unsigned long long low_size = 0;
-        unsigned long long crash_base, crash_size;
         bool high = false;
         int ret;
 
@@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
 
         ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                 &crash_size, &crash_base,
-                                &low_size, NULL, &high);
+                                &low_size, &cma_size, &high);
         if (ret)
                 return;
 
         reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+        reserve_crashkernel_cma(cma_size);
 }
 
 static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
-- 
2.34.1
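
The exclusion step in prepare_elf_headers() is the interesting part:
each reserved window has to be carved out of the flat range list handed
to the vmcore ELF headers. A userspace C sketch of that carve-out, as
an illustrative reimplementation of what crash_exclude_mem_range()
does rather than the kernel code itself (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* Remove [s, e] from the range list, splitting a range when the
 * window falls strictly inside it. Returns -1 if a split would
 * overflow the backing array. */
static int exclude_range(struct range *r, int *n, int cap,
                         uint64_t s, uint64_t e)
{
        for (int i = 0; i < *n; i++) {
                if (e < r[i].start || s > r[i].end)
                        continue;                       /* no overlap */
                if (s <= r[i].start && e >= r[i].end) { /* swallow whole */
                        r[i] = r[--(*n)];
                        i--;
                } else if (s > r[i].start && e < r[i].end) { /* split */
                        if (*n == cap)
                                return -1;
                        r[*n].start = e + 1;
                        r[(*n)++].end = r[i].end;
                        r[i].end = s - 1;
                } else if (s <= r[i].start) {           /* clip head */
                        r[i].start = e + 1;
                } else {                                /* clip tail */
                        r[i].end = s - 1;
                }
        }
        return 0;
}

int main(void)
{
        struct range mem[4] = { { 0x80000000ULL, 0xffffffffULL } };
        int n = 1;

        exclude_range(mem, &n, 4, 0xa0000000ULL, 0xa7ffffffULL); /* CMA window */
        for (int i = 0; i < n; i++)
                printf("keep [%#llx-%#llx]\n",
                       (unsigned long long)mem[i].start,
                       (unsigned long long)mem[i].end);
        return 0;
}

The split case is why the patch grows nr_ranges by crashk_cma_cnt up
front: every excluded CMA window can add one extra range to the list.
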
[PATCH OLK-5.10] KVM: x86: use array_index_nospec with indices that come from guest
by Zhang Yuwei, 24 Jan '26
From: Thijs Raymakers <thijs(a)raymakers.nl>

stable inclusion
from stable-v5.10.242
commit 31a0ad2f60cb4816e06218b63e695eb72ce74974
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8676
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

commit c87bd4dd43a624109c3cc42d843138378a7f4548 upstream.

min and dest_id are guest-controlled indices. Using
array_index_nospec() after the bounds checks clamps these values to
mitigate speculative execution side-channels.

Signed-off-by: Thijs Raymakers <thijs(a)raymakers.nl>
Cc: stable(a)vger.kernel.org
Cc: Sean Christopherson <seanjc(a)google.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Fixes: 715062970f37 ("KVM: X86: Implement PV sched yield hypercall")
Fixes: bdf7ffc89922 ("KVM: LAPIC: Fix pv ipis out-of-bounds access")
Fixes: 4180bf1b655a ("KVM: X86: Implement "send IPI" hypercall")
Link: https://lore.kernel.org/r/20250804064405.4802-1-thijs@raymakers.nl
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
---
 arch/x86/kvm/lapic.c | 2 ++
 arch/x86/kvm/x86.c   | 7 +++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0c76022ea866..087e05214d8e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -610,6 +610,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
         if (min > map->max_apic_id)
                 return 0;
 
+        min = array_index_nospec(min, map->max_apic_id + 1);
+
         for_each_set_bit(i, ipi_bitmap,
                 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                 if (map->phys_map[min + i]) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a7e91116690..fc745dd45732 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8797,8 +8797,11 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
         rcu_read_lock();
         map = rcu_dereference(kvm->arch.apic_map);
 
-        if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
-                target = map->phys_map[dest_id]->vcpu;
+        if (likely(map) && dest_id <= map->max_apic_id) {
+                dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
+                if (map->phys_map[dest_id])
+                        target = map->phys_map[dest_id]->vcpu;
+        }
 
         rcu_read_unlock();
-- 
2.22.0
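
The mitigation pattern generalizes beyond KVM. A kernel-context sketch
(not a standalone program): array_index_nospec() is the real helper
from <linux/nospec.h>, while the surrounding names are hypothetical.
The architectural bounds check runs first; the clamp then guarantees
that even a mispredicted branch cannot speculatively index past the
map.

#include <linux/nospec.h>
#include <linux/types.h>

static void *lookup_guest_entry(void **map, u32 max_id, u32 dest_id)
{
        if (dest_id > max_id)           /* architectural out-of-bounds */
                return NULL;
        /* Under speculation, dest_id is now forced into [0, max_id]. */
        dest_id = array_index_nospec(dest_id, max_id + 1);
        return map[dest_id];
}
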
[PATCH OLK-5.10] KVM: x86: use array_index_nospec with indices that come from guest
by Zhang Yuwei, 24 Jan '26
From: Thijs Raymakers <thijs(a)raymakers.nl>

stable inclusion
from stable-v5.10.242
commit 31a0ad2f60cb4816e06218b63e695eb72ce74974
category: bugfix
bugzilla: 189268
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…

--------------------------------

commit c87bd4dd43a624109c3cc42d843138378a7f4548 upstream.

min and dest_id are guest-controlled indices. Using
array_index_nospec() after the bounds checks clamps these values to
mitigate speculative execution side-channels.

Signed-off-by: Thijs Raymakers <thijs(a)raymakers.nl>
Cc: stable(a)vger.kernel.org
Cc: Sean Christopherson <seanjc(a)google.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Fixes: 715062970f37 ("KVM: X86: Implement PV sched yield hypercall")
Fixes: bdf7ffc89922 ("KVM: LAPIC: Fix pv ipis out-of-bounds access")
Fixes: 4180bf1b655a ("KVM: X86: Implement "send IPI" hypercall")
Link: https://lore.kernel.org/r/20250804064405.4802-1-thijs@raymakers.nl
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
---
 arch/x86/kvm/lapic.c | 2 ++
 arch/x86/kvm/x86.c   | 7 +++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0c76022ea866..087e05214d8e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -610,6 +610,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
         if (min > map->max_apic_id)
                 return 0;
 
+        min = array_index_nospec(min, map->max_apic_id + 1);
+
         for_each_set_bit(i, ipi_bitmap,
                 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                 if (map->phys_map[min + i]) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a7e91116690..fc745dd45732 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8797,8 +8797,11 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
         rcu_read_lock();
         map = rcu_dereference(kvm->arch.apic_map);
 
-        if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
-                target = map->phys_map[dest_id]->vcpu;
+        if (likely(map) && dest_id <= map->max_apic_id) {
+                dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
+                if (map->phys_map[dest_id])
+                        target = map->phys_map[dest_id]->vcpu;
+        }
 
         rcu_read_unlock();
-- 
2.22.0
[PATCH OLK-5.10] drm/nouveau/kms/nv50-: init hpd_irq_lock for PIOR DP
by Zhang Yuwei, 24 Jan '26
From: Ben Skeggs <bskeggs(a)redhat.com>

mainline inclusion
from mainline-v6.5-rc3
commit ea293f823a8805735d9e00124df81a8f448ed1ae
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13097
CVE: CVE-2023-54263
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…

--------------------------------

Fixes OOPS on boards with ANX9805 DP encoders.

Cc: stable(a)vger.kernel.org # 6.4+
Signed-off-by: Ben Skeggs <bskeggs(a)redhat.com>
Reviewed-by: Karol Herbst <kherbst(a)redhat.com>
Signed-off-by: Karol Herbst <kherbst(a)redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230719044051.6975-3-skeggsb…

Conflicts:
        drivers/gpu/drm/nouveau/dispnv50/disp.c
[commit 1b255f1ccc883 not merged]

Signed-off-by: Zhang Yuwei <zhangyuwei20(a)huawei.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0ac120225b4d..670ebd47b595 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1965,7 +1965,10 @@ nv50_pior_help = {
 static void
 nv50_pior_destroy(struct drm_encoder *encoder)
 {
+        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
         drm_encoder_cleanup(encoder);
+        mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
         kfree(encoder);
 }
 
@@ -2010,6 +2013,8 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
         nv_encoder->i2c = ddc;
         nv_encoder->aux = aux;
 
+        mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
         encoder = to_drm_encoder(nv_encoder);
         encoder->possible_crtcs = dcbe->heads;
         encoder->possible_clones = 0;
-- 
2.22.0
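
The underlying rule here is that a lock embedded in an object must be
initialized on the create path and torn down on the destroy path,
which the PIOR DP path was missing. A kernel-context sketch of the
pairing (not a standalone program; the struct and function names are
hypothetical, while the mutex and allocation helpers are real kernel
APIs):

#include <linux/mutex.h>
#include <linux/slab.h>

struct example_encoder {
        struct mutex hpd_irq_lock;
};

static struct example_encoder *example_encoder_create(void)
{
        struct example_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);

        if (!enc)
                return NULL;
        mutex_init(&enc->hpd_irq_lock);   /* the init the PIOR path lacked */
        return enc;
}

static void example_encoder_destroy(struct example_encoder *enc)
{
        mutex_destroy(&enc->hpd_irq_lock); /* paired teardown */
        kfree(enc);
}

Using an uninitialized mutex (as the interrupt path did before this
fix) is undefined and oopses under debug configs, which matches the
reported OOPS on ANX9805 boards.
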

Powered by HyperKitty