This series backports two stable-v6.6.59 commits for CVE-2024-50145: the first factors the per-packet buffer unmap and ring-index advance in the octeon_ep Rx path into a helper, and the second uses that helper to handle build_skb() allocation failures in __octep_oq_process_rx() instead of dereferencing a NULL pointer.
Aleksandr Mishin (2):
  octeon_ep: Implement helper for iterating packets in Rx queue
  octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx()
 .../net/ethernet/marvell/octeon_ep/octep_rx.c | 82 +++++++++++++------
 1 file changed, 59 insertions(+), 23 deletions(-)
Feedback: The patch(es) you have sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/13133 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/T...
From: Aleksandr Mishin <amishin@t-argos.ru>
stable inclusion
from stable-v6.6.59
commit e71146ff378c7cae47e71c4eb95f2a63682dcf81
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2STE
CVE: CVE-2024-50145
Reference: https://git.kernel.org/stable/c/e71146ff378c7cae47e71c4eb95f2a63682dcf81
--------------------------------
[ Upstream commit bd28df26197b2bd0913bf1b36770836481975143 ]
The common code for the packet and ring-index manipulations is extracted and moved to a newly implemented helper to make the code more readable and avoid duplication. This is a preparation for skb allocation failure handling.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Suggested-by: Simon Horman <horms@kernel.org>
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Stable-dep-of: eb592008f79b ("octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Luo Gengkun <luogengkun2@huawei.com>
---
 .../net/ethernet/marvell/octeon_ep/octep_rx.c | 55 +++++++++++--------
 1 file changed, 32 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 3c43f8078528..cfbfad95b021 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -336,6 +336,30 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
 	return new_pkts;
 }
 
+/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ * Free the resources associated with a packet.
+ * Increment packet index in the ring and packet descriptor number.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+			      struct octep_rx_buffer *buff_info,
+			      u32 *read_idx, u32 *desc_used)
+{
+	dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+		       PAGE_SIZE, DMA_FROM_DEVICE);
+	buff_info->page = NULL;
+	(*read_idx)++;
+	(*desc_used)++;
+	if (*read_idx == oq->max_count)
+		*read_idx = 0;
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
@@ -365,10 +389,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 	desc_used = 0;
 	for (pkt = 0; pkt < pkts_to_process; pkt++) {
 		buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
-		dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
 		resp_hw = page_address(buff_info->page);
-		buff_info->page = NULL;
 
 		/* Swap the length field that is in Big-Endian to CPU */
 		buff_info->len = be64_to_cpu(resp_hw->length);
@@ -390,36 +411,27 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 			 */
 			data_offset = OCTEP_OQ_RESP_HW_SIZE;
 		}
+
+		octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
+		skb = build_skb((void *)resp_hw, PAGE_SIZE);
+		skb_reserve(skb, data_offset);
+
 		rx_bytes += buff_info->len;
 
 		if (buff_info->len <= oq->max_single_buffer_size) {
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			skb_put(skb, buff_info->len);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
 		} else {
 			struct skb_shared_info *shinfo;
 			u16 data_len;
 
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			/* Head fragment includes response header(s);
 			 * subsequent fragments contains only data.
 			 */
 			skb_put(skb, oq->max_single_buffer_size);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
-
 			shinfo = skb_shinfo(skb);
 			data_len = buff_info->len - oq->max_single_buffer_size;
 			while (data_len) {
-				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-					       PAGE_SIZE, DMA_FROM_DEVICE);
 				buff_info = (struct octep_rx_buffer *)
 					    &oq->buff_info[read_idx];
 				if (data_len < oq->buffer_size) {
@@ -434,11 +446,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 						buff_info->page, 0,
 						buff_info->len,
 						buff_info->len);
-				buff_info->page = NULL;
-				read_idx++;
-				desc_used++;
-				if (read_idx == oq->max_count)
-					read_idx = 0;
+
+				octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
 			}
 		}
From: Aleksandr Mishin <amishin@t-argos.ru>
stable inclusion
from stable-v6.6.59
commit c2d2dc4f88bb3cfc4f3cc320fd3ff51b0ae5b0ea
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB2STE
CVE: CVE-2024-50145
Reference: https://git.kernel.org/stable/c/c2d2dc4f88bb3cfc4f3cc320fd3ff51b0ae5b0ea
--------------------------------
[ Upstream commit eb592008f79be52ccef88cd9a5249b3fc0367278 ]
build_skb() returns NULL in case of a memory allocation failure, so handle it inside __octep_oq_process_rx() to avoid a NULL pointer dereference.
__octep_oq_process_rx() is called during NAPI polling by the driver. If skb allocation fails, keep on pulling packets out of the Rx DMA queue: we shouldn't break the polling immediately and thus falsely indicate to octep_napi_poll() that the Rx pressure is going down. As there is no associated skb in this case, don't process the packets and don't push them up the network stack - they are skipped.
A helper function is implemented to unmap/flush all the fragment buffers used by the dropped packet. The 'alloc_failures' counter is incremented to mark the skb allocation error in driver statistics.
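Purely as an illustration of the strategy described above, here is a small self-contained userspace C model of the same pattern (this is not the driver code; the ring layout, the names next_pkt/drop_rx/process_rx and the simulated allocation failure are all made up for the example). On a failed allocation the loop still consumes every descriptor of the packet and bumps a drop counter instead of returning early; next_pkt plays the role the helper from the first patch plays in the driver.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE   8
#define BUF_SIZE    2048    /* stand-in for oq->buffer_size */
#define MAX_SINGLE  2048    /* stand-in for oq->max_single_buffer_size */

struct rx_buf { int len; };                /* per-descriptor buffer info */
struct ring { struct rx_buf buf[RING_SIZE]; unsigned drops; };

/* Consume one descriptor: advance the read index with wrap-around. */
static void next_pkt(unsigned *idx, unsigned *used)
{
        (*idx)++;
        (*used)++;
        if (*idx == RING_SIZE)
                *idx = 0;
}

/* Skip the remaining fragment descriptors of a packet whose skb could not be built. */
static void drop_rx(const struct rx_buf *b, unsigned *idx, unsigned *used)
{
        int data_len = b->len - MAX_SINGLE;

        while (data_len > 0) {
                next_pkt(idx, used);
                data_len -= BUF_SIZE;
        }
}

/* Keep draining the queue even when allocation fails; only count the drop. */
static unsigned process_rx(struct ring *r, unsigned pkts, unsigned *idx)
{
        unsigned used = 0, pkt;

        for (pkt = 0; pkt < pkts; pkt++) {
                const struct rx_buf *b = &r->buf[*idx];
                void *skb = (b->len & 1) ? NULL : malloc(16);  /* simulate failures */

                next_pkt(idx, &used);      /* head descriptor is always consumed */
                if (!skb) {
                        drop_rx(b, idx, &used);
                        r->drops++;
                        continue;          /* do not break out of the poll loop */
                }
                free(skb);                 /* stands in for passing the skb up the stack */
        }
        return used;                       /* descriptors the caller can now refill */
}

int main(void)
{
        struct ring r = { .drops = 0 };
        unsigned idx = 0, used, i;

        for (i = 0; i < RING_SIZE; i++)
                r.buf[i].len = 1000 + i;   /* odd lengths hit the simulated failure */

        used = process_rx(&r, RING_SIZE, &idx);
        printf("used %u descriptors, dropped %u packets\n", used, r.drops);
        return 0;
}

Run as a normal C99 program it reports all 8 descriptors consumed and 4 packets dropped, mirroring how __octep_oq_process_rx() keeps reporting real progress to octep_napi_poll() even when skbs cannot be built.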
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support")
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Luo Gengkun <luogengkun2@huawei.com>
---
 .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index cfbfad95b021..c7f4e3c058b7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -360,6 +360,27 @@ static void octep_oq_next_pkt(struct octep_oq *oq,
 		*read_idx = 0;
 }
 
+/**
+ * octep_oq_drop_rx() - Free the resources associated with a packet.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ */
+static void octep_oq_drop_rx(struct octep_oq *oq,
+			     struct octep_rx_buffer *buff_info,
+			     u32 *read_idx, u32 *desc_used)
+{
+	int data_len = buff_info->len - oq->max_single_buffer_size;
+
+	while (data_len > 0) {
+		octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
+		data_len -= oq->buffer_size;
+	};
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
@@ -415,6 +436,12 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 		octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
 
 		skb = build_skb((void *)resp_hw, PAGE_SIZE);
+		if (!skb) {
+			octep_oq_drop_rx(oq, buff_info,
+					 &read_idx, &desc_used);
+			oq->stats.alloc_failures++;
+			continue;
+		}
 		skb_reserve(skb, data_offset);
 
 		rx_bytes += buff_info->len;