Use netif_recyclable_napi_add() to register the page pool with the NAPI instance, and avoid the DMA mapping/unmapping on the tx path when a frag's page already comes from the page pool. As the pool's pages are now handed to the device for both rx and tx, the pool is created with DMA_BIDIRECTIONAL instead of DMA_FROM_DEVICE.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 32 +++++++++++++++----------
 1 file changed, 19 insertions(+), 13 deletions(-)
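The tx-path change in hns3_map_and_fill_desc() boils down to the sketch below. This is illustration only, not part of the diff: it assumes the skb_frag_is_pp() helper and the DESC_TYPE_PP_FRAG descriptor type introduced earlier in this series, and hns3_map_tx_frag() is a made-up name. A frag whose page is owned by a page pool created for the same device already carries a DMA mapping, so the driver can reuse it instead of calling skb_frag_dma_map():

#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Sketch only: reuse the page pool's DMA mapping for pool-owned frags,
 * fall back to a fresh streaming mapping otherwise.
 */
static dma_addr_t hns3_map_tx_frag(struct device *dev, const skb_frag_t *frag,
				   unsigned int *type)
{
	struct page *page = skb_frag_page(frag);

	if (skb_frag_is_pp(frag) && page->pp->p.dev == dev) {
		/* The pool mapped this page for our device when it was
		 * allocated; only the in-page offset is still needed.
		 */
		*type = DESC_TYPE_PP_FRAG;
		return page_pool_get_dma_addr(page) + skb_frag_off(frag);
	}

	/* Non-pool page, or a pool that maps for another device. */
	return skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE);
}

Because pool pages are now handed to the device for tx as well as rx, the pool's one-time mapping has to be valid in both directions, which is why .dma_dir moves from DMA_FROM_DEVICE to DMA_BIDIRECTIONAL in hns3_alloc_page_pool().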
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index fcbeb1f..ab86566 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1689,12 +1689,18 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		return 0;
 	} else {
 		skb_frag_t *frag = (skb_frag_t *)priv;
+		struct page *page = skb_frag_page(frag);
 
 		size = skb_frag_size(frag);
 		if (!size)
 			return 0;
 
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+		if (skb_frag_is_pp(frag) && page->pp->p.dev == dev) {
+			dma = page_pool_get_dma_addr(page) + skb_frag_off(frag);
+			type = DESC_TYPE_PP_FRAG;
+		} else {
+			dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+		}
 	}
 
 	if (unlikely(dma_mapping_error(dev, dma))) {
@@ -4525,7 +4531,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 		ret = hns3_get_vector_ring_chain(tqp_vector,
 						 &vector_ring_chain);
 		if (ret)
-			goto map_ring_fail;
+			return ret;
 
 		ret = h->ae_algo->ops->map_ring_to_vector(h,
 			tqp_vector->vector_irq, &vector_ring_chain);
@@ -4533,19 +4539,10 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
 		if (ret)
-			goto map_ring_fail;
-
-		netif_napi_add(priv->netdev, &tqp_vector->napi,
-			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+			return ret;
 	}
 
 	return 0;
-
-map_ring_fail:
-	while (i--)
-		netif_napi_del(&priv->tqp_vector[i].napi);
-
-	return ret;
 }
 
 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
@@ -4754,7 +4751,7 @@ static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 				(PAGE_SIZE << hns3_page_order(ring)),
 		.nid = dev_to_node(ring_to_dev(ring)),
 		.dev = ring_to_dev(ring),
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = DMA_BIDIRECTIONAL,
 		.offset = 0,
 		.max_len = PAGE_SIZE << hns3_page_order(ring),
 	};
@@ -4923,6 +4920,15 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 		u64_stats_init(&priv->ring[i].syncp);
 	}
 
+	for (i = 0; i < priv->vector_num; i++) {
+		struct hns3_enet_tqp_vector *tqp_vector;
+
+		tqp_vector = &priv->tqp_vector[i];
+		netif_recyclable_napi_add(priv->netdev, &tqp_vector->napi,
+					  hns3_nic_common_poll, NAPI_POLL_WEIGHT,
+					  tqp_vector->rx_group.ring->page_pool);
+	}
+
 	return 0;
 
 out_when_alloc_ring_memory:
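On the tx completion side (not shown in this diff) the same descriptor type is what lets the driver skip the unmap: a buffer marked DESC_TYPE_PP_FRAG is still owned by the page pool, which keeps the mapping alive so the page can be recycled straight back to rx once the NAPI instance registered above returns it. A minimal sketch, assuming the dma, length and type fields of hns3_desc_cb and using a hypothetical helper name:

/* Sketch only: pool-owned tx buffers must not be unmapped here. */
static void hns3_unmap_tx_buffer(struct device *dev, struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_PP_FRAG)
		return;	/* mapping belongs to the page pool */

	dma_unmap_page(dev, cb->dma, cb->length, DMA_TO_DEVICE);
}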