From: zhuyikai <zhuyikai1@h-partners.com>
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB6TTO?from=project-issue
CVE: NA
--------------------------------
Support the new compact RX WQE type. A compact RQ WQE is 8 bytes and carries
only the receive buffer address; no separate CQE buffer is allocated. Instead,
the CQE is integrated with the received packet at the head of the RX buffer
(written by hardware in big endian), so the driver parses it in place and
skips an 8- or 16-byte packet offset depending on the CQE length.
Signed-off-by: zhuyikai <zhuyikai1@h-partners.com>
---
 drivers/net/ethernet/huawei/hinic3/hinic3_mt.h      |   2 +-
 drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h |   1 +
 drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c  |  23 +++-
 drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h  |   3 +-
 drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h  |  13 +++-
 drivers/net/ethernet/huawei/hinic3/hinic3_rx.c      | 162 ++++++++++++++++++++++++++--------
 drivers/net/ethernet/huawei/hinic3/hinic3_rx.h      |  10 ++-
 7 files changed, 150 insertions(+), 64 deletions(-)
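A note for reviewers on the integrated CQE: with the compact RQ WQE the
hardware writes the CQE at the head of the RX buffer in big-endian form, and
the payload follows at an 8- or 16-byte offset. Below is a minimal user-space
sketch of that layout and of the pkt_offset computation. The two-dword CQE
shape and the cqe_len encoding (0 = 8 bytes, 1 = 16 bytes) are assumptions
for illustration; the driver's real definitions live in hinic3_nic_qp.h and
hinic3_rx.h.

/* Layout sketch: [ CQE (8 or 16 bytes, big endian) | packet payload ] */
#include <arpa/inet.h>	/* ntohl(), standing in for be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COMPACT_CQE_8B	8	/* mirrors HINIC3_COMPACT_CQE_8B */
#define COMPACT_CQE_16B	16	/* mirrors HINIC3_COMPACT_CQE_16B */

struct compact_cqe {		/* simplified, assumed shape */
	uint32_t status;	/* big endian, as written by hardware */
	uint32_t vlan_len;
};

int main(void)
{
	uint8_t rx_buf[2048] = { 0 };	/* stands in for the RX page buffer */
	struct compact_cqe cqe;
	uint32_t dw0;
	int cqe_len = 0;		/* assumed: 0 => 8-byte CQE */
	int pkt_offset;

	/* the CQE sits at the head of the buffer; parse it in place */
	memcpy(&cqe, rx_buf, sizeof(cqe));
	dw0 = ntohl(cqe.status);	/* explicit endian conversion */

	/* the payload starts right after the CQE */
	pkt_offset = (cqe_len == 1) ? COMPACT_CQE_16B : COMPACT_CQE_8B;
	printf("dw0=%#x, payload at offset %d\n", dw0, pkt_offset);
	return 0;
}

This mirrors why rx_integrated_cqe_done() hands back a pointer into the
packet page instead of into a dedicated CQE array, and why packaging_skb()
applies the offset only to the first SGE.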
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
index 507f569..b5bcd8a 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
@@ -4,7 +4,7 @@
 #ifndef HINIC3_MT_H
 #define HINIC3_MT_H

-#define HINIC3_DRV_NAME "hisdk3"
+#define HINIC3_DRV_NAME "hinic3"
 #define HINIC3_CHIP_NAME "hinic"

 /* Interrupt at most records, interrupt will be recorded in the FFM */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 16bed02..e76a66b 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -247,6 +247,7 @@ struct hinic3_nic_dev {
 	struct hinic3_txq *txqs;
 	struct hinic3_rxq *rxqs;
 	struct hinic3_dyna_txrxq_params q_params;
+	u8 cqe_mode; /* rx_cqe */

 	u16 num_qp_irq;
 	struct irq_info *qps_irq_info;

diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index df0ee68..a827f44 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time

 static unsigned char rq_wqe_type = HINIC3_NORMAL_RQ_WQE;
 module_param(rq_wqe_type, byte, 0444);
-MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=2)");
+MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=1)");
 /*lint +e806*/

 static u32 tx_drop_thd_on = HINIC3_DEAULT_DROP_THD_ON;
@@ -274,8 +274,15 @@ int hinic3_get_rq_wqe_type(void *hwdev)
 	/* rq_wqe_type is the configuration when the driver is installed,
 	 * but it may not be the actual configuration.
 	 */
-	if (rq_wqe_type != HINIC3_NORMAL_RQ_WQE && rq_wqe_type != HINIC3_EXTEND_RQ_WQE)
-		return HINIC3_NORMAL_RQ_WQE;
+	if (HINIC3_SUPPORT_RX_COMPACT_CQE(hwdev)) {
+		if (rq_wqe_type != HINIC3_COMPACT_RQ_WQE && rq_wqe_type != HINIC3_NORMAL_RQ_WQE &&
+		    rq_wqe_type != HINIC3_EXTEND_RQ_WQE) {
+			return HINIC3_NORMAL_RQ_WQE;
+		}
+	} else {
+		if (rq_wqe_type != HINIC3_NORMAL_RQ_WQE && rq_wqe_type != HINIC3_EXTEND_RQ_WQE)
+			return HINIC3_NORMAL_RQ_WQE;
+	}

 	return rq_wqe_type;
 }

@@ -289,7 +296,7 @@ static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue
 	rq->msix_entry_idx = rq_msix_idx;

 	err = hinic3_wq_create(nic_io->hwdev, &rq->wq, rq_depth,
-			       (u16)BIT(HINIC3_RQ_WQEBB_SHIFT + rq_wqe_type));
+			       (u16)BIT(HINIC3_RQ_WQEBB_SHIFT + rq->wqe_type));
 	if (err) {
 		sdk_err(nic_io->dev_hdl, "Failed to create rx queue(%u) wq\n",
 			q_id);
@@ -774,6 +781,10 @@ void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *r
 			RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE);
 		rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN);
 		break;
+	case HINIC3_COMPACT_RQ_WQE:
+		/* use 8Byte WQE */
+		rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(3, WQE_TYPE);
+		break;
 	default:
 		pr_err("Invalid rq wqe type: %u", wqe_type);
 	}
@@ -985,6 +996,10 @@ static int init_rq_ci_ctxts(struct hinic3_nic_io *nic_io)
 		rq_attr.intr_idx = nic_io->rq[q_id].msix_entry_idx;
 		rq_attr.l2nic_rqn = q_id;
 		rq_attr.cqe_type = 0;
+		if (hinic3_get_rq_wqe_type(nic_io->hwdev) == HINIC3_COMPACT_RQ_WQE) {
+			rq_attr.cqe_type = 1;
+			rq_attr.ci_dma_base = HINIC3_CI_PADDR(nic_io->rq_ci_dma_base, q_id);
+		}

 		err = hinic3_set_rq_ci_ctx(nic_io, &rq_attr);
 		if (err != 0) {
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
index 5697057..37f923d 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -49,7 +49,7 @@ struct hinic3_nic_db {
 struct hinic3_tx_rx_ops {
 	void (*tx_set_wqebb_cnt)(void *wqe_combo, u32 offload, u16 num_sge);
 	void (*tx_set_wqe_task)(void *wqe_combo, void *offload_info);
-	void (*rx_get_cqe_info)(void *rx_cqe, void *cqe_info);
+	void (*rx_get_cqe_info)(void *rx_cqe, void *cqe_info, u8 cqe_mode);
 	bool (*rx_cqe_done)(void *rxq, void **rx_cqe);
 };

@@ -336,4 +336,5 @@ int hinic3_init_qps(void *hwdev, struct hinic3_dyna_qp_params *qp_params);
 void hinic3_deinit_qps(void *hwdev, struct hinic3_dyna_qp_params *qp_params);
 int hinic3_init_nicio_res(void *hwdev);
 void hinic3_deinit_nicio_res(void *hwdev);
+int hinic3_get_rq_wqe_type(void *hwdev);
 #endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
index 2ada9c8..67bb86d 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
@@ -194,6 +194,9 @@ struct hinic3_rq_cqe {
 };

 struct hinic3_cqe_info {
+	u8 pkt_offset;
+	u8 rsvd[3];
+
 	u8 lro_num;
 	u8 vlan_offload;
 	u8 pkt_fmt;
@@ -230,8 +233,14 @@ struct hinic3_rq_normal_wqe {
 	u32 cqe_lo_addr;
 };

+struct hinic3_rq_compact_wqe {
+	u32 buf_hi_addr;
+	u32 buf_lo_addr;
+};
+
 struct hinic3_rq_wqe {
 	union {
+		struct hinic3_rq_compact_wqe compact_wqe;
 		struct hinic3_rq_normal_wqe normal_wqe;
 		struct hinic3_rq_extend_wqe extend_wqe;
 	};
@@ -286,8 +295,8 @@ struct hinic3_sq_wqe_combo {
 	struct hinic3_sq_bufdesc *bds_sec2;

 	u16 first_bds_num;
-	u32 wqe_type;
-	u32 task_type;
+	u8 wqe_type;
+	u8 task_type;

 	u16 wqebb_cnt;
 	u8 rsvd[2];
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 41c3d9e..f8a866d 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -113,7 +113,7 @@ static u32 hinic3_rx_fill_wqe(struct hinic3_rxq *rxq)
 			/* use fixed len */
 			rq_wqe->extend_wqe.buf_desc.sge.len =
 					nic_dev->rx_buff_len;
-		} else {
+		} else if (rxq->rq->wqe_type == HINIC3_NORMAL_RQ_WQE) {
 			rq_wqe->normal_wqe.cqe_hi_addr =
 				upper_32_bits(rx_info->cqe_dma);
 			rq_wqe->normal_wqe.cqe_lo_addr =
@@ -153,11 +153,16 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
 				hinic3_hw_be32(upper_32_bits(dma_addr));
 			rq_wqe->extend_wqe.buf_desc.sge.lo_addr =
 				hinic3_hw_be32(lower_32_bits(dma_addr));
-		} else {
+		} else if (rxq->rq->wqe_type == HINIC3_NORMAL_RQ_WQE) {
 			rq_wqe->normal_wqe.buf_hi_addr =
 				hinic3_hw_be32(upper_32_bits(dma_addr));
 			rq_wqe->normal_wqe.buf_lo_addr =
 				hinic3_hw_be32(lower_32_bits(dma_addr));
+		} else {
+			rq_wqe->compact_wqe.buf_hi_addr =
+				hinic3_hw_be32(upper_32_bits(dma_addr));
+			rq_wqe->compact_wqe.buf_lo_addr =
+				hinic3_hw_be32(lower_32_bits(dma_addr));
 		}
 		rxq->next_to_update = (u16)((rxq->next_to_update + 1) & rxq->q_mask);
 	}
@@ -241,7 +246,7 @@ static void hinic3_reuse_rx_page(struct hinic3_rxq *rxq,

 static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq,
 			       struct hinic3_rx_info *rx_info,
-			       struct sk_buff *skb, u32 size)
+			       struct sk_buff *skb, u32 size, u8 offset)
 {
 	struct page *page;
 	u8 *va;
@@ -260,7 +265,7 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq,
 				      DMA_FROM_DEVICE);

 	if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
-		memcpy(__skb_put(skb, size), va,
+		memcpy(__skb_put(skb, size), va + offset,
 		       ALIGN(size, sizeof(long))); /*lint !e666*/

 		/* page is not reserved, we can reuse buffer as-is */
@@ -273,7 +278,7 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq,
 	}

 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			(int)rx_info->page_offset, (int)size, rxq->buf_len);
+			(int)(rx_info->page_offset + offset), (int)size, rxq->buf_len);

 	/* avoid re-using remote pages */
 	if (unlikely(page_to_nid(page) != numa_node_id()))
@@ -291,26 +296,30 @@ static bool hinic3_add_rx_frag(struct hinic3_rxq *rxq,
 }

 static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb,
-			  u8 sge_num, u32 pkt_len)
+			  u8 sge_num, u32 pkt_len, u8 pkt_offset)
 {
 	struct hinic3_rx_info *rx_info = NULL;
 	struct sk_buff *skb = NULL;
 	u8 frag_num = 0;
-	u32 size;
+	u32 frag_size;
 	u32 sw_ci;
-	u32 temp_pkt_len = pkt_len;
-	u8 temp_sge_num = sge_num;
+	u8 tmp_sge_num;
+	u32 tmp_pkt_len;
+	u8 tmp_pkt_offset;

 	sw_ci = rxq->cons_idx & rxq->q_mask;
 	skb = head_skb;
-	while (temp_sge_num) {
+	tmp_sge_num = sge_num;
+	tmp_pkt_len = pkt_len;
+	tmp_pkt_offset = pkt_offset;
+	while (tmp_sge_num) {
 		rx_info = &rxq->rx_info[sw_ci];
 		sw_ci = (sw_ci + 1) & rxq->q_mask;
-		if (unlikely(temp_pkt_len > rxq->buf_len)) {
-			size = rxq->buf_len;
-			temp_pkt_len -= rxq->buf_len;
+		if (unlikely(tmp_pkt_len + tmp_pkt_offset > rxq->buf_len)) {
+			frag_size = rxq->buf_len - tmp_pkt_offset;
+			tmp_pkt_len -= frag_size;
 		} else {
-			size = temp_pkt_len;
+			frag_size = tmp_pkt_len;
 		}

 		if (unlikely(frag_num == MAX_SKB_FRAGS)) {
@@ -322,12 +331,12 @@ static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb,
 		}

 		if (unlikely(skb != head_skb)) {
-			head_skb->len += size;
-			head_skb->data_len += size;
+			head_skb->len += frag_size;
+			head_skb->data_len += frag_size;
 			head_skb->truesize += rxq->buf_len;
 		}

-		if (likely(hinic3_add_rx_frag(rxq, rx_info, skb, size))) {
+		if (likely(hinic3_add_rx_frag(rxq, rx_info, skb, frag_size, tmp_pkt_offset))) {
 			hinic3_reuse_rx_page(rxq, rx_info);
 		} else {
 			/* we are not reusing the buffer so unmap it */
@@ -337,7 +346,8 @@ static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *head_skb,
 		/* clear contents of buffer_info */
 		rx_info->buf_dma_addr = 0;
 		rx_info->page = NULL;
-		temp_sge_num--;
+		tmp_sge_num--;
+		tmp_pkt_offset = 0; /* only the first sge uses the offset */
 		frag_num++;
 	}
 }
@@ -354,6 +364,7 @@ static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
 	struct sk_buff *skb = NULL;
 	struct net_device *netdev = rxq->netdev;
 	u32 pkt_len = cqe_info->pkt_len;
+	u8 pkt_offset = cqe_info->pkt_offset;
 	u8 sge_num, skb_num;
 	u16 wqebb_cnt = 0;

@@ -361,7 +372,7 @@ static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
 	if (unlikely(!head_skb))
 		return NULL;

-	sge_num = HINIC3_GET_SGE_NUM(pkt_len, rxq);
+	sge_num = HINIC3_GET_SGE_NUM(pkt_len + pkt_offset, rxq);
 	if (likely(sge_num <= MAX_SKB_FRAGS))
 		skb_num = 1;
 	else
@@ -387,7 +398,7 @@ static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
 	prefetchw(head_skb->data);
 	wqebb_cnt = sge_num;

-	packaging_skb(rxq, head_skb, sge_num, pkt_len);
+	packaging_skb(rxq, head_skb, sge_num, pkt_len, pkt_offset);

 	rxq->cons_idx += wqebb_cnt;
 	rxq->delta += wqebb_cnt;
@@ -857,7 +868,7 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_cqe_info *cqe_info
 	 (HINIC3_GET_RX_IP_TYPE(hinic3_hw_cpu32((cqe)->offload_type)) == \
 	  HINIC3_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

-void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info)
+void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info, u8 cqe_mode)
 {
 	struct hinic3_rq_cqe *cqe = (struct hinic3_rq_cqe *)rx_cqe;
 	struct hinic3_cqe_info *info = (struct hinic3_cqe_info *)cqe_info;
@@ -880,15 +891,24 @@ void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info)
 	info->rss_hash_value = dw3;
 }

-void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info)
+void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info, u8 cqe_mode)
 {
 	struct hinic3_rq_cqe *cqe = (struct hinic3_rq_cqe *)rx_cqe;
 	struct hinic3_cqe_info *info = (struct hinic3_cqe_info *)cqe_info;
 	u32 dw0, dw1, dw2;

-	dw0 = hinic3_hw_cpu32(cqe->status);
-	dw1 = hinic3_hw_cpu32(cqe->vlan_len);
-	dw2 = hinic3_hw_cpu32(cqe->offload_type);
+	if (cqe_mode != HINIC3_RQ_CQE_INTEGRATE) {
+		dw0 = hinic3_hw_cpu32(cqe->status);
+		dw1 = hinic3_hw_cpu32(cqe->vlan_len);
+		dw2 = hinic3_hw_cpu32(cqe->offload_type);
+	} else {
+		/* When the rx wqe is compact, the cqe is integrated with the
+		 * packet in big endian, so an explicit endian conversion is
+		 * needed.
+		 */
+		dw0 = be32_to_cpu(cqe->status);
+		dw1 = be32_to_cpu(cqe->vlan_len);
+		dw2 = be32_to_cpu(cqe->offload_type);
+	}

 	info->cqe_type = RQ_COMPACT_CQE_STATUS_GET(dw0, CQE_TYPE);
 	info->csum_err = RQ_COMPACT_CQE_STATUS_GET(dw0, CSUM_ERR);
@@ -916,9 +936,28 @@ void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info)
 		info->lro_num = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, NUM_LRO);
 		info->vlan_tag = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, VLAN);
 	}
+	if (info->cqe_type == HINIC3_RQ_CQE_INTEGRATE) {
+		info->pkt_offset = info->cqe_len == RQ_COMPACT_CQE_16BYTE ?
+				   HINIC3_COMPACT_CQE_16B : HINIC3_COMPACT_CQE_8B;
+	}
 }
+
+static bool rx_integrated_cqe_done(void *rx_queue, void **rx_cqe)
+{
+	u16 sw_ci;
+	u16 hw_ci;
+	struct hinic3_rxq *rxq = rx_queue;
+
+	sw_ci = (u16)(rxq->cons_idx & rxq->q_mask);
+	hw_ci = hinic3_get_rq_hw_ci(rxq->rq);
+	if (hw_ci == sw_ci)
+		return false;
+
+	*rx_cqe = (u8 *)page_address(rxq->rx_info[sw_ci].page) + rxq->rx_info[sw_ci].page_offset;
+	return true;
+}

-static bool hinic3_rx_cqe_done(void *rx_queue, void **rx_cqe)
+static bool rx_separate_cqe_done(void *rx_queue, void **rx_cqe)
 {
 	u32 sw_ci, status = 0;
 	struct hinic3_rxq *rxq = rx_queue;
@@ -952,7 +991,7 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
 		/* make sure we read rx_done before packet length */
 		rmb();

-		nic_dev->tx_rx_ops.rx_get_cqe_info(rx_cqe, &cqe_info);
+		nic_dev->tx_rx_ops.rx_get_cqe_info(rx_cqe, &cqe_info, nic_dev->cqe_mode);
 		if (recv_one_pkt(rxq, &cqe_info))
 			break;

@@ -991,7 +1030,13 @@ int hinic3_alloc_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq,
 	u32 pkts;
 	u64 size;

-	nic_dev->tx_rx_ops.rx_cqe_done = hinic3_rx_cqe_done;
+	if (hinic3_get_rq_wqe_type(nic_dev->hwdev) == HINIC3_COMPACT_RQ_WQE) {
+		nic_dev->cqe_mode = HINIC3_RQ_CQE_INTEGRATE;
+		nic_dev->tx_rx_ops.rx_cqe_done = rx_integrated_cqe_done;
+	} else {
+		nic_dev->cqe_mode = HINIC3_RQ_CQE_SEPARATE;
+		nic_dev->tx_rx_ops.rx_cqe_done = rx_separate_cqe_done;
+	}

 	for (idx = 0; idx < num_rq; idx++) {
 		rqres = &rxqs_res[idx];
@@ -1003,23 +1048,26 @@ int hinic3_alloc_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq,
 			goto err_out;
 		}

-		rqres->cqe_start_vaddr =
-			dma_zalloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
-					    &rqres->cqe_start_paddr,
-					    GFP_KERNEL);
-		if (!rqres->cqe_start_vaddr) {
-			kfree(rqres->rx_info);
-			nicif_err(nic_dev, drv, nic_dev->netdev,
-				  "Failed to alloc rxq%d cqe\n", idx);
-			goto err_out;
+		if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
+			rqres->cqe_start_vaddr =
+				dma_zalloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+						    &rqres->cqe_start_paddr,
+						    GFP_KERNEL);
+			if (!rqres->cqe_start_vaddr) {
+				kfree(rqres->rx_info);
+				nicif_err(nic_dev, drv, nic_dev->netdev,
+					  "Failed to alloc rxq%d cqe\n", idx);
+				goto err_out;
+			}
 		}
-
 		pkts = hinic3_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info);
 		if (!pkts) {
-			dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
-					  rqres->cqe_start_vaddr,
-					  rqres->cqe_start_paddr);
+			if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
+				dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+						  rqres->cqe_start_vaddr,
+						  rqres->cqe_start_paddr);
+			}
 			kfree(rqres->rx_info);
 			nicif_err(nic_dev, drv, nic_dev->netdev,
 				  "Failed to alloc rxq%d rx buffers\n", idx);
@@ -1034,9 +1082,11 @@ err_out:
 		rqres = &rxqs_res[i];

 		hinic3_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info);
-		dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
-				  rqres->cqe_start_vaddr,
-				  rqres->cqe_start_paddr);
+		if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
+			dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+					  rqres->cqe_start_vaddr,
+					  rqres->cqe_start_paddr);
+		}
 		kfree(rqres->rx_info);
 	}

@@ -1054,9 +1104,11 @@ void hinic3_free_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq,
 		rqres = &rxqs_res[idx];

 		hinic3_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info);
-		dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
-				  rqres->cqe_start_vaddr,
-				  rqres->cqe_start_paddr);
+		if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
+			dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+					  rqres->cqe_start_vaddr,
+					  rqres->cqe_start_paddr);
+		}
 		kfree(rqres->rx_info);
 	}
 }
@@ -1099,13 +1151,15 @@ int hinic3_configure_rxqs(struct hinic3_nic_dev *nic_dev, u16 num_rq,
 		rxq->rx_info = rqres->rx_info;

 		/* fill cqe */
-		cqe_va = (struct hinic3_rq_cqe *)rqres->cqe_start_vaddr;
-		cqe_pa = rqres->cqe_start_paddr;
-		for (idx = 0; idx < rq_depth; idx++) {
-			rxq->rx_info[idx].cqe = cqe_va;
-			rxq->rx_info[idx].cqe_dma = cqe_pa;
-			cqe_va++;
-			cqe_pa += sizeof(*rxq->rx_info->cqe);
+		if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
+			cqe_va = (struct hinic3_rq_cqe *)rqres->cqe_start_vaddr;
+			cqe_pa = rqres->cqe_start_paddr;
+			for (idx = 0; idx < rq_depth; idx++) {
+				rxq->rx_info[idx].cqe = cqe_va;
+				rxq->rx_info[idx].cqe_dma = cqe_pa;
+				cqe_va++;
+				cqe_pa += sizeof(*rxq->rx_info->cqe);
+			}
 		}

 		rxq->rq = hinic3_get_nic_queue(nic_dev->hwdev, rxq->q_id,
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 9064177..586a221 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -28,6 +28,12 @@

 #define HINIC3_HEADER_DATA_UNIT 2

+#define HINIC3_COMPACT_CQE_8B 8
+#define HINIC3_COMPACT_CQE_16B 16
+
+#define HINIC3_RQ_CQE_SEPARATE 0
+#define HINIC3_RQ_CQE_INTEGRATE 1
+
 struct hinic3_rxq_stats {
 	u64 packets;
 	u64 bytes;
@@ -150,9 +156,9 @@ void hinic3_rxq_get_stats(struct hinic3_rxq *rxq,

 void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats);

-void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info);
+void hinic3_rx_get_cqe_info(void *rx_cqe, void *cqe_info, u8 cqe_mode);

-void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info);
+void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info, u8 cqe_mode);

 void hinic3_rxq_check_work_handler(struct work_struct *work);
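
For reviewers: the fallback added in hinic3_get_rq_wqe_type() can be exercised
in isolation. Below is a minimal user-space sketch; the enum values are
assumptions taken from the module parameter description ("0-8Bytes, 1-16Bytes,
2-32Bytes") and are not copied from the driver's headers.

/* Sketch of the rq_wqe_type fallback: an out-of-range request, or a compact
 * request on a device without compact-CQE support, degrades to the normal
 * (16-byte) WQE type.
 */
#include <stdbool.h>
#include <stdio.h>

enum rq_wqe_type {
	COMPACT_RQ_WQE = 0,	/* 8-byte WQE, integrated CQE (assumed value) */
	NORMAL_RQ_WQE = 1,	/* 16-byte WQE, separate CQE (assumed value) */
	EXTEND_RQ_WQE = 2,	/* 32-byte WQE (assumed value) */
};

/* compact is only a valid choice when the device supports compact CQE */
static int get_rq_wqe_type(int requested, bool compact_cqe_supported)
{
	if (compact_cqe_supported) {
		if (requested != COMPACT_RQ_WQE && requested != NORMAL_RQ_WQE &&
		    requested != EXTEND_RQ_WQE)
			return NORMAL_RQ_WQE;
	} else {
		if (requested != NORMAL_RQ_WQE && requested != EXTEND_RQ_WQE)
			return NORMAL_RQ_WQE;
	}
	return requested;
}

int main(void)
{
	printf("%d\n", get_rq_wqe_type(COMPACT_RQ_WQE, true));  /* 0: honored */
	printf("%d\n", get_rq_wqe_type(COMPACT_RQ_WQE, false)); /* 1: falls back */
	printf("%d\n", get_rq_wqe_type(7, true));               /* 1: falls back */
	return 0;
}

This also explains why init_rq_ci_ctxts() and hinic3_alloc_rxqs_res() call
hinic3_get_rq_wqe_type() again instead of reading the module parameter
directly: the validated value, not the raw request, decides the CQE mode.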