From: Huisong Li <lihuisong@huawei.com>
In the Rx datapath, pkt_len_errors and l2_errors indicate that the driver needs to discard received packets, while the l3/l4/ol3/ol4_csum_errors in the Rx datapath and the other stats in the Tx datapath do not lead to packet drops. Therefore, to improve code readability and maintainability, encapsulate the error stats and the dfx stats into separate structures.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c          |  30 ++---
 drivers/net/hns3/hns3_rxtx.h          | 134 ++++++++++---------
 drivers/net/hns3/hns3_rxtx_vec_neon.h |   2 +-
 drivers/net/hns3/hns3_stats.c         | 243 ++++++++++++++++++++++------------
 drivers/net/hns3/hns3_stats.h         |   9 +-
 5 files changed, 251 insertions(+), 167 deletions(-)
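The core pattern the patch leans on is that the xstats tables store offsetof() values relative to the new, small stats structs instead of the whole queue struct. Below is a minimal standalone sketch of that name/offset pattern (illustrative only — demo_tx_dfx_stats, demo_name_offset and demo_strings are not driver names):

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;
	uint64_t queue_full_cnt;
};

struct demo_name_offset {
	const char *name;
	size_t offset;
};

static const struct demo_name_offset demo_strings[] = {
	{"OVER_LENGTH_PKT_CNT",
		offsetof(struct demo_tx_dfx_stats, over_length_pkt_cnt)},
	{"QUEUE_FULL_CNT",
		offsetof(struct demo_tx_dfx_stats, queue_full_cnt)},
};

int
main(void)
{
	struct demo_tx_dfx_stats stats = { .queue_full_cnt = 3 };
	size_t i;

	for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++) {
		/* Same access pattern as the driver dump loops: base + offset */
		char *addr = (char *)&stats + demo_strings[i].offset;

		printf("%s: %" PRIu64 "\n", demo_strings[i].name,
		       *(uint64_t *)addr);
	}
	return 0;
}

Keeping the offsets relative to the dedicated struct is also what lets the queues be reset with a single memset() instead of zeroing each counter by hand.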
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0badfc9..3d5f74f 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1792,12 +1792,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	rxq->io_head_reg = (volatile void *)((char *)rxq->io_base +
			   HNS3_RING_RX_HEAD_REG);
 	rxq->rx_buf_len = rx_buf_size;
-	rxq->l2_errors = 0;
-	rxq->pkt_len_errors = 0;
-	rxq->l3_csum_errors = 0;
-	rxq->l4_csum_errors = 0;
-	rxq->ol3_csum_errors = 0;
-	rxq->ol4_csum_errors = 0;
+	memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats));
+	memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
 
 	/* CRC len set here is used for amending packet length */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
@@ -2622,12 +2618,8 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 					     HNS3_RING_TX_TAIL_REG);
 	txq->min_tx_pkt_len = hw->min_tx_pkt_len;
 	txq->tso_mode = hw->tso_mode;
-	txq->over_length_pkt_cnt = 0;
-	txq->exceed_limit_bd_pkt_cnt = 0;
-	txq->exceed_limit_bd_reassem_fail = 0;
-	txq->unsupported_tunnel_pkt_cnt = 0;
-	txq->queue_full_cnt = 0;
-	txq->pkt_padding_fail_cnt = 0;
+	memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
+
 	rte_spinlock_lock(&hw->lock);
 	dev->data->tx_queues[idx] = txq;
 	rte_spinlock_unlock(&hw->lock);
@@ -3350,7 +3342,7 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
 	if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
 		/* Fill in tunneling parameters if necessary */
 		if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) {
-			txq->unsupported_tunnel_pkt_cnt++;
+			txq->dfx_stats.unsupported_tunnel_pkt_cnt++;
 			return -EINVAL;
 		}
 
@@ -3380,17 +3372,17 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
 	 * driver support, the packet will be ignored.
 	 */
 	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
-		txq->over_length_pkt_cnt++;
+		txq->dfx_stats.over_length_pkt_cnt++;
 		return -EINVAL;
 	}
 
 	max_non_tso_bd_num = txq->max_non_tso_bd_num;
 	if (unlikely(nb_buf > max_non_tso_bd_num)) {
-		txq->exceed_limit_bd_pkt_cnt++;
+		txq->dfx_stats.exceed_limit_bd_pkt_cnt++;
 		ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt,
					      max_non_tso_bd_num);
 		if (ret) {
-			txq->exceed_limit_bd_reassem_fail++;
+			txq->dfx_stats.exceed_limit_bd_reassem_fail++;
 			return ret;
 		}
 		*m_seg = new_pkt;
@@ -3528,7 +3520,7 @@ hns3_xmit_pkts_simple(void *tx_queue,
 	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
 	if (unlikely(nb_pkts == 0)) {
 		if (txq->tx_bd_ready == 0)
-			txq->queue_full_cnt++;
+			txq->dfx_stats.queue_full_cnt++;
 		return 0;
 	}
 
@@ -3580,7 +3572,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		nb_buf = tx_pkt->nb_segs;
 
 		if (nb_buf > txq->tx_bd_ready) {
-			txq->queue_full_cnt++;
+			txq->dfx_stats.queue_full_cnt++;
 			if (nb_tx == 0)
 				return 0;
 
@@ -3601,7 +3593,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 					 rte_pktmbuf_pkt_len(tx_pkt);
 			appended = rte_pktmbuf_append(tx_pkt, add_len);
 			if (appended == NULL) {
-				txq->pkt_padding_fail_cnt++;
+				txq->dfx_stats.pkt_padding_fail_cnt++;
 				break;
 			}
 
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 6538848..8a0c981 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -266,6 +266,18 @@ struct hns3_entry {
 	struct rte_mbuf *mbuf;
 };
 
+struct hns3_rx_dfx_stats {
+	uint64_t l3_csum_errors;
+	uint64_t l4_csum_errors;
+	uint64_t ol3_csum_errors;
+	uint64_t ol4_csum_errors;
+};
+
+struct hns3_rx_bd_errors_stats {
+	uint64_t l2_errors;
+	uint64_t pkt_len_errors;
+};
+
 struct hns3_rx_queue {
 	void *io_base;
 	volatile void *io_head_reg;
@@ -312,12 +324,10 @@ struct hns3_rx_queue {
 	bool pvid_sw_discard_en;
 	bool enabled;           /* indicate if Rx queue has been enabled */
 
-	uint64_t l2_errors;
-	uint64_t pkt_len_errors;
-	uint64_t l3_csum_errors;
-	uint64_t l4_csum_errors;
-	uint64_t ol3_csum_errors;
-	uint64_t ol4_csum_errors;
+	/* DFX statistics for packets the driver does not discard */
+	struct hns3_rx_dfx_stats dfx_stats;
+	/* Error statistics for packets the driver discards */
+	struct hns3_rx_bd_errors_stats err_stats;
 
 	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
 	uint16_t bulk_mbuf_num;
@@ -328,6 +338,57 @@ struct hns3_rx_queue {
 	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
 };
 
+/*
+ * The following items are used for the abnormal errors statistics in
+ * the Tx datapath. When upper level application calls the
+ * rte_eth_tx_burst API function to send multiple packets at a time with
+ * burst mode based on hns3 network engine, there are some abnormal
+ * conditions that cause the driver to fail to operate the hardware to
+ * send packets correctly.
+ * Note: When using burst mode to call the rte_eth_tx_burst API function
+ * to send multiple packets at a time. When the first abnormal error is
+ * detected, add one to the relevant error statistics item, and then
+ * exit the loop of sending multiple packets of the function. That is to
+ * say, even if there are multiple packets in which abnormal errors may
+ * be detected in the burst, the relevant error statistics in the driver
+ * will only be increased by one.
+ * The detail description of the Tx abnormal errors statistic items as
+ * below:
+ * - over_length_pkt_cnt
+ *     Total number of greater than HNS3_MAX_FRAME_LEN the driver
+ *     supported.
+ *
+ * - exceed_limit_bd_pkt_cnt
+ *     Total number of exceeding the hardware limited bd which process
+ *     a packet needed bd numbers.
+ *
+ * - exceed_limit_bd_reassem_fail
+ *     Total number of exceeding the hardware limited bd fail which
+ *     process a packet needed bd numbers and reassemble fail.
+ *
+ * - unsupported_tunnel_pkt_cnt
+ *     Total number of unsupported tunnel packet. The unsupported tunnel
+ *     type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
+ *     with MPLS-in-UDP RFC 7510 header.
+ *
+ * - queue_full_cnt
+ *     Total count which the available bd numbers in current bd queue is
+ *     less than the bd numbers with the pkt process needed.
+ *
+ * - pkt_padding_fail_cnt
+ *     Total count which the packet length is less than minimum packet
+ *     length(struct hns3_tx_queue::min_tx_pkt_len) supported by
+ *     hardware in Tx direction and fail to be appended with 0.
+ */
+struct hns3_tx_dfx_stats {
+	uint64_t over_length_pkt_cnt;
+	uint64_t exceed_limit_bd_pkt_cnt;
+	uint64_t exceed_limit_bd_reassem_fail;
+	uint64_t unsupported_tunnel_pkt_cnt;
+	uint64_t queue_full_cnt;
+	uint64_t pkt_padding_fail_cnt;
+};
+
 struct hns3_tx_queue {
 	void *io_base;
 	volatile void *io_tail_reg;
@@ -411,54 +472,7 @@ struct hns3_tx_queue {
 	bool pvid_sw_shift_en;
 	bool enabled;           /* indicate if Tx queue has been enabled */
 
-	/*
-	 * The following items are used for the abnormal errors statistics in
-	 * the Tx datapath. When upper level application calls the
-	 * rte_eth_tx_burst API function to send multiple packets at a time with
-	 * burst mode based on hns3 network engine, there are some abnormal
-	 * conditions that cause the driver to fail to operate the hardware to
-	 * send packets correctly.
-	 * Note: When using burst mode to call the rte_eth_tx_burst API function
-	 * to send multiple packets at a time. When the first abnormal error is
-	 * detected, add one to the relevant error statistics item, and then
-	 * exit the loop of sending multiple packets of the function. That is to
-	 * say, even if there are multiple packets in which abnormal errors may
-	 * be detected in the burst, the relevant error statistics in the driver
-	 * will only be increased by one.
-	 * The detail description of the Tx abnormal errors statistic items as
-	 * below:
-	 * - over_length_pkt_cnt
-	 *     Total number of greater than HNS3_MAX_FRAME_LEN the driver
-	 *     supported.
-	 *
-	 * - exceed_limit_bd_pkt_cnt
-	 *     Total number of exceeding the hardware limited bd which process
-	 *     a packet needed bd numbers.
-	 *
-	 * - exceed_limit_bd_reassem_fail
-	 *     Total number of exceeding the hardware limited bd fail which
-	 *     process a packet needed bd numbers and reassemble fail.
-	 *
-	 * - unsupported_tunnel_pkt_cnt
-	 *     Total number of unsupported tunnel packet. The unsupported tunnel
-	 *     type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet
-	 *     with MPLS-in-UDP RFC 7510 header.
-	 *
-	 * - queue_full_cnt
-	 *     Total count which the available bd numbers in current bd queue is
-	 *     less than the bd numbers with the pkt process needed.
-	 *
-	 * - pkt_padding_fail_cnt
-	 *     Total count which the packet length is less than minimum packet
-	 *     length(struct hns3_tx_queue::min_tx_pkt_len) supported by
-	 *     hardware in Tx direction and fail to be appended with 0.
-	 */
-	uint64_t over_length_pkt_cnt;
-	uint64_t exceed_limit_bd_pkt_cnt;
-	uint64_t exceed_limit_bd_reassem_fail;
-	uint64_t unsupported_tunnel_pkt_cnt;
-	uint64_t queue_full_cnt;
-	uint64_t pkt_padding_fail_cnt;
+	struct hns3_tx_dfx_stats dfx_stats;
 };
 
 #define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
@@ -511,9 +525,9 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
 
 	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
 		if (l234_info & BIT(HNS3_RXD_L2E_B))
-			rxq->l2_errors++;
+			rxq->err_stats.l2_errors++;
 		else
-			rxq->pkt_len_errors++;
+			rxq->err_stats.pkt_len_errors++;
 		return -EINVAL;
 	}
@@ -525,24 +539,24 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
 
 	if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
 		rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-		rxq->l3_csum_errors++;
+		rxq->dfx_stats.l3_csum_errors++;
 		tmp |= HNS3_L3_CKSUM_ERR;
 	}
 
 	if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
 		rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-		rxq->l4_csum_errors++;
+		rxq->dfx_stats.l4_csum_errors++;
 		tmp |= HNS3_L4_CKSUM_ERR;
 	}
 
 	if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
-		rxq->ol3_csum_errors++;
+		rxq->dfx_stats.ol3_csum_errors++;
 		tmp |= HNS3_OUTER_L3_CKSUM_ERR;
 	}
 
 	if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
 		rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
-		rxq->ol4_csum_errors++;
+		rxq->dfx_stats.ol4_csum_errors++;
 		tmp |= HNS3_OUTER_L4_CKSUM_ERR;
 	}
 }
diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h
index 54addbf..a693b4b 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_neon.h
+++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h
@@ -42,7 +42,7 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
 
 	nb_commit = RTE_MIN(txq->tx_bd_ready, nb_pkts);
 	if (unlikely(nb_commit == 0)) {
-		txq->queue_full_cnt++;
+		txq->dfx_stats.queue_full_cnt++;
 		return 0;
 	}
 	nb_tx = nb_commit;
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index 62a712b..419d7e2 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -262,34 +262,38 @@ static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
 
 /* The statistic of errors in Rx BD */
 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
-	{"RX_PKT_LEN_ERRORS",
+	{"PKT_LEN_ERRORS",
 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
-	{"L2_RX_ERRORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
-	{"RX_L3_CHECKSUM_ERRORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)},
-	{"RX_L4_CHECKSUM_ERRORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)},
-	{"RX_OL3_CHECKSUM_ERRORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)},
-	{"RX_OL4_CHECKSUM_ERRORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)}
+	{"L2_ERRORS",
+		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
 };
 
-/* The statistic of the Tx errors */
-static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = {
-	{"TX_OVER_LENGTH_PKT_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
-	{"TX_EXCEED_LIMITED_BD_PKT_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
-	{"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
-	{"TX_UNSUPPORTED_TUNNEL_PKT_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
-	{"TX_QUEUE_FULL_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)},
-	{"TX_SHORT_PKT_PAD_FAIL_CNT",
-		HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
+/* The dfx statistics in the Rx datapath */
+static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
+	{"L3_CHECKSUM_ERRORS",
+		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
+	{"L4_CHECKSUM_ERRORS",
+		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
+	{"OL3_CHECKSUM_ERRORS",
+		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
+	{"OL4_CHECKSUM_ERRORS",
+		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
+};
+
+/* The dfx statistics in the Tx datapath */
+static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
+	{"OVER_LENGTH_PKT_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
+	{"EXCEED_LIMITED_BD_PKT_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
+	{"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
+	{"UNSUPPORTED_TUNNEL_PKT_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
+	{"QUEUE_FULL_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
+	{"SHORT_PKT_PAD_FAIL_CNT",
+		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
 };
 
 /* The statistic of rx queue */
@@ -314,8 +318,11 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
 #define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
 	sizeof(hns3_rx_bd_error_strings[0]))
 
-#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \
-	sizeof(hns3_tx_errors_strings[0]))
+#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
+	sizeof(hns3_rxq_dfx_stats_strings[0]))
+
+#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
+	sizeof(hns3_txq_dfx_stats_strings[0]))
 
 #define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
 	sizeof(hns3_rx_queue_strings[0]))
@@ -519,7 +526,8 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
 	for (i = 0; i != num; ++i) {
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq) {
-			cnt = rxq->l2_errors + rxq->pkt_len_errors;
+			cnt = rxq->err_stats.l2_errors +
+				rxq->err_stats.pkt_len_errors;
 			rte_stats->q_errors[i] = cnt;
 			rte_stats->q_ipackets[i] =
 				stats->rcb_rx_ring_pktnum[i] - cnt;
@@ -584,11 +592,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
 	 * Clear soft stats of rx error packet which will be dropped
 	 * in driver.
 	 */
-	for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) {
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq) {
-			rxq->pkt_len_errors = 0;
-			rxq->l2_errors = 0;
+			rxq->err_stats.pkt_len_errors = 0;
+			rxq->err_stats.l2_errors = 0;
 		}
 	}
 
@@ -621,21 +629,24 @@ static int
 hns3_xstats_calc_num(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS;
-	int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS;
-	int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS;
-	int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS;
+	uint16_t nb_rx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
+	int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS;
+	int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS;
+	int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS;
+	int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS;
+	int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS;
 
 	if (hns->is_vf)
-		return bderr_stats + tx_err_stats + rx_queue_stats +
-			tx_queue_stats + HNS3_NUM_RESET_XSTATS;
+		return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+			rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS;
 	else
-		return bderr_stats + tx_err_stats + rx_queue_stats +
-			tx_queue_stats + HNS3_FIX_NUM_STATS;
+		return bderr_stats + rx_dfx_stats + tx_dfx_stats +
+			rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS;
 }
 
 static void
-hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		     int *count)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
@@ -683,6 +694,63 @@ hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
 	}
 }
 
+static void
+hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	struct hns3_rx_dfx_stats *dfx_stats;
+	struct hns3_rx_queue *rxq;
+	uint16_t i, j;
+	char *val;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		dfx_stats = &rxq->dfx_stats;
+		for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
+			val = (char *)dfx_stats +
+				hns3_rxq_dfx_stats_strings[j].offset;
+			xstats[*count].value = *(uint64_t *)val;
+			xstats[*count].id = *count;
+			(*count)++;
+		}
+	}
+}
+
+static void
+hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	struct hns3_tx_dfx_stats *dfx_stats;
+	struct hns3_tx_queue *txq;
+	uint16_t i, j;
+	char *val;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		dfx_stats = &txq->dfx_stats;
+		for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
+			val = (char *)dfx_stats +
+				hns3_txq_dfx_stats_strings[j].offset;
+			xstats[*count].value = *(uint64_t *)val;
+			xstats[*count].id = *count;
+			(*count)++;
+		}
+	}
+}
+
+static void
+hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		       int *count)
+{
+	hns3_rxq_dfx_stats_get(dev, xstats, count);
+	hns3_txq_dfx_stats_get(dev, xstats, count);
+}
 /*
  * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
  * @param dev
@@ -705,8 +773,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	struct hns3_hw *hw = &hns->hw;
 	struct hns3_mac_stats *mac_stats = &hw->mac_stats;
 	struct hns3_reset_stats *reset_stats = &hw->reset.stats;
+	struct hns3_rx_bd_errors_stats *rx_err_stats;
 	struct hns3_rx_queue *rxq;
-	struct hns3_tx_queue *txq;
 	uint16_t i, j;
 	char *addr;
 	int count;
@@ -758,26 +826,49 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
 			rxq = dev->data->rx_queues[j];
-			addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset;
-			xstats[count].value = *(uint64_t *)addr;
-			xstats[count].id = count;
-			count++;
+			if (rxq) {
+				rx_err_stats = &rxq->err_stats;
+				addr = (char *)rx_err_stats +
+					hns3_rx_bd_error_strings[i].offset;
+				xstats[count].value = *(uint64_t *)addr;
+				xstats[count].id = count;
+				count++;
+			}
 		}
 	}
 
-	/* Get the Tx errors stats */
-	for (j = 0; j < dev->data->nb_tx_queues; j++) {
-		for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
-			txq = dev->data->tx_queues[j];
-			addr = (char *)txq + hns3_tx_errors_strings[i].offset;
-			xstats[count].value = *(uint64_t *)addr;
-			xstats[count].id = count;
-			count++;
+	hns3_tqp_dfx_stats_get(dev, xstats, &count);
+	hns3_queue_stats_get(dev, xstats, &count);
+
+	return count;
+}
+
+static void
+hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
+			    struct rte_eth_xstat_name *xstats_names,
+			    uint32_t *count)
+{
+	uint16_t i, j;
+
+	for (j = 0; j < dev->data->nb_rx_queues; j++) {
+		for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) {
+			snprintf(xstats_names[*count].name,
+				 sizeof(xstats_names[*count].name),
+				 "rx_q%u_%s", j,
+				 hns3_rxq_dfx_stats_strings[i].name);
+			(*count)++;
 		}
 	}
 
-	hns3_get_queue_stats(dev, xstats, &count);
-	return count;
+	for (j = 0; j < dev->data->nb_tx_queues; j++) {
+		for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) {
+			snprintf(xstats_names[*count].name,
+				 sizeof(xstats_names[*count].name),
+				 "tx_q%u_%s", j,
+				 hns3_txq_dfx_stats_strings[i].name);
+			(*count)++;
+		}
+	}
 }
 
 /*
@@ -845,27 +936,19 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
 			snprintf(xstats_names[count].name,
 				 sizeof(xstats_names[count].name),
-				 "rx_q%u%s", j,
+				 "rx_q%u_%s", j,
 				 hns3_rx_bd_error_strings[i].name);
 			count++;
 		}
 	}
 
-	for (j = 0; j < dev->data->nb_tx_queues; j++) {
-		for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) {
-			snprintf(xstats_names[count].name,
-				 sizeof(xstats_names[count].name),
-				 "tx_q%u%s", j,
-				 hns3_tx_errors_strings[i].name);
-			count++;
-		}
-	}
+	hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
 
 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
 		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
 			snprintf(xstats_names[count].name,
 				 sizeof(xstats_names[count].name),
-				 "rx_q%u%s", j, hns3_rx_queue_strings[i].name);
+				 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
 			count++;
 		}
 	}
@@ -874,7 +957,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
 		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
 			snprintf(xstats_names[count].name,
 				 sizeof(xstats_names[count].name),
-				 "tx_q%u%s", j, hns3_tx_queue_strings[i].name);
+				 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
 			count++;
 		}
 	}
@@ -1043,30 +1126,22 @@ hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
 {
 	struct hns3_rx_queue *rxq;
 	struct hns3_tx_queue *txq;
-	int i;
+	uint16_t i;
 
 	/* Clear Rx dfx stats */
-	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (rxq) {
-			rxq->l3_csum_errors = 0;
-			rxq->l4_csum_errors = 0;
-			rxq->ol3_csum_errors = 0;
-			rxq->ol4_csum_errors = 0;
-		}
+		if (rxq)
+			memset(&rxq->dfx_stats, 0,
+			       sizeof(struct hns3_rx_dfx_stats));
 	}
 
 	/* Clear Tx dfx stats */
-	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (txq) {
-			txq->over_length_pkt_cnt = 0;
-			txq->exceed_limit_bd_pkt_cnt = 0;
-			txq->exceed_limit_bd_reassem_fail = 0;
-			txq->unsupported_tunnel_pkt_cnt = 0;
-			txq->queue_full_cnt = 0;
-			txq->pkt_padding_fail_cnt = 0;
-		}
+		if (txq)
+			memset(&txq->dfx_stats, 0,
+			       sizeof(struct hns3_tx_dfx_stats));
 	}
 }
 
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 9fcd5f9..12842cd 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -127,10 +127,13 @@ struct hns3_reset_stats;
 	(offsetof(struct hns3_reset_stats, f))
 
 #define HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(f) \
-	(offsetof(struct hns3_rx_queue, f))
+	(offsetof(struct hns3_rx_bd_errors_stats, f))
 
-#define HNS3_TX_ERROR_STATS_FIELD_OFFSET(f) \
-	(offsetof(struct hns3_tx_queue, f))
+#define HNS3_RXQ_DFX_STATS_FIELD_OFFSET(f) \
+	(offsetof(struct hns3_rx_dfx_stats, f))
+
+#define HNS3_TXQ_DFX_STATS_FIELD_OFFSET(f) \
+	(offsetof(struct hns3_tx_dfx_stats, f))
 
 int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
 int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
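For reference, after this change the per-queue counters are exported through the standard xstats API with an underscore between the queue prefix and the stat name (e.g. rx_q0_PKT_LEN_ERRORS, tx_q0_QUEUE_FULL_CNT). A rough application-side sketch of reading them follows; dump_queue_xstats is a hypothetical helper, not part of the patch, and error handling is trimmed for brevity:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_queue_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int n, i;

	/* Calling with a NULL array returns the required array length. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = malloc(sizeof(*names) * n);
	values = malloc(sizeof(*values) * n);
	if (names == NULL || values == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, values, n) != n)
		goto out;

	/* Includes the renamed per-queue entries such as rx_q0_L2_ERRORS. */
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);
out:
	free(names);
	free(values);
}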