This series adds a new feature (LSC event report) and two bugfixes, as well as some cleanup patches.
Chengchang Tang (2): net/hns3: reconstruct the Rx interrupt map net/hns3: fix interrupt resources in Rx interrupt mode
Chengwen Feng (1): net/hns3: support LSC event report
Huisong Li (2): net/hns3: encapsulate dfx stats in Rx/Tx datapath net/hns3: move queue stats to xstats
Lijun Ou (9): net/hns3: use array instead of switch-case net/hns3: move judgment conditions to separated functions net/hns3: extract common judgments for all FDIR type net/hns3: refactor reset event report function net/hns3: fix memory leak with secondary process exit net/hns3: rename RSS functions net/hns3: adjust some comments net/hns3: remove unnecessary parentheses net/hns3: use %d instead of %u for enum variable
drivers/net/hns3/hns3_cmd.c | 54 ++-- drivers/net/hns3/hns3_cmd.h | 9 +- drivers/net/hns3/hns3_ethdev.c | 208 ++++++++++------ drivers/net/hns3/hns3_ethdev.h | 4 +- drivers/net/hns3/hns3_ethdev_vf.c | 111 ++++++--- drivers/net/hns3/hns3_flow.c | 180 ++++++-------- drivers/net/hns3/hns3_mbx.c | 14 +- drivers/net/hns3/hns3_regs.c | 2 +- drivers/net/hns3/hns3_regs.h | 24 +- drivers/net/hns3/hns3_rss.c | 12 +- drivers/net/hns3/hns3_rss.h | 4 +- drivers/net/hns3/hns3_rxtx.c | 60 +++-- drivers/net/hns3/hns3_rxtx.h | 148 ++++++----- drivers/net/hns3/hns3_rxtx_vec_neon.h | 2 +- drivers/net/hns3/hns3_stats.c | 450 ++++++++++++++++++++++++++-------- drivers/net/hns3/hns3_stats.h | 15 +- 16 files changed, 840 insertions(+), 457 deletions(-)
From: Huisong Li lihuisong@huawei.com
pkt_len_errors and l2_errors in the Rx datapath indicate that the driver needs to discard received packets. The driver does not discard packets for l3/l4/ol3/ol4_csum_errors in the Rx datapath, nor for the other stats in the Tx datapath. Therefore, to improve code readability and maintainability, it is necessary to encapsulate the error stats and dfx stats.
Signed-off-by: Huisong Li lihuisong@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_rxtx.c | 30 ++--- drivers/net/hns3/hns3_rxtx.h | 134 ++++++++++--------- drivers/net/hns3/hns3_rxtx_vec_neon.h | 2 +- drivers/net/hns3/hns3_stats.c | 243 ++++++++++++++++++++++------------ drivers/net/hns3/hns3_stats.h | 9 +- 5 files changed, 251 insertions(+), 167 deletions(-)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 0badfc9..3d5f74f 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -1792,12 +1792,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, rxq->io_head_reg = (volatile void *)((char *)rxq->io_base + HNS3_RING_RX_HEAD_REG); rxq->rx_buf_len = rx_buf_size; - rxq->l2_errors = 0; - rxq->pkt_len_errors = 0; - rxq->l3_csum_errors = 0; - rxq->l4_csum_errors = 0; - rxq->ol3_csum_errors = 0; - rxq->ol4_csum_errors = 0; + memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats)); + memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
/* CRC len set here is used for amending packet length */ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) @@ -2622,12 +2618,8 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, HNS3_RING_TX_TAIL_REG); txq->min_tx_pkt_len = hw->min_tx_pkt_len; txq->tso_mode = hw->tso_mode; - txq->over_length_pkt_cnt = 0; - txq->exceed_limit_bd_pkt_cnt = 0; - txq->exceed_limit_bd_reassem_fail = 0; - txq->unsupported_tunnel_pkt_cnt = 0; - txq->queue_full_cnt = 0; - txq->pkt_padding_fail_cnt = 0; + memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats)); + rte_spinlock_lock(&hw->lock); dev->data->tx_queues[idx] = txq; rte_spinlock_unlock(&hw->lock); @@ -3350,7 +3342,7 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id, if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) { /* Fill in tunneling parameters if necessary */ if (hns3_parse_tunneling_params(txq, m, tx_desc_id)) { - txq->unsupported_tunnel_pkt_cnt++; + txq->dfx_stats.unsupported_tunnel_pkt_cnt++; return -EINVAL; }
@@ -3380,17 +3372,17 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg, * driver support, the packet will be ignored. */ if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) { - txq->over_length_pkt_cnt++; + txq->dfx_stats.over_length_pkt_cnt++; return -EINVAL; }
max_non_tso_bd_num = txq->max_non_tso_bd_num; if (unlikely(nb_buf > max_non_tso_bd_num)) { - txq->exceed_limit_bd_pkt_cnt++; + txq->dfx_stats.exceed_limit_bd_pkt_cnt++; ret = hns3_reassemble_tx_pkts(tx_pkt, &new_pkt, max_non_tso_bd_num); if (ret) { - txq->exceed_limit_bd_reassem_fail++; + txq->dfx_stats.exceed_limit_bd_reassem_fail++; return ret; } *m_seg = new_pkt; @@ -3528,7 +3520,7 @@ hns3_xmit_pkts_simple(void *tx_queue, nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts); if (unlikely(nb_pkts == 0)) { if (txq->tx_bd_ready == 0) - txq->queue_full_cnt++; + txq->dfx_stats.queue_full_cnt++; return 0; }
@@ -3580,7 +3572,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) nb_buf = tx_pkt->nb_segs;
if (nb_buf > txq->tx_bd_ready) { - txq->queue_full_cnt++; + txq->dfx_stats.queue_full_cnt++; if (nb_tx == 0) return 0;
@@ -3601,7 +3593,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) rte_pktmbuf_pkt_len(tx_pkt); appended = rte_pktmbuf_append(tx_pkt, add_len); if (appended == NULL) { - txq->pkt_padding_fail_cnt++; + txq->dfx_stats.pkt_padding_fail_cnt++; break; }
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h index 6538848..8a0c981 100644 --- a/drivers/net/hns3/hns3_rxtx.h +++ b/drivers/net/hns3/hns3_rxtx.h @@ -266,6 +266,18 @@ struct hns3_entry { struct rte_mbuf *mbuf; };
+struct hns3_rx_dfx_stats { + uint64_t l3_csum_errors; + uint64_t l4_csum_errors; + uint64_t ol3_csum_errors; + uint64_t ol4_csum_errors; +}; + +struct hns3_rx_bd_errors_stats { + uint64_t l2_errors; + uint64_t pkt_len_errors; +}; + struct hns3_rx_queue { void *io_base; volatile void *io_head_reg; @@ -312,12 +324,10 @@ struct hns3_rx_queue { bool pvid_sw_discard_en; bool enabled; /* indicate if Rx queue has been enabled */
- uint64_t l2_errors; - uint64_t pkt_len_errors; - uint64_t l3_csum_errors; - uint64_t l4_csum_errors; - uint64_t ol3_csum_errors; - uint64_t ol4_csum_errors; + /* DFX statistics that driver does not need to discard packets */ + struct hns3_rx_dfx_stats dfx_stats; + /* Error statistics that driver needs to discard packets */ + struct hns3_rx_bd_errors_stats err_stats;
struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM]; uint16_t bulk_mbuf_num; @@ -328,6 +338,57 @@ struct hns3_rx_queue { struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */ };
+/* + * The following items are used for the abnormal errors statistics in + * the Tx datapath. When upper level application calls the + * rte_eth_tx_burst API function to send multiple packets at a time with + * burst mode based on hns3 network engine, there are some abnormal + * conditions that cause the driver to fail to operate the hardware to + * send packets correctly. + * Note: When using burst mode to call the rte_eth_tx_burst API function + * to send multiple packets at a time. When the first abnormal error is + * detected, add one to the relevant error statistics item, and then + * exit the loop of sending multiple packets of the function. That is to + * say, even if there are multiple packets in which abnormal errors may + * be detected in the burst, the relevant error statistics in the driver + * will only be increased by one. + * The detail description of the Tx abnormal errors statistic items as + * below: + * - over_length_pkt_cnt + * Total number of greater than HNS3_MAX_FRAME_LEN the driver + * supported. + * + * - exceed_limit_bd_pkt_cnt + * Total number of exceeding the hardware limited bd which process + * a packet needed bd numbers. + * + * - exceed_limit_bd_reassem_fail + * Total number of exceeding the hardware limited bd fail which + * process a packet needed bd numbers and reassemble fail. + * + * - unsupported_tunnel_pkt_cnt + * Total number of unsupported tunnel packet. The unsupported tunnel + * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet + * with MPLS-in-UDP RFC 7510 header. + * + * - queue_full_cnt + * Total count which the available bd numbers in current bd queue is + * less than the bd numbers with the pkt process needed. + * + * - pkt_padding_fail_cnt + * Total count which the packet length is less than minimum packet + * length(struct hns3_tx_queue::min_tx_pkt_len) supported by + * hardware in Tx direction and fail to be appended with 0. 
+ */ +struct hns3_tx_dfx_stats { + uint64_t over_length_pkt_cnt; + uint64_t exceed_limit_bd_pkt_cnt; + uint64_t exceed_limit_bd_reassem_fail; + uint64_t unsupported_tunnel_pkt_cnt; + uint64_t queue_full_cnt; + uint64_t pkt_padding_fail_cnt; +}; + struct hns3_tx_queue { void *io_base; volatile void *io_tail_reg; @@ -411,54 +472,7 @@ struct hns3_tx_queue { bool pvid_sw_shift_en; bool enabled; /* indicate if Tx queue has been enabled */
- /* - * The following items are used for the abnormal errors statistics in - * the Tx datapath. When upper level application calls the - * rte_eth_tx_burst API function to send multiple packets at a time with - * burst mode based on hns3 network engine, there are some abnormal - * conditions that cause the driver to fail to operate the hardware to - * send packets correctly. - * Note: When using burst mode to call the rte_eth_tx_burst API function - * to send multiple packets at a time. When the first abnormal error is - * detected, add one to the relevant error statistics item, and then - * exit the loop of sending multiple packets of the function. That is to - * say, even if there are multiple packets in which abnormal errors may - * be detected in the burst, the relevant error statistics in the driver - * will only be increased by one. - * The detail description of the Tx abnormal errors statistic items as - * below: - * - over_length_pkt_cnt - * Total number of greater than HNS3_MAX_FRAME_LEN the driver - * supported. - * - * - exceed_limit_bd_pkt_cnt - * Total number of exceeding the hardware limited bd which process - * a packet needed bd numbers. - * - * - exceed_limit_bd_reassem_fail - * Total number of exceeding the hardware limited bd fail which - * process a packet needed bd numbers and reassemble fail. - * - * - unsupported_tunnel_pkt_cnt - * Total number of unsupported tunnel packet. The unsupported tunnel - * type: vxlan_gpe, gtp, ipip and MPLSINUDP, MPLSINUDP is a packet - * with MPLS-in-UDP RFC 7510 header. - * - * - queue_full_cnt - * Total count which the available bd numbers in current bd queue is - * less than the bd numbers with the pkt process needed. - * - * - pkt_padding_fail_cnt - * Total count which the packet length is less than minimum packet - * length(struct hns3_tx_queue::min_tx_pkt_len) supported by - * hardware in Tx direction and fail to be appended with 0. 
- */ - uint64_t over_length_pkt_cnt; - uint64_t exceed_limit_bd_pkt_cnt; - uint64_t exceed_limit_bd_reassem_fail; - uint64_t unsupported_tunnel_pkt_cnt; - uint64_t queue_full_cnt; - uint64_t pkt_padding_fail_cnt; + struct hns3_tx_dfx_stats dfx_stats; };
#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \ @@ -511,9 +525,9 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) { if (l234_info & BIT(HNS3_RXD_L2E_B)) - rxq->l2_errors++; + rxq->err_stats.l2_errors++; else - rxq->pkt_len_errors++; + rxq->err_stats.pkt_len_errors++; return -EINVAL; }
@@ -525,24 +539,24 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) { rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; - rxq->l3_csum_errors++; + rxq->dfx_stats.l3_csum_errors++; tmp |= HNS3_L3_CKSUM_ERR; }
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) { rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; - rxq->l4_csum_errors++; + rxq->dfx_stats.l4_csum_errors++; tmp |= HNS3_L4_CKSUM_ERR; }
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) { - rxq->ol3_csum_errors++; + rxq->dfx_stats.ol3_csum_errors++; tmp |= HNS3_OUTER_L3_CKSUM_ERR; }
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) { rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; - rxq->ol4_csum_errors++; + rxq->dfx_stats.ol4_csum_errors++; tmp |= HNS3_OUTER_L4_CKSUM_ERR; } } diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h index 54addbf..a693b4b 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -42,7 +42,7 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue,
nb_commit = RTE_MIN(txq->tx_bd_ready, nb_pkts); if (unlikely(nb_commit == 0)) { - txq->queue_full_cnt++; + txq->dfx_stats.queue_full_cnt++; return 0; } nb_tx = nb_commit; diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index 62a712b..419d7e2 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -262,34 +262,38 @@ static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
/* The statistic of errors in Rx BD */ static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = { - {"RX_PKT_LEN_ERRORS", + {"PKT_LEN_ERRORS", HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)}, - {"L2_RX_ERRORS", - HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}, - {"RX_L3_CHECKSUM_ERRORS", - HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l3_csum_errors)}, - {"RX_L4_CHECKSUM_ERRORS", - HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l4_csum_errors)}, - {"RX_OL3_CHECKSUM_ERRORS", - HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol3_csum_errors)}, - {"RX_OL4_CHECKSUM_ERRORS", - HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_errors)} + {"L2_ERRORS", + HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)} };
-/* The statistic of the Tx errors */ -static const struct hns3_xstats_name_offset hns3_tx_errors_strings[] = { - {"TX_OVER_LENGTH_PKT_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(over_length_pkt_cnt)}, - {"TX_EXCEED_LIMITED_BD_PKT_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)}, - {"TX_EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)}, - {"TX_UNSUPPORTED_TUNNEL_PKT_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)}, - {"TX_QUEUE_FULL_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(queue_full_cnt)}, - {"TX_SHORT_PKT_PAD_FAIL_CNT", - HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)} +/* The dfx statistic in Rx datapath */ +static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = { + {"L3_CHECKSUM_ERRORS", + HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)}, + {"L4_CHECKSUM_ERRORS", + HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)}, + {"OL3_CHECKSUM_ERRORS", + HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)}, + {"OL4_CHECKSUM_ERRORS", + HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)} +}; + +/* The dfx statistic in Tx datapath */ +static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = { + {"OVER_LENGTH_PKT_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)}, + {"EXCEED_LIMITED_BD_PKT_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)}, + {"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)}, + {"UNSUPPORTED_TUNNEL_PKT_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)}, + {"QUEUE_FULL_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)}, + {"SHORT_PKT_PAD_FAIL_CNT", + HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)} };
/* The statistic of rx queue */ @@ -314,8 +318,11 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = { #define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \ sizeof(hns3_rx_bd_error_strings[0]))
-#define HNS3_NUM_TX_ERRORS_XSTATS (sizeof(hns3_tx_errors_strings) / \ - sizeof(hns3_tx_errors_strings[0])) +#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \ + sizeof(hns3_rxq_dfx_stats_strings[0])) + +#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \ + sizeof(hns3_txq_dfx_stats_strings[0]))
#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \ sizeof(hns3_rx_queue_strings[0])) @@ -519,7 +526,8 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) for (i = 0; i != num; ++i) { rxq = eth_dev->data->rx_queues[i]; if (rxq) { - cnt = rxq->l2_errors + rxq->pkt_len_errors; + cnt = rxq->err_stats.l2_errors + + rxq->err_stats.pkt_len_errors; rte_stats->q_errors[i] = cnt; rte_stats->q_ipackets[i] = stats->rcb_rx_ring_pktnum[i] - cnt; @@ -584,11 +592,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) * Clear soft stats of rx error packet which will be dropped * in driver. */ - for (i = 0; i < eth_dev->data->nb_rx_queues; ++i) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { rxq = eth_dev->data->rx_queues[i]; if (rxq) { - rxq->pkt_len_errors = 0; - rxq->l2_errors = 0; + rxq->err_stats.pkt_len_errors = 0; + rxq->err_stats.l2_errors = 0; } }
@@ -621,21 +629,24 @@ static int hns3_xstats_calc_num(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; - int bderr_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS; - int tx_err_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_ERRORS_XSTATS; - int rx_queue_stats = dev->data->nb_rx_queues * HNS3_NUM_RX_QUEUE_STATS; - int tx_queue_stats = dev->data->nb_tx_queues * HNS3_NUM_TX_QUEUE_STATS; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS; + int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS; + int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS; + int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS; + int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS;
if (hns->is_vf) - return bderr_stats + tx_err_stats + rx_queue_stats + - tx_queue_stats + HNS3_NUM_RESET_XSTATS; + return bderr_stats + rx_dfx_stats + tx_dfx_stats + + rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS; else - return bderr_stats + tx_err_stats + rx_queue_stats + - tx_queue_stats + HNS3_FIX_NUM_STATS; + return bderr_stats + rx_dfx_stats + tx_dfx_stats + + rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS; }
static void -hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, int *count) { struct hns3_adapter *hns = dev->data->dev_private; @@ -683,6 +694,63 @@ hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err) } }
+static void +hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_rx_dfx_stats *dfx_stats; + struct hns3_rx_queue *rxq; + uint16_t i, j; + char *val; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i]; + if (rxq == NULL) + continue; + + dfx_stats = &rxq->dfx_stats; + for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) { + val = (char *)dfx_stats + + hns3_rxq_dfx_stats_strings[j].offset; + xstats[*count].value = *(uint64_t *)val; + xstats[*count].id = *count; + (*count)++; + } + } +} + +static void +hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_tx_dfx_stats *dfx_stats; + struct hns3_tx_queue *txq; + uint16_t i, j; + char *val; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = (struct hns3_tx_queue *)dev->data->tx_queues[i]; + if (txq == NULL) + continue; + + dfx_stats = &txq->dfx_stats; + for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) { + val = (char *)dfx_stats + + hns3_txq_dfx_stats_strings[j].offset; + xstats[*count].value = *(uint64_t *)val; + xstats[*count].id = *count; + (*count)++; + } + } +} + +static void +hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + hns3_rxq_dfx_stats_get(dev, xstats, count); + hns3_txq_dfx_stats_get(dev, xstats, count); +} /* * Retrieve extended(tqp | Mac) statistics of an Ethernet device. 
* @param dev @@ -705,8 +773,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, struct hns3_hw *hw = &hns->hw; struct hns3_mac_stats *mac_stats = &hw->mac_stats; struct hns3_reset_stats *reset_stats = &hw->reset.stats; + struct hns3_rx_bd_errors_stats *rx_err_stats; struct hns3_rx_queue *rxq; - struct hns3_tx_queue *txq; uint16_t i, j; char *addr; int count; @@ -758,26 +826,49 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, for (j = 0; j < dev->data->nb_rx_queues; j++) { for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) { rxq = dev->data->rx_queues[j]; - addr = (char *)rxq + hns3_rx_bd_error_strings[i].offset; - xstats[count].value = *(uint64_t *)addr; - xstats[count].id = count; - count++; + if (rxq) { + rx_err_stats = &rxq->err_stats; + addr = (char *)rx_err_stats + + hns3_rx_bd_error_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } } }
- /* Get the Tx errors stats */ - for (j = 0; j < dev->data->nb_tx_queues; j++) { - for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) { - txq = dev->data->tx_queues[j]; - addr = (char *)txq + hns3_tx_errors_strings[i].offset; - xstats[count].value = *(uint64_t *)addr; - xstats[count].id = count; - count++; + hns3_tqp_dfx_stats_get(dev, xstats, &count); + hns3_queue_stats_get(dev, xstats, &count); + + return count; +} + +static void +hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + uint32_t *count) +{ + uint16_t i, j; + + for (j = 0; j < dev->data->nb_rx_queues; j++) { + for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) { + snprintf(xstats_names[*count].name, + sizeof(xstats_names[*count].name), + "rx_q%u_%s", j, + hns3_rxq_dfx_stats_strings[i].name); + (*count)++; } }
- hns3_get_queue_stats(dev, xstats, &count); - return count; + for (j = 0; j < dev->data->nb_tx_queues; j++) { + for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) { + snprintf(xstats_names[*count].name, + sizeof(xstats_names[*count].name), + "tx_q%u_%s", j, + hns3_txq_dfx_stats_strings[i].name); + (*count)++; + } + } }
/* @@ -845,27 +936,19 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) { snprintf(xstats_names[count].name, sizeof(xstats_names[count].name), - "rx_q%u%s", j, + "rx_q%u_%s", j, hns3_rx_bd_error_strings[i].name); count++; } }
- for (j = 0; j < dev->data->nb_tx_queues; j++) { - for (i = 0; i < HNS3_NUM_TX_ERRORS_XSTATS; i++) { - snprintf(xstats_names[count].name, - sizeof(xstats_names[count].name), - "tx_q%u%s", j, - hns3_tx_errors_strings[i].name); - count++; - } - } + hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
for (j = 0; j < dev->data->nb_rx_queues; j++) { for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) { snprintf(xstats_names[count].name, sizeof(xstats_names[count].name), - "rx_q%u%s", j, hns3_rx_queue_strings[i].name); + "rx_q%u_%s", j, hns3_rx_queue_strings[i].name); count++; } } @@ -874,7 +957,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) { snprintf(xstats_names[count].name, sizeof(xstats_names[count].name), - "tx_q%u%s", j, hns3_tx_queue_strings[i].name); + "tx_q%u_%s", j, hns3_tx_queue_strings[i].name); count++; } } @@ -1043,30 +1126,22 @@ hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev) { struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; - int i; + uint16_t i;
/* Clear Rx dfx stats */ - for (i = 0; i < dev->data->nb_rx_queues; ++i) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - if (rxq) { - rxq->l3_csum_errors = 0; - rxq->l4_csum_errors = 0; - rxq->ol3_csum_errors = 0; - rxq->ol4_csum_errors = 0; - } + if (rxq) + memset(&rxq->dfx_stats, 0, + sizeof(struct hns3_rx_dfx_stats)); }
/* Clear Tx dfx stats */ - for (i = 0; i < dev->data->nb_tx_queues; ++i) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; - if (txq) { - txq->over_length_pkt_cnt = 0; - txq->exceed_limit_bd_pkt_cnt = 0; - txq->exceed_limit_bd_reassem_fail = 0; - txq->unsupported_tunnel_pkt_cnt = 0; - txq->queue_full_cnt = 0; - txq->pkt_padding_fail_cnt = 0; - } + if (txq) + memset(&txq->dfx_stats, 0, + sizeof(struct hns3_tx_dfx_stats)); } }
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h index 9fcd5f9..12842cd 100644 --- a/drivers/net/hns3/hns3_stats.h +++ b/drivers/net/hns3/hns3_stats.h @@ -127,10 +127,13 @@ struct hns3_reset_stats; (offsetof(struct hns3_reset_stats, f))
#define HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(f) \ - (offsetof(struct hns3_rx_queue, f)) + (offsetof(struct hns3_rx_bd_errors_stats, f))
-#define HNS3_TX_ERROR_STATS_FIELD_OFFSET(f) \ - (offsetof(struct hns3_tx_queue, f)) +#define HNS3_RXQ_DFX_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_rx_dfx_stats, f)) + +#define HNS3_TXQ_DFX_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_tx_dfx_stats, f))
int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
From: Huisong Li lihuisong@huawei.com
One of the hot discussions in the community recently was moving queue stats to xstats. In this solution, a temporary 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' device flag is created to implement a smooth switch. The first half of this work has been completed in the ethdev framework. Now the driver needs to remove the flag from the driver initialization process and do the rest of the work.
For better readability and consistency, per-queue stats should also be cleared when rte_eth_stats is cleared. Otherwise, the sum of one item in the per-queue stats may be greater than the corresponding item in rte_eth_stats.
Signed-off-by: Huisong Li lihuisong@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 2 - drivers/net/hns3/hns3_ethdev_vf.c | 2 - drivers/net/hns3/hns3_rxtx.c | 2 + drivers/net/hns3/hns3_rxtx.h | 13 ++ drivers/net/hns3/hns3_stats.c | 241 +++++++++++++++++++++++++++++++------- drivers/net/hns3/hns3_stats.h | 6 + 6 files changed, 221 insertions(+), 45 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 449d967..7c51e83 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -6148,8 +6148,6 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) return 0; }
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - ret = hns3_mp_init_primary(); if (ret) { PMD_INIT_LOG(ERR, diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index bb4ec6b..37135d7 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2746,8 +2746,6 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) return 0; }
- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - ret = hns3_mp_init_primary(); if (ret) { PMD_INIT_LOG(ERR, diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 3d5f74f..30f1e06 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -1792,6 +1792,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, rxq->io_head_reg = (volatile void *)((char *)rxq->io_base + HNS3_RING_RX_HEAD_REG); rxq->rx_buf_len = rx_buf_size; + memset(&rxq->basic_stats, 0, sizeof(struct hns3_rx_basic_stats)); memset(&rxq->err_stats, 0, sizeof(struct hns3_rx_bd_errors_stats)); memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats));
@@ -2618,6 +2619,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, HNS3_RING_TX_TAIL_REG); txq->min_tx_pkt_len = hw->min_tx_pkt_len; txq->tso_mode = hw->tso_mode; + memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats)); memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
rte_spinlock_lock(&hw->lock); diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h index 8a0c981..331b507 100644 --- a/drivers/net/hns3/hns3_rxtx.h +++ b/drivers/net/hns3/hns3_rxtx.h @@ -266,6 +266,12 @@ struct hns3_entry { struct rte_mbuf *mbuf; };
+struct hns3_rx_basic_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; +}; + struct hns3_rx_dfx_stats { uint64_t l3_csum_errors; uint64_t l4_csum_errors; @@ -324,6 +330,7 @@ struct hns3_rx_queue { bool pvid_sw_discard_en; bool enabled; /* indicate if Rx queue has been enabled */
+ struct hns3_rx_basic_stats basic_stats; /* DFX statistics that driver does not need to discard packets */ struct hns3_rx_dfx_stats dfx_stats; /* Error statistics that driver needs to discard packets */ @@ -338,6 +345,11 @@ struct hns3_rx_queue { struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */ };
+struct hns3_tx_basic_stats { + uint64_t packets; + uint64_t bytes; +}; + /* * The following items are used for the abnormal errors statistics in * the Tx datapath. When upper level application calls the @@ -472,6 +484,7 @@ struct hns3_tx_queue { bool pvid_sw_shift_en; bool enabled; /* indicate if Tx queue has been enabled */
+ struct hns3_tx_basic_stats basic_stats; struct hns3_tx_dfx_stats dfx_stats; };
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index 419d7e2..3ba09e2 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -11,6 +11,24 @@ #include "hns3_logs.h" #include "hns3_regs.h"
+/* The statistics of the per-rxq basic stats */ +static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = { + {"packets", + HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)}, + {"bytes", + HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)}, + {"errors", + HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)} +}; + +/* The statistics of the per-txq basic stats */ +static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = { + {"packets", + HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)}, + {"bytes", + HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)} +}; + /* MAC statistics */ static const struct hns3_xstats_name_offset hns3_mac_strings[] = { {"mac_tx_mac_pause_num", @@ -330,6 +348,12 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = { #define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \ sizeof(hns3_tx_queue_strings[0]))
+#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \ + sizeof(hns3_rxq_basic_stats_strings[0])) + +#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \ + sizeof(hns3_txq_basic_stats_strings[0])) + #define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \ HNS3_NUM_RESET_XSTATS)
@@ -508,9 +532,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) struct hns3_hw *hw = &hns->hw; struct hns3_tqp_stats *stats = &hw->tqp_stats; struct hns3_rx_queue *rxq; - struct hns3_tx_queue *txq; uint64_t cnt; - uint64_t num; uint16_t i; int ret;
@@ -522,25 +544,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) }
/* Get the error stats of received packets */ - num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues); - for (i = 0; i != num; ++i) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { rxq = eth_dev->data->rx_queues[i]; if (rxq) { cnt = rxq->err_stats.l2_errors + rxq->err_stats.pkt_len_errors; - rte_stats->q_errors[i] = cnt; - rte_stats->q_ipackets[i] = - stats->rcb_rx_ring_pktnum[i] - cnt; rte_stats->ierrors += cnt; } } - /* Get the error stats of transmitted packets */ - num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues); - for (i = 0; i < num; i++) { - txq = eth_dev->data->tx_queues[i]; - if (txq) - rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i]; - }
rte_stats->oerrors = 0; rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - @@ -600,6 +611,11 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) } }
+ /* + * 'packets' in hns3_tx_basic_stats and hns3_rx_basic_stats come + * from hw->tqp_stats. And clearing tqp stats is like clearing + * their source. + */ hns3_tqp_stats_clear(hw);
return 0; @@ -628,21 +644,26 @@ hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev) static int hns3_xstats_calc_num(struct rte_eth_dev *dev) { +#define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \ + HNS3_NUM_RXQ_DFX_XSTATS + \ + HNS3_NUM_RX_QUEUE_STATS + \ + HNS3_NUM_RXQ_BASIC_STATS) +#define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \ + HNS3_NUM_TX_QUEUE_STATS + \ + HNS3_NUM_TXQ_BASIC_STATS) + struct hns3_adapter *hns = dev->data->dev_private; uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; - int bderr_stats = nb_rx_q * HNS3_NUM_RX_BD_ERROR_XSTATS; - int rx_dfx_stats = nb_rx_q * HNS3_NUM_RXQ_DFX_XSTATS; - int tx_dfx_stats = nb_tx_q * HNS3_NUM_TXQ_DFX_XSTATS; - int rx_queue_stats = nb_rx_q * HNS3_NUM_RX_QUEUE_STATS; - int tx_queue_stats = nb_tx_q * HNS3_NUM_TX_QUEUE_STATS; + int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM; + int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
if (hns->is_vf) - return bderr_stats + rx_dfx_stats + tx_dfx_stats + - rx_queue_stats + tx_queue_stats + HNS3_NUM_RESET_XSTATS; + return rx_comm_stats_num + tx_comm_stats_num + + HNS3_NUM_RESET_XSTATS; else - return bderr_stats + rx_dfx_stats + tx_dfx_stats + - rx_queue_stats + tx_queue_stats + HNS3_FIX_NUM_STATS; + return rx_comm_stats_num + tx_comm_stats_num + + HNS3_FIX_NUM_STATS; }
static void @@ -751,6 +772,118 @@ hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, hns3_rxq_dfx_stats_get(dev, xstats, count); hns3_txq_dfx_stats_get(dev, xstats, count); } + +static void +hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tqp_stats *stats = &hw->tqp_stats; + struct hns3_rx_basic_stats *rxq_stats; + struct hns3_rx_queue *rxq; + uint16_t i, j; + char *val; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq == NULL) + continue; + + rxq_stats = &rxq->basic_stats; + rxq_stats->errors = rxq->err_stats.l2_errors + + rxq->err_stats.pkt_len_errors; + rxq_stats->packets = stats->rcb_rx_ring_pktnum[i] - + rxq_stats->errors; + rxq_stats->bytes = 0; + for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) { + val = (char *)rxq_stats + + hns3_rxq_basic_stats_strings[j].offset; + xstats[*count].value = *(uint64_t *)val; + xstats[*count].id = *count; + (*count)++; + } + } +} + +static void +hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_tqp_stats *stats = &hw->tqp_stats; + struct hns3_tx_basic_stats *txq_stats; + struct hns3_tx_queue *txq; + uint16_t i, j; + char *val; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq == NULL) + continue; + + txq_stats = &txq->basic_stats; + txq_stats->packets = stats->rcb_tx_ring_pktnum[i]; + txq_stats->bytes = 0; + for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) { + val = (char *)txq_stats + + hns3_txq_basic_stats_strings[j].offset; + xstats[*count].value = *(uint64_t *)val; + xstats[*count].id = *count; + (*count)++; + } + } +} + +static int +hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + int *count) +{ + struct hns3_hw *hw 
= HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + /* Update tqp stats by read register */ + ret = hns3_update_tqp_stats(hw); + if (ret) { + hns3_err(hw, "Update tqp stats fail, ret = %d.", ret); + return ret; + } + + hns3_rxq_basic_stats_get(dev, xstats, count); + hns3_txq_basic_stats_get(dev, xstats, count); + + return 0; +} + +/* + * The function is only called by hns3_dev_xstats_reset to clear + * basic stats of per-queue. TQP stats are all cleared in hns3_stats_reset + * which is called before this function. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev) +{ + struct hns3_tx_queue *txq; + struct hns3_rx_queue *rxq; + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq) + memset(&rxq->basic_stats, 0, + sizeof(struct hns3_rx_basic_stats)); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq) + memset(&txq->basic_stats, 0, + sizeof(struct hns3_tx_basic_stats)); + } +} + /* * Retrieve extended(tqp | Mac) statistics of an Ethernet device. * @param dev @@ -789,6 +922,10 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count = 0;
+ ret = hns3_tqp_basic_stats_get(dev, xstats, &count); + if (ret < 0) + return ret; + if (!hns->is_vf) { /* Update Mac stats */ ret = hns3_query_update_mac_stats(dev); @@ -844,28 +981,55 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, }
static void +hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + uint32_t *count) +{ + uint16_t i, j; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) { + snprintf(xstats_names[*count].name, + sizeof(xstats_names[*count].name), + "rx_q%u_%s", i, + hns3_rxq_basic_stats_strings[j].name); + (*count)++; + } + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) { + snprintf(xstats_names[*count].name, + sizeof(xstats_names[*count].name), + "tx_q%u_%s", i, + hns3_txq_basic_stats_strings[j].name); + (*count)++; + } + } +} + +static void hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, uint32_t *count) { uint16_t i, j;
- for (j = 0; j < dev->data->nb_rx_queues; j++) { - for (i = 0; i < HNS3_NUM_RXQ_DFX_XSTATS; i++) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) { snprintf(xstats_names[*count].name, sizeof(xstats_names[*count].name), - "rx_q%u_%s", j, - hns3_rxq_dfx_stats_strings[i].name); + "rx_q%u_%s", i, + hns3_rxq_dfx_stats_strings[j].name); (*count)++; } }
- for (j = 0; j < dev->data->nb_tx_queues; j++) { - for (i = 0; i < HNS3_NUM_TXQ_DFX_XSTATS; i++) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) { snprintf(xstats_names[*count].name, sizeof(xstats_names[*count].name), - "tx_q%u_%s", j, - hns3_txq_dfx_stats_strings[i].name); + "tx_q%u_%s", i, + hns3_txq_dfx_stats_strings[j].name); (*count)++; } } @@ -908,6 +1072,8 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, if (xstats_names == NULL) return cnt_stats;
+ hns3_tqp_basic_stats_name_get(dev, xstats_names, &count); + /* Note: size limited checked in rte_eth_xstats_get_names() */ if (!hns->is_vf) { /* Get MAC name from hw->hw_xstats.mac_stats struct */ @@ -999,7 +1165,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, uint32_t count_value; uint64_t len; uint32_t i; - int ret;
if (ids == NULL && values == NULL) return cnt_stats; @@ -1008,13 +1173,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, if (size < cnt_stats) return cnt_stats;
- /* Update tqp stats by read register */ - ret = hns3_update_tqp_stats(hw); - if (ret) { - hns3_err(hw, "Update tqp stats fail : %d", ret); - return ret; - } - len = cnt_stats * sizeof(struct rte_eth_xstat); values_copy = rte_zmalloc("hns3_xstats_values", len, 0); if (values_copy == NULL) { @@ -1157,11 +1315,12 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev) if (ret) return ret;
+ hns3_tqp_basic_stats_clear(dev); + hns3_tqp_dfx_stats_clear(dev); + /* Clear reset stats */ memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
- hns3_tqp_dfx_stats_clear(dev); - if (hns->is_vf) return 0;
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h index 12842cd..d213be5 100644 --- a/drivers/net/hns3/hns3_stats.h +++ b/drivers/net/hns3/hns3_stats.h @@ -135,6 +135,12 @@ struct hns3_reset_stats; #define HNS3_TXQ_DFX_STATS_FIELD_OFFSET(f) \ (offsetof(struct hns3_tx_dfx_stats, f))
+#define HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_rx_basic_stats, f)) + +#define HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_tx_basic_stats, f)) + int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n);
Here an errno array is used instead of a switch-case to refactor the hns3_cmd_convert_err_code function. Besides, we add a type for ROH (RDMA Over HCCS) to check the cmdq return error in Kunpeng930 NIC hardware.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_cmd.c | 54 ++++++++++++++++++++++----------------------- drivers/net/hns3/hns3_cmd.h | 1 + 2 files changed, 27 insertions(+), 28 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index f58f4f7..4c301cb 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -247,34 +247,32 @@ hns3_is_special_opcode(uint16_t opcode) static int hns3_cmd_convert_err_code(uint16_t desc_ret) { - switch (desc_ret) { - case HNS3_CMD_EXEC_SUCCESS: - return 0; - case HNS3_CMD_NO_AUTH: - return -EPERM; - case HNS3_CMD_NOT_SUPPORTED: - return -EOPNOTSUPP; - case HNS3_CMD_QUEUE_FULL: - return -EXFULL; - case HNS3_CMD_NEXT_ERR: - return -ENOSR; - case HNS3_CMD_UNEXE_ERR: - return -ENOTBLK; - case HNS3_CMD_PARA_ERR: - return -EINVAL; - case HNS3_CMD_RESULT_ERR: - return -ERANGE; - case HNS3_CMD_TIMEOUT: - return -ETIME; - case HNS3_CMD_HILINK_ERR: - return -ENOLINK; - case HNS3_CMD_QUEUE_ILLEGAL: - return -ENXIO; - case HNS3_CMD_INVALID: - return -EBADR; - default: - return -EREMOTEIO; - } + static const struct { + uint16_t imp_errcode; + int linux_errcode; + } hns3_cmdq_status[] = { + {HNS3_CMD_EXEC_SUCCESS, 0}, + {HNS3_CMD_NO_AUTH, -EPERM}, + {HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP}, + {HNS3_CMD_QUEUE_FULL, -EXFULL}, + {HNS3_CMD_NEXT_ERR, -ENOSR}, + {HNS3_CMD_UNEXE_ERR, -ENOTBLK}, + {HNS3_CMD_PARA_ERR, -EINVAL}, + {HNS3_CMD_RESULT_ERR, -ERANGE}, + {HNS3_CMD_TIMEOUT, -ETIME}, + {HNS3_CMD_HILINK_ERR, -ENOLINK}, + {HNS3_CMD_QUEUE_ILLEGAL, -ENXIO}, + {HNS3_CMD_INVALID, -EBADR}, + {HNS3_CMD_ROH_CHECK_FAIL, -EINVAL} + }; + + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++) + if (hns3_cmdq_status[i].imp_errcode == desc_ret) + return hns3_cmdq_status[i].linux_errcode; + + return -EREMOTEIO; }
static int diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index e40293b..6152f6e 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -52,6 +52,7 @@ enum hns3_cmd_return_status { HNS3_CMD_HILINK_ERR = 9, HNS3_CMD_QUEUE_ILLEGAL = 10, HNS3_CMD_INVALID = 11, + HNS3_CMD_ROH_CHECK_FAIL = 12 };
enum hns3_cmd_status {
Here some judgement conditions are moved to a separate function for parsing the IPv4 header and TCP header in the hns3_parse_normal function. Also, move the check of the selected RSS input tuple to a separate function named hns3_rss_input_tuple_supported in order to enhance scalability and reduce complexity.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_flow.c | 69 ++++++++++++++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index f303df4..889fa2f 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -525,6 +525,17 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, return 0; }
+static bool +hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask) +{ + if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.hdr_checksum) + return false; + + return true; +} + static int hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, struct rte_flow_error *error) @@ -546,11 +557,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) { ipv4_mask = item->mask; - if (ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.fragment_offset || - ipv4_mask->hdr.time_to_live || - ipv4_mask->hdr.hdr_checksum) { + if (!hns3_check_ipv4_mask_supported(ipv4_mask)) { return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -648,6 +655,18 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, return 0; }
+static bool +hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask) +{ + if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) + return false; + + return true; +} + static int hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, struct rte_flow_error *error) @@ -670,10 +689,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (item->mask) { tcp_mask = item->mask; - if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || - tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags || - tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || - tcp_mask->hdr.tcp_urp) { + if (!hns3_check_tcp_mask_supported(tcp_mask)) { return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -1328,6 +1344,28 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out, return 0; }
+static bool +hns3_rss_input_tuple_supported(struct hns3_hw *hw, + const struct rte_flow_action_rss *rss) +{ + /* + * For IP packet, it is not supported to use src/dst port fields to RSS + * hash for the following packet types. + * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG + * Besides, for Kunpeng920, the NIC HW is not supported to use src/dst + * port fields to RSS hash for IPV6 SCTP packet type. However, the + * Kunpeng930 and future kunpeng series support to use src/dst port + * fields to RSS hash for IPv6 SCTP packet type. + */ + if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) && + (rss->types & ETH_RSS_IP || + (!hw->rss_info.ipv6_sctp_offload_supported && + rss->types & ETH_RSS_NONFRAG_IPV6_SCTP))) + return false; + + return true; +} + /* * This function is used to parse rss action validatation. */ @@ -1386,18 +1424,7 @@ hns3_parse_rss_filter(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, "RSS hash key must be exactly 40 bytes");
- /* - * For Kunpeng920 and Kunpeng930 NIC hardware, it is not supported to - * use dst port/src port fields to RSS hash for the following packet - * types. - * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG - * Besides, for Kunpeng920, The NIC hardware is not supported to use - * src/dst port fields to RSS hash for IPV6 SCTP packet type. - */ - if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) && - (rss->types & ETH_RSS_IP || - (!hw->rss_info.ipv6_sctp_offload_supported && - rss->types & ETH_RSS_NONFRAG_IPV6_SCTP))) + if (!hns3_rss_input_tuple_supported(hw, rss)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, &rss->types,
From: Chengchang Tang tangchengchang@huawei.com
This patch reconstructs the Rx interrupt map to reduce the cyclomatic complexity and improve readability and maintainability.
Signed-off-by: Chengchang Tang tangchengchang@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 59 +++++++++++++++++++-------------------- drivers/net/hns3/hns3_ethdev_vf.c | 55 ++++++++++++++++++------------------ 2 files changed, 56 insertions(+), 58 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 7c51e83..f3ce639 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -4782,27 +4782,28 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) uint16_t q_id; int ret;
- if (dev->data->dev_conf.intr_conf.rxq == 0) + /* + * hns3 needs a separate interrupt to be used as event interrupt which + * could not be shared with task queue pair, so KERNEL drivers need + * support multiple interrupt vectors. + */ + if (dev->data->dev_conf.intr_conf.rxq == 0 || + !rte_intr_cap_multiple(intr_handle)) return 0;
- /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); + intr_vector = hw->used_rx_queues; + /* creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL;
- /* check and configure queue intr-vector mapping */ - if (rte_intr_cap_multiple(intr_handle) || - !RTE_ETH_DEV_SRIOV(dev).active) { - intr_vector = hw->used_rx_queues; - /* creates event fd for each intr vector when MSIX is used */ - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -EINVAL; - } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + if (intr_handle->intr_vec == NULL) { intr_handle->intr_vec = rte_zmalloc("intr_vec", hw->used_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { - hns3_err(hw, "Failed to allocate %u rx_queues" - " intr_vec", hw->used_rx_queues); + hns3_err(hw, "failed to allocate %u rx_queues intr_vec", + hw->used_rx_queues); ret = -ENOMEM; goto alloc_intr_vec_error; } @@ -4812,28 +4813,26 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) vec = RTE_INTR_VEC_RXTX_OFFSET; base = RTE_INTR_VEC_RXTX_OFFSET; } - if (rte_intr_dp_is_en(intr_handle)) { - for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { - ret = hns3_bind_ring_with_vector(hw, vec, true, - HNS3_RING_TYPE_RX, - q_id); - if (ret) - goto bind_vector_error; - intr_handle->intr_vec[q_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + goto bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + /* + * If there are not enough efds (e.g. not enough interrupt), + * remaining queues will be bond to the last interrupt. + */ + if (vec < base + intr_handle->nb_efd - 1) + vec++; } rte_intr_enable(intr_handle); return 0;
bind_vector_error: - rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } - return ret; + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; alloc_intr_vec_error: rte_intr_efd_disable(intr_handle); return ret; diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 37135d7..3a1d4cb 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2085,21 +2085,22 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) uint16_t q_id; int ret;
- if (dev->data->dev_conf.intr_conf.rxq == 0) + /* + * hns3 needs a separate interrupt to be used as event interrupt which + * could not be shared with task queue pair, so KERNEL drivers need + * support multiple interrupt vectors. + */ + if (dev->data->dev_conf.intr_conf.rxq == 0 || + !rte_intr_cap_multiple(intr_handle)) return 0;
- /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); + intr_vector = hw->used_rx_queues; + /* It creates event fd for each intr vector when MSIX is used */ + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -EINVAL;
- /* check and configure queue intr-vector mapping */ - if (rte_intr_cap_multiple(intr_handle) || - !RTE_ETH_DEV_SRIOV(dev).active) { - intr_vector = hw->used_rx_queues; - /* It creates event fd for each intr vector when MSIX is used */ - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -EINVAL; - } - if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + if (intr_handle->intr_vec == NULL) { intr_handle->intr_vec = rte_zmalloc("intr_vec", hw->used_rx_queues * sizeof(int), 0); @@ -2115,28 +2116,26 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) vec = RTE_INTR_VEC_RXTX_OFFSET; base = RTE_INTR_VEC_RXTX_OFFSET; } - if (rte_intr_dp_is_en(intr_handle)) { - for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { - ret = hns3vf_bind_ring_with_vector(hw, vec, true, - HNS3_RING_TYPE_RX, - q_id); - if (ret) - goto vf_bind_vector_error; - intr_handle->intr_vec[q_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + + for (q_id = 0; q_id < hw->used_rx_queues; q_id++) { + ret = hns3vf_bind_ring_with_vector(hw, vec, true, + HNS3_RING_TYPE_RX, q_id); + if (ret) + goto vf_bind_vector_error; + intr_handle->intr_vec[q_id] = vec; + /* + * If there are not enough efds (e.g. not enough interrupt), + * remaining queues will be bond to the last interrupt. + */ + if (vec < base + intr_handle->nb_efd - 1) + vec++; } rte_intr_enable(intr_handle); return 0;
vf_bind_vector_error: - rte_intr_efd_disable(intr_handle); - if (intr_handle->intr_vec) { - free(intr_handle->intr_vec); - intr_handle->intr_vec = NULL; - } - return ret; + free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; vf_alloc_intr_vec_error: rte_intr_efd_disable(intr_handle); return ret;
When parsing flow director rules of all types, the item spec and item mask need to be checked for every packet type. The judgement is the same for all types. Therefore, we move it into a single common location.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_flow.c | 84 +++++++++++--------------------------------- 1 file changed, 20 insertions(+), 64 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index 889fa2f..9b161f4 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -433,17 +433,12 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) }
static int -hns3_parse_eth(const struct rte_flow_item *item, - struct hns3_fdir_rule *rule, struct rte_flow_error *error) +hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + struct rte_flow_error *error __rte_unused) { const struct rte_flow_item_eth *eth_spec; const struct rte_flow_item_eth *eth_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - /* Only used to describe the protocol stack. */ if (item->spec == NULL && item->mask == NULL) return 0; @@ -483,11 +478,6 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_vlan *vlan_spec; const struct rte_flow_item_vlan *vlan_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - rule->key_conf.vlan_num++; if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX) return rte_flow_error_set(error, EINVAL, @@ -543,14 +533,10 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_ipv4 *ipv4_spec; const struct rte_flow_item_ipv4 *ipv4_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4; rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; + /* Only used to describe the protocol stack. */ if (item->spec == NULL && item->mask == NULL) return 0; @@ -606,11 +592,6 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_ipv6 *ipv6_spec; const struct rte_flow_item_ipv6 *ipv6_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1); rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6; rule->key_conf.mask.ether_type = ETHER_TYPE_MASK; @@ -674,11 +655,6 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_tcp *tcp_spec; const struct rte_flow_item_tcp *tcp_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); rule->key_conf.spec.ip_proto = IPPROTO_TCP; rule->key_conf.mask.ip_proto = IPPROTO_MASK; @@ -722,11 +698,6 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_udp *udp_spec; const struct rte_flow_item_udp *udp_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); rule->key_conf.spec.ip_proto = IPPROTO_UDP; rule->key_conf.mask.ip_proto = IPPROTO_MASK; @@ -768,11 +739,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_sctp *sctp_spec; const struct rte_flow_item_sctp *sctp_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); rule->key_conf.spec.ip_proto = IPPROTO_SCTP; rule->key_conf.mask.ip_proto = IPPROTO_MASK; @@ -904,15 +870,6 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_vxlan *vxlan_spec; const struct rte_flow_item_vxlan *vxlan_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - else if (item->spec && (item->mask == NULL)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Tunnel packets must configure with mask"); - hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) @@ -955,15 +912,6 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_nvgre *nvgre_spec; const struct rte_flow_item_nvgre *nvgre_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - else if (item->spec && (item->mask == NULL)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Tunnel packets must configure with mask"); - hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1); rule->key_conf.spec.outer_proto = IPPROTO_GRE; rule->key_conf.mask.outer_proto = IPPROTO_MASK; @@ -1013,15 +961,6 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, const struct rte_flow_item_geneve *geneve_spec; const struct rte_flow_item_geneve *geneve_mask;
- if (item->spec == NULL && item->mask) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Can't configure FDIR with mask but without spec"); - else if (item->spec && (item->mask == NULL)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, item, - "Tunnel packets must configure with mask"); - hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1); rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE; rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK; @@ -1058,6 +997,17 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, { int ret;
+ if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask " + "but without spec"); + else if (item->spec && (item->mask == NULL)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Tunnel packets must configure " + "with mask"); + switch (item->type) { case RTE_FLOW_ITEM_TYPE_VXLAN: case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: @@ -1086,6 +1036,12 @@ hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, { int ret;
+ if (item->spec == NULL && item->mask) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Can't configure FDIR with mask " + "but without spec"); + switch (item->type) { case RTE_FLOW_ITEM_TYPE_ETH: ret = hns3_parse_eth(item, rule, error);
Here the processing code of the IMP reset report and the global reset report is encapsulated into separate functions in order to reduce the complexity of the hns3_check_event_cause function.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 69 +++++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index f3ce639..817d1dc 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -123,6 +123,47 @@ hns3_pf_enable_irq0(struct hns3_hw *hw) }
static enum hns3_evt_cause +hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay, + uint32_t *vec_val) +{ + struct hns3_hw *hw = &hns->hw; + + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); + if (!is_delay) { + hw->reset.stats.imp_cnt++; + hns3_warn(hw, "IMP reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "IMP reset detected, don't clear reset status"); + } + + return HNS3_VECTOR0_EVENT_RST; +} + +static enum hns3_evt_cause +hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay, + uint32_t *vec_val) +{ + struct hns3_hw *hw = &hns->hw; + + rte_atomic16_set(&hw->reset.disable_cmd, 1); + hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); + if (!is_delay) { + hw->reset.stats.global_cnt++; + hns3_warn(hw, "Global reset detected, clear reset status"); + } else { + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, + "Global reset detected, don't clear reset status"); + } + + return HNS3_VECTOR0_EVENT_RST; +} + +static enum hns3_evt_cause hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) { struct hns3_hw *hw = &hns->hw; @@ -131,12 +172,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) uint32_t hw_err_src_reg; uint32_t val; enum hns3_evt_cause ret; + bool is_delay;
/* fetch the events from their corresponding regs */ vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
+ is_delay = clearval == NULL ? true : false; /* * Assumption: If by any chance reset and mailbox events are reported * together then we will only process reset event and defer the @@ -145,35 +188,13 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) * from H/W just for the mailbox. */ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ - rte_atomic16_set(&hw->reset.disable_cmd, 1); - hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); - val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); - if (clearval) { - hw->reset.stats.imp_cnt++; - hns3_warn(hw, "IMP reset detected, clear reset status"); - } else { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "IMP reset detected, don't clear reset status"); - } - - ret = HNS3_VECTOR0_EVENT_RST; + ret = hns3_proc_imp_reset_event(hns, is_delay, &val); goto out; }
/* Global reset */ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { - rte_atomic16_set(&hw->reset.disable_cmd, 1); - hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); - val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); - if (clearval) { - hw->reset.stats.global_cnt++; - hns3_warn(hw, "Global reset detected, clear reset status"); - } else { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "Global reset detected, don't clear reset status"); - } - - ret = HNS3_VECTOR0_EVENT_RST; + ret = hns3_proc_global_reset_event(hns, is_delay, &val); goto out; }
The secondary process allocates memory for process_private during initialization. Therefore, this memory needs to be released when the process exits.
Fixes: c203571b3602 ("net/hns3: register and add log interface") Cc: stable@dpdk.org
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 7 +++++-- drivers/net/hns3/hns3_ethdev_vf.c | 12 +++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 817d1dc..2a5689c 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -6263,8 +6263,11 @@ hns3_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + return 0; + }
if (hw->adapter_state < HNS3_NIC_CLOSING) hns3_dev_close(eth_dev); diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 3a1d4cb..948d914 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1971,8 +1971,11 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) struct hns3_hw *hw = &hns->hw; int ret = 0;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; return 0; + }
if (hw->adapter_state == HNS3_NIC_STARTED) ret = hns3vf_dev_stop(eth_dev); @@ -2839,8 +2842,11 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + rte_free(eth_dev->process_private); + eth_dev->process_private = NULL; + return 0; + }
if (hw->adapter_state < HNS3_NIC_CLOSING) hns3vf_dev_close(eth_dev);
From: Chengchang Tang tangchengchang@huawei.com
For Kunpeng930, the NIC engine supports 1280 TQPs being taken over by a PF. In this case, a maximum of 1281 interrupt resources are also supported for this PF. To support the maximum number of queues, several patches have been made. But the interrupt-related modifications are missing. So, in Rx interrupt mode, a large number of queues will be aggregated into one interrupt due to insufficient interrupts. This leads to a waste of interrupt resources and reduces usability.
To utilize all these interrupt resources, the related IMP command has been extended. Also, the I/O addresses of the extended interrupt resources are different from the existing ones, so a function used for calculating the register address offset has been added.
Fixes: 76d794566d43 ("net/hns3: maximize queue number") Fixes: 27911a6e62e5 ("net/hns3: add Rx interrupts compatibility") Cc: stable@dpdk.org
Signed-off-by: Chengchang Tang tangchengchang@huawei.com --- drivers/net/hns3/hns3_cmd.h | 8 ++++++-- drivers/net/hns3/hns3_ethdev.c | 17 +++++++++-------- drivers/net/hns3/hns3_regs.c | 2 +- drivers/net/hns3/hns3_regs.h | 24 +++++++++++++++--------- drivers/net/hns3/hns3_rxtx.c | 28 +++++++++++++++++++++++----- drivers/net/hns3/hns3_rxtx.h | 1 + 6 files changed, 55 insertions(+), 25 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index 6152f6e..dc97a1a 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -776,12 +776,16 @@ enum hns3_int_gl_idx { #define HNS3_TQP_ID_M GENMASK(12, 2) #define HNS3_INT_GL_IDX_S 13 #define HNS3_INT_GL_IDX_M GENMASK(14, 13) +#define HNS3_TQP_INT_ID_L_S 0 +#define HNS3_TQP_INT_ID_L_M GENMASK(7, 0) +#define HNS3_TQP_INT_ID_H_S 8 +#define HNS3_TQP_INT_ID_H_M GENMASK(15, 8) struct hns3_ctrl_vector_chain_cmd { - uint8_t int_vector_id; + uint8_t int_vector_id; /* the low order of the interrupt id */ uint8_t int_cause_num; uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD]; uint8_t vfid; - uint8_t rsv; + uint8_t int_vector_id_h; /* the high order of the interrupt id */ };
struct hns3_config_max_frm_size_cmd { diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 2a5689c..4356860 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -2232,7 +2232,7 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev) }
static int -hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, +hns3_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, bool en, enum hns3_ring_type queue_type, uint16_t queue_id) { struct hns3_cmd_desc desc; @@ -2241,13 +2241,15 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, enum hns3_cmd_status status; enum hns3_opcode_type op; uint16_t tqp_type_and_id = 0; - const char *op_str; uint16_t type; uint16_t gl;
- op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; + op = en ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR; hns3_cmd_setup_basic_desc(&desc, op, false); - req->int_vector_id = vector_id; + req->int_vector_id = hns3_get_field(vector_id, HNS3_TQP_INT_ID_L_M, + HNS3_TQP_INT_ID_L_S); + req->int_vector_id_h = hns3_get_field(vector_id, HNS3_TQP_INT_ID_H_M, + HNS3_TQP_INT_ID_H_S);
if (queue_type == HNS3_RING_TYPE_RX) gl = HNS3_RING_GL_RX; @@ -2263,11 +2265,10 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, gl); req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id); req->int_cause_num = 1; - op_str = mmap ? "Map" : "Unmap"; status = hns3_cmd_send(hw, &desc, 1); if (status) { hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.", - op_str, queue_id, req->int_vector_id, status); + en ? "Map" : "Unmap", queue_id, vector_id, status); return status; }
@@ -4797,8 +4798,8 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint8_t base = RTE_INTR_VEC_ZERO_OFFSET; - uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET; + uint16_t base = RTE_INTR_VEC_ZERO_OFFSET; + uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET; uint32_t intr_vector; uint16_t q_id; int ret; diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c index f2cb465..8afe132 100644 --- a/drivers/net/hns3/hns3_regs.c +++ b/drivers/net/hns3/hns3_regs.c @@ -301,7 +301,7 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data)
reg_num = sizeof(tqp_intr_reg_addrs) / sizeof(uint32_t); for (j = 0; j < hw->intr_tqps_num; j++) { - reg_offset = HNS3_TQP_INTR_REG_SIZE * j; + reg_offset = hns3_get_tqp_intr_reg_offset(j); for (i = 0; i < reg_num; i++) *data++ = hns3_read_dev(hw, tqp_intr_reg_addrs[i] + reg_offset); diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h index 81a0af5..39fc5d1 100644 --- a/drivers/net/hns3/hns3_regs.h +++ b/drivers/net/hns3/hns3_regs.h @@ -95,15 +95,21 @@ #define HNS3_MIN_EXTEND_QUEUE_ID 1024
/* bar registers for tqp interrupt */ -#define HNS3_TQP_INTR_CTRL_REG 0x20000 -#define HNS3_TQP_INTR_GL0_REG 0x20100 -#define HNS3_TQP_INTR_GL1_REG 0x20200 -#define HNS3_TQP_INTR_GL2_REG 0x20300 -#define HNS3_TQP_INTR_RL_REG 0x20900 -#define HNS3_TQP_INTR_TX_QL_REG 0x20e00 -#define HNS3_TQP_INTR_RX_QL_REG 0x20f00 - -#define HNS3_TQP_INTR_REG_SIZE 4 +#define HNS3_TQP_INTR_REG_BASE 0x20000 +#define HNS3_TQP_INTR_EXT_REG_BASE 0x30000 +#define HNS3_TQP_INTR_CTRL_REG 0 +#define HNS3_TQP_INTR_GL0_REG 0x100 +#define HNS3_TQP_INTR_GL1_REG 0x200 +#define HNS3_TQP_INTR_GL2_REG 0x300 +#define HNS3_TQP_INTR_RL_REG 0x900 +#define HNS3_TQP_INTR_TX_QL_REG 0xe00 +#define HNS3_TQP_INTR_RX_QL_REG 0xf00 +#define HNS3_TQP_INTR_RL_EN_B 6 + +#define HNS3_MIN_EXT_TQP_INTR_ID 64 +#define HNS3_TQP_INTR_LOW_ORDER_OFFSET 0x4 +#define HNS3_TQP_INTR_HIGH_ORDER_OFFSET 0x1000 + #define HNS3_TQP_INTR_GL_MAX 0x1FE0 #define HNS3_TQP_INTR_GL_DEFAULT 20 #define HNS3_TQP_INTR_GL_UNIT_1US BIT(31) diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 30f1e06..1991b4e 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -834,6 +834,24 @@ hns3_reset_queue(struct hns3_hw *hw, uint16_t queue_id, return ret; }
+uint32_t +hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id) +{ + uint32_t reg_offset; + + /* Need an extend offset to config queues > 64 */ + if (tqp_intr_id < HNS3_MIN_EXT_TQP_INTR_ID) + reg_offset = HNS3_TQP_INTR_REG_BASE + + tqp_intr_id * HNS3_TQP_INTR_LOW_ORDER_OFFSET; + else + reg_offset = HNS3_TQP_INTR_EXT_REG_BASE + + tqp_intr_id / HNS3_MIN_EXT_TQP_INTR_ID * + HNS3_TQP_INTR_HIGH_ORDER_OFFSET + + tqp_intr_id % HNS3_MIN_EXT_TQP_INTR_ID * + HNS3_TQP_INTR_LOW_ORDER_OFFSET; + + return reg_offset; +}
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, @@ -847,7 +865,7 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX) return;
- addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE; + addr = offset[gl_idx] + hns3_get_tqp_intr_reg_offset(queue_id); if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US) value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US; else @@ -864,7 +882,7 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value) if (rl_value > HNS3_TQP_INTR_RL_MAX) return;
- addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + addr = HNS3_TQP_INTR_RL_REG + hns3_get_tqp_intr_reg_offset(queue_id); value = HNS3_RL_USEC_TO_REG(rl_value); if (value > 0) value |= HNS3_TQP_INTR_RL_ENABLE_MASK; @@ -885,10 +903,10 @@ hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value) if (hw->intr.int_ql_max == HNS3_INTR_QL_NONE) return;
- addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + addr = HNS3_TQP_INTR_TX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id); hns3_write_dev(hw, addr, ql_value);
- addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + addr = HNS3_TQP_INTR_RX_QL_REG + hns3_get_tqp_intr_reg_offset(queue_id); hns3_write_dev(hw, addr, ql_value); }
@@ -897,7 +915,7 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en) { uint32_t addr, value;
- addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE; + addr = HNS3_TQP_INTR_CTRL_REG + hns3_get_tqp_intr_reg_offset(queue_id); value = en ? 1 : 0;
hns3_write_dev(hw, addr, value); diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h index 331b507..8f5ae5c 100644 --- a/drivers/net/hns3/hns3_rxtx.h +++ b/drivers/net/hns3/hns3_rxtx.h @@ -680,6 +680,7 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev); void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev); +uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id); void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id, uint8_t gl_idx, uint16_t gl_value); void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
Rename some RSS implementation functions in order to make the function naming style more reasonable and consistent.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 2 +- drivers/net/hns3/hns3_ethdev_vf.c | 2 +- drivers/net/hns3/hns3_flow.c | 2 +- drivers/net/hns3/hns3_rss.c | 12 ++++++------ drivers/net/hns3/hns3_rss.h | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 4356860..94b6e44 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -4685,7 +4685,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_fdir; }
- hns3_set_default_rss_args(hw); + hns3_rss_set_default_args(hw);
ret = hns3_enable_hw_error_intr(hns, true); if (ret) { diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 948d914..7eb0b11 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1832,7 +1832,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) if (ret) goto err_set_tc_queue;
- hns3_set_default_rss_args(hw); + hns3_rss_set_default_args(hw);
return 0;
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index 9b161f4..8a5179d 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -1469,7 +1469,7 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) if (ret) return ret;
- ret = hns3_set_rss_algo_key(hw, rss_config->key); + ret = hns3_rss_set_algo_key(hw, rss_config->key); if (ret) return ret;
diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c index b5df374..7d1a297 100644 --- a/drivers/net/hns3/hns3_rss.c +++ b/drivers/net/hns3/hns3_rss.c @@ -193,7 +193,7 @@ static const struct { * Used to set algorithm, key_offset and hash key of rss. */ int -hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key) +hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) { #define HNS3_KEY_OFFSET_MAX 3 #define HNS3_SET_HASH_KEY_BYTE_FOUR 2 @@ -245,7 +245,7 @@ hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key) * Used to configure the tuple selection for RSS hash input. */ static int -hns3_set_rss_input_tuple(struct hns3_hw *hw) +hns3_rss_set_input_tuple(struct hns3_hw *hw) { struct hns3_rss_conf *rss_config = &hw->rss_info; struct hns3_rss_input_tuple_cmd *req; @@ -443,7 +443,7 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, ret = -EINVAL; goto conf_err; } - ret = hns3_set_rss_algo_key(hw, key); + ret = hns3_rss_set_algo_key(hw, key); if (ret) goto conf_err; } @@ -649,7 +649,7 @@ hns3_rss_tuple_uninit(struct hns3_hw *hw) * Set the default rss configuration in the init of driver. */ void -hns3_set_default_rss_args(struct hns3_hw *hw) +hns3_rss_set_default_args(struct hns3_hw *hw) { struct hns3_rss_conf *rss_cfg = &hw->rss_info; uint16_t queue_num = hw->alloc_rss_size; @@ -696,12 +696,12 @@ hns3_config_rss(struct hns3_adapter *hns) hns3_rss_uninit(hns);
/* Configure RSS hash algorithm and hash key offset */ - ret = hns3_set_rss_algo_key(hw, hash_key); + ret = hns3_rss_set_algo_key(hw, hash_key); if (ret) return ret;
/* Configure the tuple selection for RSS hash input */ - ret = hns3_set_rss_input_tuple(hw); + ret = hns3_rss_set_input_tuple(hw); if (ret) return ret;
diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h index 6d1d25f..05d5c26 100644 --- a/drivers/net/hns3/hns3_rss.h +++ b/drivers/net/hns3/hns3_rss.h @@ -102,7 +102,7 @@ int hns3_dev_rss_reta_update(struct rte_eth_dev *dev, int hns3_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); -void hns3_set_default_rss_args(struct hns3_hw *hw); +void hns3_rss_set_default_args(struct hns3_hw *hw); int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size); int hns3_rss_reset_indir_table(struct hns3_hw *hw); @@ -111,7 +111,7 @@ void hns3_rss_uninit(struct hns3_adapter *hns); int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, struct hns3_rss_tuple_cfg *tuple, uint64_t rss_hf); -int hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key); +int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key); int hns3_restore_rss_filter(struct rte_eth_dev *dev);
#endif /* _HNS3_RSS_H_ */
Fix some erroneous comments and remove some meaningless comments.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_flow.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index 8a5179d..f2bff1e 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -91,9 +91,9 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) /* * This function is used to find rss general action. * 1. As we know RSS is used to spread packets among several queues, the flow - * API provide the struct rte_flow_action_rss, user could config it's field + * API provide the struct rte_flow_action_rss, user could config its field * sush as: func/level/types/key/queue to control RSS function. - * 2. The flow API also support queue region configuration for hns3. It was + * 2. The flow API also supports queue region configuration for hns3. It was * implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule * which action is RSS queues region. * 3. When action is RSS, we use the following rule to distinguish: @@ -128,11 +128,11 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], rss = act->conf; if (have_eth && rss->conf.queue_num) { /* - * Patter have ETH and action's queue_num > 0, indicate this is + * Pattern have ETH and action's queue_num > 0, indicate this is * queue region configuration. * Because queue region is implemented by FDIR + RSS in hns3 - * hardware, it need enter FDIR process, so here return NULL to - * avoid enter RSS process. + * hardware, it needs to enter FDIR process, so here return NULL + * to avoid enter RSS process. */ return NULL; } @@ -405,7 +405,6 @@ hns3_handle_actions(struct rte_eth_dev *dev, return 0; }
-/* Parse to get the attr and action info of flow director rule. */ static int hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error) { @@ -782,7 +781,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, }
/* - * Check items before tunnel, save inner configs to outer configs,and clear + * Check items before tunnel, save inner configs to outer configs, and clear * inner configs. * The key consists of two parts: meta_data and tuple keys. * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel @@ -1473,10 +1472,8 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) if (ret) return ret;
- /* Update algorithm of hw */ hw->rss_info.conf.func = rss_config->func;
- /* Set flow type supported */ tuple = &hw->rss_info.rss_tuple_sets; ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types); if (ret) @@ -1561,7 +1558,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev, if (rss_flow_conf.queue_num) { /* * Due the content of queue pointer have been reset to - * 0, the rss_info->conf.queue should be set NULL + * 0, the rss_info->conf.queue should be set to NULL */ rss_info->conf.queue = NULL; rss_info->conf.queue_num = 0; @@ -1727,7 +1724,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, /* * Create or destroy a flow rule. * Theorically one rule can match more than one filters. - * We will let it use the filter which it hitt first. + * We will let it use the filter which it hit first. * So, the sequence matters. */ static struct rte_flow *
Remove unnecessary parentheses, and keep blank lines where they aid readability.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_flow.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index f2bff1e..e9d0a0b 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -700,6 +700,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1); rule->key_conf.spec.ip_proto = IPPROTO_UDP; rule->key_conf.mask.ip_proto = IPPROTO_MASK; + /* Only used to describe the protocol stack. */ if (item->spec == NULL && item->mask == NULL) return 0; @@ -1264,7 +1265,7 @@ hns3_action_rss_same(const struct rte_flow_action_rss *comp, if (comp->func == RTE_ETH_HASH_FUNCTION_MAX) func_is_same = false; else - func_is_same = (with->func ? (comp->func == with->func) : true); + func_is_same = with->func ? (comp->func == with->func) : true;
return (func_is_same && comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) && @@ -1861,6 +1862,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, flow, "Flow is NULL"); + filter_type = flow->filter_type; switch (filter_type) { case RTE_ETH_FILTER_FDIR:
Use %d as the print format specifier for enumeration members.
Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_flow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index e9d0a0b..3e387ac 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -1447,7 +1447,7 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; break; default: - hns3_err(hw, "Invalid RSS algorithm configuration(%u)", + hns3_err(hw, "Invalid RSS algorithm configuration(%d)", algo_func); return -EINVAL; }
From: Chengwen Feng fengchengwen@huawei.com
This patch supports LSC (Link Status Change) event reporting.
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com --- drivers/net/hns3/hns3_ethdev.c | 52 +++++++++++++++++++++++++++++++++++---- drivers/net/hns3/hns3_ethdev.h | 4 ++- drivers/net/hns3/hns3_ethdev_vf.c | 40 +++++++++++++++++++++++++++++- drivers/net/hns3/hns3_mbx.c | 14 ++++++----- 4 files changed, 97 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 94b6e44..bc77608 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -19,6 +19,7 @@ #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
#define HNS3_SERVICE_INTERVAL 1000000 /* us */ +#define HNS3_SERVICE_QUICK_INTERVAL 10 #define HNS3_INVALID_PVID 0xFFFF
#define HNS3_FILTER_TYPE_VF 0 @@ -93,6 +94,7 @@ static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on); static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); +static bool hns3_update_link_status(struct hns3_hw *hw);
static int hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr); @@ -4458,7 +4460,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw) return !!link_status; }
-void +static bool hns3_update_link_status(struct hns3_hw *hw) { int state; @@ -4467,7 +4469,36 @@ hns3_update_link_status(struct hns3_hw *hw) if (state != hw->mac.link_status) { hw->mac.link_status = state; hns3_warn(hw, "Link status change to %s!", state ? "up" : "down"); + return true; } + + return false; +} + +/* + * Current, the PF driver get link status by two ways: + * 1) Periodic polling in the intr thread context, driver call + * hns3_update_link_status to update link status. + * 2) Firmware report async interrupt, driver process the event in the intr + * thread context, and call hns3_update_link_status to update link status. + * + * If detect link status changed, driver need report LSE. One method is add the + * report LSE logic in hns3_update_link_status. + * + * But the PF driver ops(link_update) also call hns3_update_link_status to + * update link status. + * If we report LSE in hns3_update_link_status, it may lead to deadlock in the + * bonding application. + * + * So add the one new API which used only in intr thread context. + */ +void +hns3_update_link_status_and_event(struct hns3_hw *hw) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + bool changed = hns3_update_link_status(hw); + if (changed) + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); }
static void @@ -4479,9 +4510,10 @@ hns3_service_handler(void *param)
if (!hns3_is_reset_pending(hns)) { hns3_update_speed_duplex(eth_dev); - hns3_update_link_status(hw); - } else + hns3_update_link_status_and_event(hw); + } else { hns3_warn(hw, "Cancel the query when reset is pending"); + }
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); } @@ -5557,8 +5589,10 @@ hns3_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id]; - if (hw->adapter_state == HNS3_NIC_STARTED) + if (hw->adapter_state == HNS3_NIC_STARTED) { rte_eal_alarm_cancel(hns3_service_handler, eth_dev); + hns3_update_link_status_and_event(hw); + } hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev); @@ -5601,7 +5635,15 @@ hns3_start_service(struct hns3_adapter *hns) hns3_set_rxtx_function(eth_dev); hns3_mp_req_start_rxtx(eth_dev); if (hw->adapter_state == HNS3_NIC_STARTED) { - hns3_service_handler(eth_dev); + /* + * This API parent function already hold the hns3_hw.lock, the + * hns3_service_handler may report lse, in bonding application + * it will call driver's ops which may acquire the hns3_hw.lock + * again, thus lead to deadlock. + * We defer calls hns3_service_handler to avoid the deadlock. + */ + rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL, + hns3_service_handler, eth_dev);
/* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 0d17170..547e991 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -946,11 +946,13 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); bool hns3_is_reset_pending(struct hns3_adapter *hns); bool hns3vf_is_reset_pending(struct hns3_adapter *hns); -void hns3_update_link_status(struct hns3_hw *hw); +void hns3_update_link_status_and_event(struct hns3_hw *hw); void hns3_ether_format_addr(char *buf, uint16_t size, const struct rte_ether_addr *ether_addr); int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info); +void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex);
static inline bool is_reset_pending(struct hns3_adapter *hns) diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 7eb0b11..470aaee 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1440,6 +1440,41 @@ hns3vf_request_link_info(struct hns3_hw *hw) hns3_err(hw, "Failed to fetch link status from PF: %d", ret); }
+void +hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex) +{ + struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id]; + struct hns3_mac *mac = &hw->mac; + bool report_lse; + bool changed; + + changed = mac->link_status != link_status || + mac->link_speed != link_speed || + mac->link_duplex != link_duplex; + if (!changed) + return; + + /* + * VF's link status/speed/duplex were updated by polling from PF driver, + * because the link status/speed/duplex may be changed in the polling + * interval, so driver will report lse (lsc event) once any of the above + * thress variables changed. + * But if the PF's link status is down and driver saved link status is + * also down, there are no need to report lse. + */ + report_lse = true; + if (link_status == ETH_LINK_DOWN && link_status == mac->link_status) + report_lse = false; + + mac->link_status = link_status; + mac->link_speed = link_speed; + mac->link_duplex = link_duplex; + + if (report_lse) + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); +} + static int hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) { @@ -2373,8 +2408,11 @@ hns3vf_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id]; - if (hw->adapter_state == HNS3_NIC_STARTED) + if (hw->adapter_state == HNS3_NIC_STARTED) { rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev); + hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed, + hw->mac.link_duplex); + } hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev); diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index d2a5db8..3e44e3b 100644 --- a/drivers/net/hns3/hns3_mbx.c +++ b/drivers/net/hns3/hns3_mbx.c @@ -203,8 +203,9 @@ hns3_cmd_crq_empty(struct hns3_hw *hw) static void hns3_mbx_handler(struct hns3_hw *hw) { - struct hns3_mac *mac = &hw->mac; enum hns3_reset_level reset_level; + uint8_t link_status, link_duplex; + uint32_t link_speed; uint16_t *msg_q; uint8_t opcode; uint32_t tail; @@ -218,10 +219,11 @@ hns3_mbx_handler(struct hns3_hw *hw) opcode = msg_q[0] & 0xff; switch (opcode) { case HNS3_MBX_LINK_STAT_CHANGE: - memcpy(&mac->link_speed, &msg_q[2], - sizeof(mac->link_speed)); - mac->link_status = rte_le_to_cpu_16(msg_q[1]); - mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); + memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); + link_status = rte_le_to_cpu_16(msg_q[1]); + link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); + hns3vf_update_link_status(hw, link_status, link_speed, + link_duplex); break; case HNS3_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending @@ -310,7 +312,7 @@ hns3_handle_link_change_event(struct hns3_hw *hw, if (!req->msg[LINK_STATUS_OFFSET]) hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
- hns3_update_link_status(hw); + hns3_update_link_status_and_event(hw); }
static void
On 2021/1/22 18:18, Lijun Ou wrote:
From: Chengwen Feng fengchengwen@huawei.com
This patch support LSC(Link Status Change) event report.
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com
drivers/net/hns3/hns3_ethdev.c | 52 +++++++++++++++++++++++++++++++++++---- drivers/net/hns3/hns3_ethdev.h | 4 ++- drivers/net/hns3/hns3_ethdev_vf.c | 40 +++++++++++++++++++++++++++++- drivers/net/hns3/hns3_mbx.c | 14 ++++++----- 4 files changed, 97 insertions(+), 13 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 94b6e44..bc77608 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -19,6 +19,7 @@ #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1
#define HNS3_SERVICE_INTERVAL 1000000 /* us */ +#define HNS3_SERVICE_QUICK_INTERVAL 10 #define HNS3_INVALID_PVID 0xFFFF
#define HNS3_FILTER_TYPE_VF 0 @@ -93,6 +94,7 @@ static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on); static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); +static bool hns3_update_link_status(struct hns3_hw *hw);
static int hns3_add_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr); @@ -4458,7 +4460,7 @@ hns3_get_mac_link_status(struct hns3_hw *hw) return !!link_status; }
-void +static bool hns3_update_link_status(struct hns3_hw *hw) { int state; @@ -4467,7 +4469,36 @@ hns3_update_link_status(struct hns3_hw *hw) if (state != hw->mac.link_status) { hw->mac.link_status = state; hns3_warn(hw, "Link status change to %s!", state ? "up" : "down");
}return true;
pls add check reseting logic: /* * There are many reset levels, some of them have no impact on port's * link status. But driver reset recover process are same (include stop * Rx/Tx & reinit hardware and so on), so if in resetting the link * status will be ETH_LINK_DOWN else get the link status from firmware. */ if (rte_atomic16_read(&hw->reset.resetting)) state = ETH_LINK_DOWN; else state = hns3_get_mac_link_status(hw); else will not report lse in flr reset scenes.
- return false;
+}
+/*
- Current, the PF driver get link status by two ways:
- Periodic polling in the intr thread context, driver call
- hns3_update_link_status to update link status.
- Firmware report async interrupt, driver process the event in the intr
- thread context, and call hns3_update_link_status to update link status.
- If detect link status changed, driver need report LSE. One method is add the
- report LSE logic in hns3_update_link_status.
- But the PF driver ops(link_update) also call hns3_update_link_status to
- update link status.
- If we report LSE in hns3_update_link_status, it may lead to deadlock in the
- bonding application.
- So add the one new API which used only in intr thread context.
- */
+void +hns3_update_link_status_and_event(struct hns3_hw *hw) +{
- struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
- bool changed = hns3_update_link_status(hw);
- if (changed)
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static void @@ -4479,9 +4510,10 @@ hns3_service_handler(void *param)
if (!hns3_is_reset_pending(hns)) { hns3_update_speed_duplex(eth_dev);
hns3_update_link_status(hw);
- } else
hns3_update_link_status_and_event(hw);
pls add hns3_update_link_status_and_event before hns3_update_speed_duplex
} else { hns3_warn(hw, "Cancel the query when reset is pending");
}
rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
} @@ -5557,8 +5589,10 @@ hns3_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- if (hw->adapter_state == HNS3_NIC_STARTED)
if (hw->adapter_state == HNS3_NIC_STARTED) { rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
hns3_update_link_status_and_event(hw);
} hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
@@ -5601,7 +5635,15 @@ hns3_start_service(struct hns3_adapter *hns) hns3_set_rxtx_function(eth_dev); hns3_mp_req_start_rxtx(eth_dev); if (hw->adapter_state == HNS3_NIC_STARTED) {
hns3_service_handler(eth_dev);
/*
* This API parent function already hold the hns3_hw.lock, the
* hns3_service_handler may report lse, in bonding application
* it will call driver's ops which may acquire the hns3_hw.lock
* again, thus lead to deadlock.
* We defer calls hns3_service_handler to avoid the deadlock.
*/
rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
hns3_service_handler, eth_dev);
/* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 0d17170..547e991 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -946,11 +946,13 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); bool hns3_is_reset_pending(struct hns3_adapter *hns); bool hns3vf_is_reset_pending(struct hns3_adapter *hns); -void hns3_update_link_status(struct hns3_hw *hw); +void hns3_update_link_status_and_event(struct hns3_hw *hw); void hns3_ether_format_addr(char *buf, uint16_t size, const struct rte_ether_addr *ether_addr); int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info); +void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
uint32_t link_speed, uint8_t link_duplex);
static inline bool is_reset_pending(struct hns3_adapter *hns) diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 7eb0b11..470aaee 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1440,6 +1440,41 @@ hns3vf_request_link_info(struct hns3_hw *hw) hns3_err(hw, "Failed to fetch link status from PF: %d", ret); }
+void +hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
uint32_t link_speed, uint8_t link_duplex)
+{
- struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
- struct hns3_mac *mac = &hw->mac;
- bool report_lse;
- bool changed;
- changed = mac->link_status != link_status ||
mac->link_speed != link_speed ||
mac->link_duplex != link_duplex;
- if (!changed)
return;
- /*
* VF's link status/speed/duplex were updated by polling from PF driver,
* because the link status/speed/duplex may be changed in the polling
* interval, so driver will report lse (lsc event) once any of the above
* thress variables changed.
* But if the PF's link status is down and driver saved link status is
* also down, there are no need to report lse.
*/
- report_lse = true;
- if (link_status == ETH_LINK_DOWN && link_status == mac->link_status)
report_lse = false;
- mac->link_status = link_status;
- mac->link_speed = link_speed;
- mac->link_duplex = link_duplex;
- if (report_lse)
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
static int hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) { @@ -2373,8 +2408,11 @@ hns3vf_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- if (hw->adapter_state == HNS3_NIC_STARTED)
if (hw->adapter_state == HNS3_NIC_STARTED) { rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
hw->mac.link_duplex);
} hw->mac.link_status = ETH_LINK_DOWN;
hns3_set_rxtx_function(eth_dev);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index d2a5db8..3e44e3b 100644 --- a/drivers/net/hns3/hns3_mbx.c +++ b/drivers/net/hns3/hns3_mbx.c @@ -203,8 +203,9 @@ hns3_cmd_crq_empty(struct hns3_hw *hw) static void hns3_mbx_handler(struct hns3_hw *hw) {
- struct hns3_mac *mac = &hw->mac; enum hns3_reset_level reset_level;
- uint8_t link_status, link_duplex;
- uint32_t link_speed; uint16_t *msg_q; uint8_t opcode; uint32_t tail;
@@ -218,10 +219,11 @@ hns3_mbx_handler(struct hns3_hw *hw) opcode = msg_q[0] & 0xff; switch (opcode) { case HNS3_MBX_LINK_STAT_CHANGE:
memcpy(&mac->link_speed, &msg_q[2],
sizeof(mac->link_speed));
mac->link_status = rte_le_to_cpu_16(msg_q[1]);
mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
link_status = rte_le_to_cpu_16(msg_q[1]);
link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
hns3vf_update_link_status(hw, link_status, link_speed,
case HNS3_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pendinglink_duplex); break;
@@ -310,7 +312,7 @@ hns3_handle_link_change_event(struct hns3_hw *hw, if (!req->msg[LINK_STATUS_OFFSET]) hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
- hns3_update_link_status(hw);
- hns3_update_link_status_and_event(hw);
}
static void
On 1/22/2021 10:18 AM, Lijun Ou wrote:
From: Chengwen Feng fengchengwen@huawei.com
This patch support LSC(Link Status Change) event report.
There is a user config for lsc, 'dev->data->dev_conf.intr_conf.lsc', which seems not taken into account.
Also 'RTE_PCI_DRV_INTR_LSC' should be set in 'rte_pci_driver.drv_flags' to report this feature to higher levels.
And when the feature is fully implemented, can you please add "Link status event" feature to 'hns3.ini'?
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com
<...>
在 2021/1/29 7:41, Ferruh Yigit 写道:
On 1/22/2021 10:18 AM, Lijun Ou wrote:
From: Chengwen Feng fengchengwen@huawei.com
This patch support LSC(Link Status Change) event report.
There is a user config for lsc, 'dev->data->dev_conf.intr_conf.lsc', which seems not taken into account.
Frist of all, thank you for your review. Currently, the old firmware doest not support interrupt reporting. Therefore, the PF does not suport interrupt reporting through the mailbox. Therefore, the PF obtains interrupts in polling mode and then reports interrupts.
Also 'RTE_PCI_DRV_INTR_LSC' should be set in 'rte_pci_driver.drv_flags' to report this feature to higher levels.
In the future, the new firmware + PF will support interrupt reporting. In that case, the LSC capability of dev_flag will be set. In the future, we will enable the VF to support LSC. That is, we are developing the PF to report LSC interrupts through the mailbox.
And when the feature is fully implemented, can you please add "Link status event" feature to 'hns3.ini'?
By then, we'll add it in hns3.ini.
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com
<...> .
On 1/29/2021 1:49 AM, oulijun wrote:
在 2021/1/29 7:41, Ferruh Yigit 写道:
On 1/22/2021 10:18 AM, Lijun Ou wrote:
From: Chengwen Feng fengchengwen@huawei.com
This patch support LSC(Link Status Change) event report.
There is a user config for lsc, 'dev->data->dev_conf.intr_conf.lsc', which seems not taken into account.
Frist of all, thank you for your review. Currently, the old firmware doest not support interrupt reporting. Therefore, the PF does not suport interrupt reporting through the mailbox. Therefore, the PF obtains interrupts in polling mode and then reports interrupts.
Also 'RTE_PCI_DRV_INTR_LSC' should be set in 'rte_pci_driver.drv_flags' to report this feature to higher levels.
In the future, the new firmware + PF will support interrupt reporting. In that case, the LSC capability of dev_flag will be set. In the future, we will enable the VF to support LSC. That is, we are developing the PF to report LSC interrupts through the mailbox.
And when the feature is fully implemented, can you please add "Link status event" feature to 'hns3.ini'?
By then, we'll add it in hns3.ini.
Got it, so for now only LSC event report (even callbacks call) support added, I am proceeding with the patch.
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com
<...> .
On 1/22/2021 10:18 AM, Lijun Ou wrote:
From: Chengwen Feng fengchengwen@huawei.com
This patch support LSC(Link Status Change) event report.
Signed-off-by: Chengwen Feng fengchengwen@huawei.com Signed-off-by: Lijun Ou oulijun@huawei.com
<...>
@@ -2373,8 +2408,11 @@ hns3vf_stop_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev;
eth_dev = &rte_eth_devices[hw->data->port_id];
- if (hw->adapter_state == HNS3_NIC_STARTED)
- if (hw->adapter_state == HNS3_NIC_STARTED) { rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
hw->mac.link_duplex);
indentation fixed while merging.
Applied to dpdk-next-net/main, thanks.
On 1/22/2021 10:18 AM, Lijun Ou wrote:
This series add a new feature LSC event report and two bugfixes as well as some cleanup patches.
Chengchang Tang (2): net/hns3: reconstruct the Rx interrupt map net/hns3: fix interrupt resources in Rx interrupt mode
Chengwen Feng (1): net/hns3: support LSC event report
Huisong Li (2): net/hns3: encapsulate dfx stats in Rx/Tx datapatch net/hns3: move queue stats to xstats
Lijun Ou (9): net/hns3: use array instead of switch-case net/hns3: move judgment conditions to separated functions net/hns3: extract common judgments for all FDIR type net/hns3: refactor reset event report function net/hns3: fix memory leak with secondary process exit net/hns3: rename RSS functions net/hns3: adjust some comments net/hns3: remove unnecessary parentheses net/hns3: use %d instead of %u for enum variable
LSC interrupt patch (14/14) may need to be updated, and since it has no dependency in the set it is not merged, can be sent separately later.
Except 14/14, Series applied to dpdk-next-net/main, thanks.