From: Chiqijun <chiqijun@huawei.com>
driver inclusion
category: feature
bugzilla: 4472
-----------------------------------------------------------------------
Add XDP support for pass and drop actions.
Signed-off-by: Chiqijun <chiqijun@huawei.com>
Reviewed-by: Wangxiaoyun <cloud.wangxiaoyun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 .../net/ethernet/huawei/hinic/hinic_ethtool.c |  2 +
 .../net/ethernet/huawei/hinic/hinic_main.c    | 80 +++++++++++++++++
 .../net/ethernet/huawei/hinic/hinic_nic_dev.h |  5 ++
 drivers/net/ethernet/huawei/hinic/hinic_rx.c  | 89 +++++++++++++++++++
 drivers/net/ethernet/huawei/hinic/hinic_rx.h  |  3 +
 5 files changed, 179 insertions(+)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 9fa5af2dacce3..9796f2fa2f062 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -114,6 +114,7 @@ static struct hinic_stats hinic_rx_queue_stats[] = { HINIC_RXQ_STAT(csum_errors), HINIC_RXQ_STAT(other_errors), HINIC_RXQ_STAT(dropped), + HINIC_RXQ_STAT(xdp_dropped), HINIC_RXQ_STAT(rx_buf_empty), };
@@ -121,6 +122,7 @@ static struct hinic_stats hinic_rx_queue_stats_extern[] = { HINIC_RXQ_STAT(alloc_skb_err), HINIC_RXQ_STAT(alloc_rx_buf_err), HINIC_RXQ_STAT(map_rx_buf_err), + HINIC_RXQ_STAT(xdp_large_pkt), };
static struct hinic_stats hinic_tx_queue_stats[] = { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 6797d50d1bd49..47232e1f6ae86 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -30,6 +30,8 @@ #include <linux/tcp.h> #include <linux/ip.h> #include <linux/debugfs.h> +#include <linux/netlink.h> +#include <linux/bpf.h>
#include "ossl_knl.h" #include "hinic_hw_mgmt.h" @@ -1262,8 +1264,18 @@ static int hinic_change_mtu(struct net_device *netdev, int new_mtu) { struct hinic_nic_dev *nic_dev = netdev_priv(netdev); u32 mtu = (u32)new_mtu; + u32 xdp_max_mtu; int err = 0;
+ if (hinic_is_xdp_enable(nic_dev)) { + xdp_max_mtu = hinic_xdp_max_mtu(nic_dev); + if (mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Max MTU for xdp usage is %d\n", xdp_max_mtu); + return -EINVAL; + } + } + err = hinic_set_port_mtu(nic_dev->hwdev, mtu); if (err) { nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n", @@ -1484,6 +1496,12 @@ static int set_feature_lro(struct hinic_nic_dev *nic_dev, if (!(changed & NETIF_F_LRO)) return 0;
+ if (en && hinic_is_xdp_enable(nic_dev)) { + hinic_err(nic_dev, drv, "Can not enable LRO when xdp is enable\n"); + *failed_features |= NETIF_F_LRO; + return -EINVAL; + } + lro_timer = nic_dev->adaptive_cfg.lro.timer; lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size; err = hinic_set_rx_lro_state(nic_dev->hwdev, en, lro_timer, @@ -2057,6 +2075,66 @@ static void hinic_nic_set_rx_mode(struct net_device *netdev) queue_work(nic_dev->workq, &nic_dev->rx_mode_work); }
+bool hinic_is_xdp_enable(struct hinic_nic_dev *nic_dev) +{ + return !!nic_dev->xdp_prog; +} + +int hinic_xdp_max_mtu(struct hinic_nic_dev *nic_dev) +{ + return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); +} + +static int hinic_xdp_setup(struct hinic_nic_dev *nic_dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog = NULL; + int max_mtu = hinic_xdp_max_mtu(nic_dev); + int q_id; + + if (prog && nic_dev->netdev->mtu > max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", + nic_dev->netdev->mtu, max_mtu); + NL_SET_ERR_MSG_MOD(extack, + "MTU is too large to load xdp program"); + return -EINVAL; + } + + if (prog && nic_dev->netdev->features & NETIF_F_LRO) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program while LRO is on\n"); + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup xdp program while LRO is on\n"); + return -EINVAL; + } + + old_prog = xchg(&nic_dev->xdp_prog, prog); + for (q_id = 0; q_id < nic_dev->max_qps; q_id++) + xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int hinic_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct hinic_nic_dev *nic_dev = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return hinic_xdp_setup(nic_dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = nic_dev->xdp_prog ? + nic_dev->xdp_prog->aux->id : 0; + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, @@ -2087,6 +2165,7 @@ static const struct net_device_ops hinic_netdev_ops = { .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state, .ndo_fix_features = hinic_fix_features, .ndo_set_features = hinic_set_features, + .ndo_bpf = hinic_xdp, };
static const struct net_device_ops hinicvf_netdev_ops = { @@ -2110,6 +2189,7 @@ static const struct net_device_ops hinicvf_netdev_ops = {
.ndo_fix_features = hinic_fix_features, .ndo_set_features = hinic_set_features, + .ndo_bpf = hinic_xdp, };
static void netdev_feature_init(struct net_device *netdev) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h index 1e6479a93ead6..7255a3603b7f4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h @@ -246,6 +246,8 @@ struct hinic_nic_dev { u32 lro_replenish_thld; u16 rx_buff_len; u32 page_order; + + struct bpf_prog *xdp_prog; };
extern struct hinic_uld_info nic_uld_info; @@ -266,6 +268,9 @@ void hinic_link_status_change(struct hinic_nic_dev *nic_dev, bool status); int hinic_disable_func_rss(struct hinic_nic_dev *nic_dev); int hinic_enable_func_rss(struct hinic_nic_dev *nic_dev);
+bool hinic_is_xdp_enable(struct hinic_nic_dev *nic_dev); +int hinic_xdp_max_mtu(struct hinic_nic_dev *nic_dev); + #define hinic_msg(level, nic_dev, msglvl, format, arg...) \ do { \ if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 212a281200c4f..a9047432f3053 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -383,6 +383,7 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, stats->csum_errors = rxq_stats->csum_errors; stats->other_errors = rxq_stats->other_errors; stats->dropped = rxq_stats->dropped; + stats->xdp_dropped = rxq_stats->xdp_dropped; stats->rx_buf_empty = rxq_stats->rx_buf_empty; } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); u64_stats_update_end(&stats->syncp); @@ -397,11 +398,13 @@ void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats) rxq_stats->csum_errors = 0; rxq_stats->other_errors = 0; rxq_stats->dropped = 0; + rxq_stats->xdp_dropped = 0;
rxq_stats->alloc_skb_err = 0; rxq_stats->alloc_rx_buf_err = 0; rxq_stats->map_rx_buf_err = 0; rxq_stats->rx_buf_empty = 0; + rxq_stats->xdp_large_pkt = 0; u64_stats_update_end(&rxq_stats->syncp); }
@@ -510,6 +513,87 @@ static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev, nic_dev->lb_test_rx_idx++; }
+enum hinic_xdp_pkt { + HINIC_XDP_PKT_PASS, + HINIC_XDP_PKT_DROP, +}; + +static inline void update_drop_rx_info(struct hinic_rxq *rxq, u16 weqbb_num) +{ + struct hinic_rx_info *rx_info = NULL; + + while (weqbb_num) { + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + if (likely(page_to_nid(rx_info->page) == numa_node_id())) + hinic_reuse_rx_page(rxq, rx_info); + + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + rxq->cons_idx++; + rxq->delta++; + + weqbb_num--; + } +} + +int hinic_run_xdp(struct hinic_rxq *rxq, u32 pkt_len) +{ + struct bpf_prog *xdp_prog = NULL; + struct hinic_rx_info *rx_info = NULL; + struct xdp_buff xdp; + int result = HINIC_XDP_PKT_PASS; + u16 weqbb_num = 1; /* xdp can only use one rx_buff */ + u8 *va = NULL; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rxq->xdp_prog); + if (!xdp_prog) + goto unlock_rcu; + + if (unlikely(pkt_len > rxq->buf_len)) { + RXQ_STATS_INC(rxq, xdp_large_pkt); + weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + + ((pkt_len & (rxq->buf_len - 1)) ? 
1 : 0); + result = HINIC_XDP_PKT_DROP; + goto xdp_out; + } + + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; + prefetch(va); + dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, + rx_info->page_offset, rxq->buf_len, + DMA_FROM_DEVICE); + xdp.data = va; + xdp.data_hard_start = xdp.data; + xdp.data_end = xdp.data + pkt_len; + xdp_set_data_meta_invalid(&xdp); + prefetchw(xdp.data_hard_start); + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_DROP: + result = HINIC_XDP_PKT_DROP; + break; + } + +xdp_out: + if (result == HINIC_XDP_PKT_DROP) { + RXQ_STATS_INC(rxq, xdp_dropped); + update_drop_rx_info(rxq, weqbb_num); + } + +unlock_rcu: + rcu_read_unlock(); + + return result; +} + int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe, u32 pkt_len, u32 vlan_len, u32 status) { @@ -517,6 +601,11 @@ int recv_one_pkt(struct hinic_rxq *rxq, struct hinic_rq_cqe *rx_cqe, struct net_device *netdev = rxq->netdev; struct hinic_nic_dev *nic_dev = netdev_priv(netdev); u32 offload_type; + u32 xdp_status; + + xdp_status = hinic_run_xdp(rxq, pkt_len); + if (xdp_status == HINIC_XDP_PKT_DROP) + return 0;
skb = hinic_fetch_rx_buffer(rxq, pkt_len); if (unlikely(!skb)) { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index b6e8935c01d48..827904f7839bc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -37,11 +37,13 @@ struct hinic_rxq_stats { u64 csum_errors; u64 other_errors; u64 dropped; + u64 xdp_dropped; u64 rx_buf_empty;
u64 alloc_skb_err; u64 alloc_rx_buf_err; u64 map_rx_buf_err; + u64 xdp_large_pkt;
struct u64_stats_sync syncp; }; @@ -75,6 +77,7 @@ struct hinic_rxq { u16 msix_entry_idx;
struct hinic_rx_info *rx_info; + struct bpf_prog *xdp_prog;
struct hinic_irq *irq_cfg; u16 next_to_alloc;