The type of netdev_features_t will be changed from u64 to unsigned long *, so a feature mask can no longer be modified and returned by value. Change the prototypes of mlx5e_tunnel_features_check(), mlx5e_ipsec_feature_check() and mlx5e_fix_uplink_rep_features() to take a pointer to the features and return void, adapting them to the new type.
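
The conversion follows the pattern sketched below (a minimal illustration only; example_feature_check() is a hypothetical helper, not part of the driver code):

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  /* Hypothetical helper before the conversion: the feature mask is passed
   * and returned by value, which only works while netdev_features_t is u64.
   */
  static netdev_features_t example_feature_check_old(struct sk_buff *skb,
                                                     netdev_features_t features)
  {
          return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  }

  /* The same helper after the conversion: the caller's mask is modified
   * in place through a pointer and the function returns void.
   */
  static void example_feature_check(struct sk_buff *skb,
                                    netdev_features_t *features)
  {
          *features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
  }

Call sites change accordingly, e.g. from "features = example_feature_check_old(skb, features);" to "example_feature_check(skb, &features);", which is the same pattern applied to the mlx5e helpers below.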
Signed-off-by: Jian Shen <shenjian15@huawei.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h       | 15 +++++-----
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c  | 35 ++++++++++------------
 2 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 5120a59..62ab97c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -93,8 +93,8 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
                               struct mlx5_wqe_eth_seg *eseg);
-static inline netdev_features_t
-mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+static inline void
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t *features)
 {
         struct xfrm_offload *xo = xfrm_offload(skb);
         struct sec_path *sp = skb_sec_path(skb);
@@ -118,13 +118,12 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
                         goto out_disable;
                 }
 
-                return features;
-
+                return;
         }
 
         /* Disable CSUM and GSO for software IPsec */
 out_disable:
-        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+        *features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 #else
@@ -140,9 +139,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 }
 
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline netdev_features_t
-mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
-{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
+static inline void
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t *features)
+{ *features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d8c82d3..e70e8c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3289,22 +3289,20 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
         return 0;
 }
 
-static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
-                                                       netdev_features_t features)
+static void mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+                                          netdev_features_t *features)
 {
-        features &= ~NETIF_F_HW_TLS_RX;
+        *features &= ~NETIF_F_HW_TLS_RX;
         if (netdev->features & NETIF_F_HW_TLS_RX)
                 netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
 
-        features &= ~NETIF_F_HW_TLS_TX;
+        *features &= ~NETIF_F_HW_TLS_TX;
         if (netdev->features & NETIF_F_HW_TLS_TX)
                 netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
 
-        features &= ~NETIF_F_NTUPLE;
+        *features &= ~NETIF_F_NTUPLE;
         if (netdev->features & NETIF_F_NTUPLE)
                 netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
-
-        return features;
 }
 
 static void mlx5e_fix_features(struct net_device *netdev,
@@ -3339,7 +3337,7 @@ static void mlx5e_fix_features(struct net_device *netdev,
         }
 
         if (mlx5e_is_uplink_rep(priv))
-                features = mlx5e_fix_uplink_rep_features(netdev, features);
+                mlx5e_fix_uplink_rep_features(netdev, &features);
 
         mutex_unlock(&priv->state_lock);
 }
@@ -3757,9 +3755,9 @@ static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev
         return false;
 }
 
-static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
-                                                     struct sk_buff *skb,
-                                                     netdev_features_t features)
+static void mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+                                        struct sk_buff *skb,
+                                        netdev_features_t *features)
 {
         unsigned int offset = 0;
         struct udphdr *udph;
@@ -3780,12 +3778,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
         switch (proto) {
         case IPPROTO_GRE:
                 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
-                        return features;
+                        return;
                 break;
         case IPPROTO_IPIP:
         case IPPROTO_IPV6:
                 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
-                        return features;
+                        return;
                 break;
         case IPPROTO_UDP:
                 udph = udp_hdr(skb);
@@ -3793,23 +3791,24 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
                 /* Verify if UDP port is being offloaded by HW */
                 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
-                        return features;
+                        return;
 
 #if IS_ENABLED(CONFIG_GENEVE)
                 /* Support Geneve offload for default UDP port */
                 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
-                        return features;
+                        return;
 #endif
                 break;
 #ifdef CONFIG_MLX5_EN_IPSEC
         case IPPROTO_ESP:
-                return mlx5e_ipsec_feature_check(skb, features);
+                mlx5e_ipsec_feature_check(skb, features);
+                return;
 #endif
         }
 
 out:
         /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
-        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+        *features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 void mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev,
@@ -3823,7 +3821,7 @@ void mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev,
         /* Validate if the tunneled packet is being offloaded by HW */
         if (skb->encapsulation &&
             (*features & NETIF_F_CSUM_MASK || *features & NETIF_F_GSO_MASK))
-                *features = mlx5e_tunnel_features_check(priv, skb, *features);
+                mlx5e_tunnel_features_check(priv, skb, features);
 }
 
static void mlx5e_tx_timeout_work(struct work_struct *work)