Anirudh Venkataramanan (1): ice: Consolidate VSI state and flags
Jesse Brandeburg (1): ice: fix writeback enable logic
Larysa Zaremba (1): ice: protect XDP configuration with a mutex
Maciej Fijalkowski (1): ice: allow hot-swapping XDP programs
Nick Nunley (1): ice: Remove vlan_ena from vsi structure
 drivers/net/ethernet/intel/ice/ice.h         | 24 ++---
 drivers/net/ethernet/intel/ice/ice_ethtool.c |  2 +-
 drivers/net/ethernet/intel/ice/ice_lib.c     | 38 +++++---
 drivers/net/ethernet/intel/ice/ice_main.c    | 97 ++++++++++----------
 drivers/net/ethernet/intel/ice/ice_txrx.c    | 63 ++++++-------
 drivers/net/ethernet/intel/ice/ice_txrx.h    |  1 -
 drivers/net/ethernet/intel/ice/ice_xsk.c     |  3 +-
 7 files changed, 120 insertions(+), 108 deletions(-)
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/14281 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/W...
From: Nick Nunley <nicholas.d.nunley@intel.com>
mainline inclusion
from mainline-v5.11-rc1
commit bcf68ea1e580df685a4a9cec4dfaf0f0f4393dbe
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IARWO7
CVE: CVE-2024-46765
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
vlan_ena was introduced to track whether VLAN filters are enabled on the device, but
1) checking for num_vlan > 1 already gives us this information, and is
   currently used in this way throughout the code
2) the logic for vlan_ena is broken when multiple VLANs are active
Just remove vlan_ena and use num_vlan instead.
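For illustration only, here is a stand-alone toy model of the broken bookkeeping (hypothetical names, not driver code; it assumes, as the driver does, that the always-present VLAN 0 keeps num_vlan at 1 even with no user VLANs):

  #include <stdbool.h>
  #include <stdio.h>

  /* toy stand-ins for the relevant ice_vsi members */
  struct toy_vsi {
          bool vlan_ena;  /* old flag: set on add_vid, cleared on any kill_vid */
          int num_vlan;   /* count of installed VLAN filters, VLAN 0 included */
  };

  static void toy_add_vid(struct toy_vsi *v)  { v->num_vlan++; v->vlan_ena = true; }
  static void toy_kill_vid(struct toy_vsi *v) { v->num_vlan--; v->vlan_ena = false; }

  int main(void)
  {
          struct toy_vsi v = { .vlan_ena = false, .num_vlan = 1 /* VLAN 0 */ };

          toy_add_vid(&v);      /* install VLAN 10 */
          toy_add_vid(&v);      /* install VLAN 20 */
          toy_kill_vid(&v);     /* remove VLAN 10: VLAN 20 is still installed */

          /* vlan_ena has gone stale, while num_vlan > 1 is still correct */
          printf("vlan_ena=%d, num_vlan>1=%d\n", v.vlan_ena, v.num_vlan > 1);
          return 0;
  }

This is why the promiscuous-mode paths in the diff below switch from checking vsi->vlan_ena to checking vsi->num_vlan > 1.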
Signed-off-by: Nick Nunley <nicholas.d.nunley@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 drivers/net/ethernet/intel/ice/ice.h      |  1 -
 drivers/net/ethernet/intel/ice/ice_main.c | 11 ++++-------
 2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 7794703c1359..ea0897a3d39e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -305,7 +305,6 @@ struct ice_vsi {
         u8 irqs_ready:1;
         u8 current_isup:1;              /* Sync 'link up' logging */
         u8 stat_offsets_loaded:1;
-        u8 vlan_ena:1;
         u16 num_vlan;

         /* queue information */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 65fe75cc3d43..cdca320fcdf1 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -232,7 +232,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
         if (vsi->type != ICE_VSI_PF)
                 return 0;

-        if (vsi->vlan_ena) {
+        if (vsi->num_vlan > 1) {
                 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
                                                   set_promisc);
         } else {
@@ -334,7 +334,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
         /* check for changes in promiscuous modes */
         if (changed_flags & IFF_ALLMULTI) {
                 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
-                        if (vsi->vlan_ena)
+                        if (vsi->num_vlan > 1)
                                 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
                         else
                                 promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -348,7 +348,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
                         }
                 } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
-                        if (vsi->vlan_ena)
+                        if (vsi->num_vlan > 1)
                                 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
                         else
                                 promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -3162,10 +3162,8 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
          * packets aren't pruned by the device's internal switch on Rx
          */
         ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
-        if (!ret) {
-                vsi->vlan_ena = true;
+        if (!ret)
                 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
-        }

         return ret;
 }
@@ -3204,7 +3202,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
         if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
                 ret = ice_cfg_vlan_pruning(vsi, false, false);

-        vsi->vlan_ena = false;
         set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
         return ret;
 }
From: Jesse Brandeburg <jesse.brandeburg@intel.com>
mainline inclusion
from mainline-v5.12-rc1
commit 1d9f7ca324a9b73bb50ed7df58e155d1c37e1b9a
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IARWO7
CVE: CVE-2024-46765
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The writeback enable logic was incorrectly implemented (due to misunderstanding what the side effects of the implementation would be during polling).
Fix this logic issue, while implementing a new feature allowing the user to control the writeback frequency using the knobs for controlling interrupt throttling that we already have. Basically, if you leave adaptive interrupts enabled, the writeback frequency will be varied even if busy_polling or napi-poll is in use. If the interrupt rates are set to a fixed value by ethtool -C and adaptive is off, the driver will allow the user-set interrupt rate to guide how frequently the hardware will complete descriptors to the driver.
Effectively the user gets control over the hardware efficiency, allowing the choice between immediate interrupts or write-backs delayed up to a maximum of the interrupt rate, even when interrupts are disabled during polling.
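As a rough stand-alone sketch of the corrected polling decision (toy names, not the driver's actual ice_napi_poll()): when the budget is exhausted and interrupts therefore stay masked, the driver now arms write-back-on-ITR instead of leaving completed descriptors waiting for a full cache line; when work completes it returns to the normal interrupt/ITR path.

  #include <stdbool.h>
  #include <stdio.h>

  enum toy_mode { TOY_IRQ_ENABLED, TOY_WB_ON_ITR };

  struct toy_q_vector { enum toy_mode mode; };

  /* stand-in for ice_set_wb_on_itr(): HW keeps writing back completed
   * descriptors at the currently programmed ITR interval while IRQs are off
   */
  static void toy_set_wb_on_itr(struct toy_q_vector *q) { q->mode = TOY_WB_ON_ITR; }

  /* stand-in for ice_update_ena_itr(): re-enable the interrupt, normal ITR path */
  static void toy_update_ena_itr(struct toy_q_vector *q) { q->mode = TOY_IRQ_ENABLED; }

  static int toy_napi_poll(struct toy_q_vector *q, int budget, int work_done)
  {
          if (work_done >= budget) {      /* !clean_complete */
                  toy_set_wb_on_itr(q);
                  return budget;          /* stay in polling mode */
          }
          toy_update_ena_itr(q);
          return work_done;
  }

  int main(void)
  {
          struct toy_q_vector q = { .mode = TOY_IRQ_ENABLED };
          int ret;

          ret = toy_napi_poll(&q, 64, 64);        /* budget exhausted */
          printf("busy: ret=%d mode=%d\n", ret, q.mode);

          ret = toy_napi_poll(&q, 64, 12);        /* work finished */
          printf("idle: ret=%d mode=%d\n", ret, q.mode);
          return 0;
  }

With this in place, the existing ethtool -C knobs (adaptive on, or a fixed rx-usecs/tx-usecs) also choose the write-back cadence used while polling, as described above.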
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Co-developed-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 drivers/net/ethernet/intel/ice/ice_txrx.c | 59 ++++++++++-------------
 drivers/net/ethernet/intel/ice/ice_txrx.h |  1 -
 2 files changed, 25 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 0fc891861a10..d393ca03bfcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1497,22 +1497,11 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
         struct ice_vsi *vsi = q_vector->vsi;
         u32 itr_val;

-        /* when exiting WB_ON_ITR lets set a low ITR value and trigger
-         * interrupts to expire right away in case we have more work ready to go
-         * already
+        /* when exiting WB_ON_ITR just reset the countdown and let ITR
+         * resume it's normal "interrupts-enabled" path
          */
-        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
-                itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
-                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
-                /* set target back to last user set value */
-                rx->target_itr = rx->itr_setting;
-                /* set current to what we just wrote and dynamic if needed */
-                rx->current_itr = ICE_WB_ON_ITR_USECS |
-                        (rx->itr_setting & ICE_ITR_DYNAMIC);
-                /* allow normal interrupt flow to start */
+        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
                 q_vector->itr_countdown = 0;
-                return;
-        }

         /* This will do nothing if dynamic updates are not enabled */
         ice_update_itr(q_vector, tx);
@@ -1552,10 +1541,8 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
                 q_vector->itr_countdown--;
         }

-        if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
-                wr32(&q_vector->vsi->back->hw,
-                     GLINT_DYN_CTL(q_vector->reg_idx),
-                     itr_val);
+        if (!test_bit(__ICE_DOWN, vsi->state))
+                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
 }

 /**
@@ -1565,30 +1552,29 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
  * We need to tell hardware to write-back completed descriptors even when
  * interrupts are disabled. Descriptors will be written back on cache line
  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
- * descriptors may not be written back if they don't fill a cache line until the
- * next interrupt.
+ * descriptors may not be written back if they don't fill a cache line until
+ * the next interrupt.
  *
- * This sets the write-back frequency to 2 microseconds as that is the minimum
- * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
- * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ * This sets the write-back frequency to whatever was set previously for the
+ * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
+ * aren't meddling with the INTENA_M bit.
  */
 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
 {
         struct ice_vsi *vsi = q_vector->vsi;

-        /* already in WB_ON_ITR mode no need to change it */
+        /* already in wb_on_itr mode no need to change it */
         if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
                 return;

-        if (q_vector->num_ring_rx)
-                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
-                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
-                                                 ICE_RX_ITR));
-
-        if (q_vector->num_ring_tx)
-                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
-                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
-                                                 ICE_TX_ITR));
+        /* use previously set ITR values for all of the ITR indices by
+         * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
+         * be static in non-adaptive mode (user configured)
+         */
+        wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+             ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
+              GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
+             GLINT_DYN_CTL_WB_ON_ITR_M);

         q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
 }
@@ -1655,8 +1641,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
         }

         /* If work not completed, return budget and polling will return */
-        if (!clean_complete)
+        if (!clean_complete) {
+                /* Set the writeback on ITR so partial completions of
+                 * cache-lines will still continue even if we're polling.
+                 */
+                ice_set_wb_on_itr(q_vector);
                 return budget;
+        }

         /* Exit the polling mode, but don't re-enable interrupts if stack might
          * poll us due to busy-polling
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index eab7ceae926b..a79fa0e945f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -240,7 +240,6 @@ enum ice_rx_dtype {
 #define ICE_DFLT_INTRL  0
 #define ICE_MAX_INTRL   236

-#define ICE_WB_ON_ITR_USECS     2
 #define ICE_IN_WB_ON_ITR_MODE   255
 /* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
  * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
From: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
mainline inclusion
from mainline-v5.13-rc1
commit e97fb1aea9056299d013aa30783d6a995136a2c8
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IARWO7
CVE: CVE-2024-46765
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
struct ice_vsi has two fields, state and flags, which seem to serve the same purpose. Consolidate them into one field 'state'.
enum ice_state is used to represent state information of the PF. While some of these enum values can be used to represent VSI state, it makes more sense to represent VSI state with its own enum. So derive a new enum ice_vsi_state from ice_vsi_flags and ice_state and use it. Also rename enum ice_state to ice_pf_state for clarity.
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 drivers/net/ethernet/intel/ice/ice.h         | 21 ++++----
 drivers/net/ethernet/intel/ice/ice_ethtool.c |  2 +-
 drivers/net/ethernet/intel/ice/ice_lib.c     | 14 +++---
 drivers/net/ethernet/intel/ice/ice_main.c    | 50 ++++++++++----------
 drivers/net/ethernet/intel/ice/ice_txrx.c    |  6 +--
 5 files changed, 47 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ea0897a3d39e..a4c8ba4e7791 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -188,7 +188,7 @@ struct ice_sw {
         u8 dflt_vsi_ena:1;      /* true if above dflt_vsi is enabled */
 };

-enum ice_state {
+enum ice_pf_state {
         __ICE_TESTING,
         __ICE_DOWN,
         __ICE_NEEDS_RESTART,
@@ -229,12 +229,14 @@ enum ice_state {
         __ICE_STATE_NBITS               /* must be last */
 };

-enum ice_vsi_flags {
-        ICE_VSI_FLAG_UMAC_FLTR_CHANGED,
-        ICE_VSI_FLAG_MMAC_FLTR_CHANGED,
-        ICE_VSI_FLAG_VLAN_FLTR_CHANGED,
-        ICE_VSI_FLAG_PROMISC_CHANGED,
-        ICE_VSI_FLAG_NBITS              /* must be last */
+enum ice_vsi_state {
+        ICE_VSI_DOWN,
+        ICE_VSI_NEEDS_RESTART,
+        ICE_VSI_UMAC_FLTR_CHANGED,
+        ICE_VSI_MMAC_FLTR_CHANGED,
+        ICE_VSI_VLAN_FLTR_CHANGED,
+        ICE_VSI_PROMISC_CHANGED,
+        ICE_VSI_STATE_NBITS             /* must be last */
 };

 /* struct that defines a VSI, associated with a dev */
@@ -250,8 +252,7 @@ struct ice_vsi {
         irqreturn_t (*irq_handler)(int irq, void *data);

         u64 tx_linearize;
-        DECLARE_BITMAP(state, __ICE_STATE_NBITS);
-        DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
+        DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
         unsigned int current_netdev_flags;
         u32 tx_restart;
         u32 tx_busy;
@@ -479,7 +480,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
         val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
               (itr << GLINT_DYN_CTL_ITR_INDX_S);
         if (vsi)
-                if (test_bit(__ICE_DOWN, vsi->state))
+                if (test_bit(ICE_VSI_DOWN, vsi->state))
                         return;
         wr32(hw, GLINT_DYN_CTL(vector), val);
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9659668279dc..6865b6d8b4c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2902,7 +2902,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
         /* Bring interface down, copy in the new ring info, then restore the
          * interface. if VSI is up, bring it down and then back up
          */
-        if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+        if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
                 ice_down(vsi);

                 if (tx_rings) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 092cd8bc9d3e..575f1756a662 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -426,7 +426,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)

         vsi->type = vsi_type;
         vsi->back = pf;
-        set_bit(__ICE_DOWN, vsi->state);
+        set_bit(ICE_VSI_DOWN, vsi->state);

         if (vsi_type == ICE_VSI_VF)
                 ice_vsi_set_num_qs(vsi, vf_id);
@@ -2490,7 +2490,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
  */
 void ice_vsi_close(struct ice_vsi *vsi)
 {
-        if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+        if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
                 ice_down(vsi);

         ice_vsi_free_irq(vsi);
@@ -2507,10 +2507,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 {
         int err = 0;

-        if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+        if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
                 return 0;

-        clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+        clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

         if (vsi->netdev && vsi->type == ICE_VSI_PF) {
                 if (netif_running(vsi->netdev)) {
@@ -2536,10 +2536,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
  */
 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 {
-        if (test_bit(__ICE_DOWN, vsi->state))
+        if (test_bit(ICE_VSI_DOWN, vsi->state))
                 return;

-        set_bit(__ICE_NEEDS_RESTART, vsi->state);
+        set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

         if (vsi->type == ICE_VSI_PF && vsi->netdev) {
                 if (netif_running(vsi->netdev)) {
@@ -2694,7 +2694,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
         ice_vsi_free_q_vectors(vsi);

         /* make sure unregister_netdev() was called by checking __ICE_DOWN */
-        if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
+        if (vsi->netdev && test_bit(ICE_VSI_DOWN, vsi->state)) {
                 free_netdev(vsi->netdev);
                 vsi->netdev = NULL;
         }
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index cdca320fcdf1..f4b2c59a8f55 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -79,7 +79,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
                         break;
                 }

-        if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+        if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
                 return;

         if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
@@ -212,9 +212,9 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
  */
 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 {
-        return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
-               test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
-               test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+        return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
+               test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
+               test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 }

 /**
@@ -281,9 +281,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
         INIT_LIST_HEAD(&vsi->tmp_unsync_list);

         if (ice_vsi_fltr_changed(vsi)) {
-                clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
-                clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
-                clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+                clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+                clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
+                clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

                 /* grab the netdev's addr_list_lock */
                 netif_addr_lock_bh(netdev);
@@ -364,8 +364,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
         }

         if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
-            test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
-                clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+            test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
+                clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
                 if (vsi->current_netdev_flags & IFF_PROMISC) {
                         /* Apply Rx filter rule to get traffic from wire */
                         if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
@@ -398,12 +398,12 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
         goto exit;

 out_promisc:
-        set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+        set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
         goto exit;
 out:
         /* if something went wrong then set the changed flag so we try again */
-        set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
-        set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+        set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+        set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 exit:
         clear_bit(__ICE_CFG_BUSY, vsi->state);
         return err;
@@ -763,7 +763,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
         if (!vsi)
                 return;

-        if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
+        if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
                 return;

         if (vsi->type == ICE_VSI_PF) {
@@ -2027,7 +2027,7 @@ static void ice_check_media_subtask(struct ice_pf *pf)
                 /* PHY settings are reset on media insertion, reconfigure
                  * PHY to preserve settings.
                  */
-                if (test_bit(__ICE_DOWN, vsi->state) &&
+                if (test_bit(ICE_VSI_DOWN, vsi->state) &&
                     test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
                         return;

@@ -2528,7 +2528,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
         }

         /* need to stop netdev while setting up the program for Rx rings */
-        if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+        if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
                 ret = ice_down(vsi);
                 if (ret) {
                         NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
@@ -3163,7 +3163,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
          */
         ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
         if (!ret)
-                set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+                set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

         return ret;
 }
@@ -3202,7 +3202,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
         if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
                 ret = ice_cfg_vlan_pruning(vsi, false, false);

-        set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+        set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
         return ret;
 }
@@ -4986,8 +4986,8 @@ static void ice_set_rx_mode(struct net_device *netdev)
          * ndo_set_rx_mode may be triggered even without a change in netdev
          * flags
          */
-        set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
-        set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+        set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
+        set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
         set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);

         /* schedule our worker thread which will take care of
@@ -5262,7 +5262,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
         if (err)
                 return err;

-        clear_bit(__ICE_DOWN, vsi->state);
+        clear_bit(ICE_VSI_DOWN, vsi->state);
         ice_napi_enable_all(vsi);
         ice_vsi_ena_irq(vsi);

@@ -5409,7 +5409,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
         struct ice_eth_stats *cur_es = &vsi->eth_stats;
         struct ice_pf *pf = vsi->back;

-        if (test_bit(__ICE_DOWN, vsi->state) ||
+        if (test_bit(ICE_VSI_DOWN, vsi->state) ||
             test_bit(__ICE_CFG_BUSY, pf->state))
                 return;

@@ -5614,7 +5614,7 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
          * But, only call the update routine and read the registers if VSI is
          * not down.
          */
-        if (!test_bit(__ICE_DOWN, vsi->state))
+        if (!test_bit(ICE_VSI_DOWN, vsi->state))
                 ice_update_vsi_ring_stats(vsi);
         stats->tx_packets = vsi_stats->tx_packets;
         stats->tx_bytes = vsi_stats->tx_bytes;
@@ -5814,7 +5814,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
         if (err)
                 goto err_up_complete;

-        clear_bit(__ICE_DOWN, vsi->state);
+        clear_bit(ICE_VSI_DOWN, vsi->state);
         ice_vsi_ena_irq(vsi);

         return 0;
@@ -6214,7 +6214,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
         netdev->mtu = (unsigned int)new_mtu;

         /* if VSI is up, bring it down and then back up */
-        if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+        if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
                 int err;

                 err = ice_down(vsi);
@@ -6654,7 +6654,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
         default:
                 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
                 set_bit(__ICE_DOWN, pf->state);
-                set_bit(__ICE_NEEDS_RESTART, vsi->state);
+                set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
                 set_bit(__ICE_SERVICE_DIS, pf->state);
                 break;
         }
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d393ca03bfcb..649420fed013 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -309,7 +309,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
                 smp_mb();
                 if (__netif_subqueue_stopped(tx_ring->netdev,
                                              tx_ring->q_index) &&
-                    !test_bit(__ICE_DOWN, vsi->state)) {
+                    !test_bit(ICE_VSI_DOWN, vsi->state)) {
                         netif_wake_subqueue(tx_ring->netdev,
                                             tx_ring->q_index);
                         ++tx_ring->tx_stats.restart_q;
@@ -590,7 +590,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         struct ice_ring *xdp_ring;
         int drops = 0, i;

-        if (test_bit(__ICE_DOWN, vsi->state))
+        if (test_bit(ICE_VSI_DOWN, vsi->state))
                 return -ENETDOWN;

         if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
@@ -1541,7 +1541,7 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
                 q_vector->itr_countdown--;
         }

-        if (!test_bit(__ICE_DOWN, vsi->state))
+        if (!test_bit(ICE_VSI_DOWN, vsi->state))
                 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
 }
From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
mainline inclusion
from mainline-v6.5-rc1
commit 469748429ac81f0a6a344637fc9d3b1d16a9f3d8
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IARWO7
CVE: CVE-2024-46765
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Currently ice driver's .ndo_bpf callback brings interface down and up independently of XDP resources' presence. This is only needed when either these resources have to be configured or removed. It means that if one is switching XDP programs on-the-fly with running traffic, packets will be dropped.
To avoid this, compare, early in ice_xdp_setup_prog(), the state of the incoming bpf_prog pointer against the bpf_prog pointer that is already assigned to the VSI. Do the swap when the VSI already has a bpf_prog assigned and the incoming one is also non-NULL; in that case no ring resources need to be configured or freed.
Lastly, while at it, put old bpf_prog *after* the update of Rx ring's bpf_prog pointer. In theory previous code could expose us to a state where Rx ring's bpf_prog would still be referring to old_prog that got released with earlier bpf_prog_put().
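A stand-alone toy of this publication-order point (hypothetical helpers, not the driver's bpf refcounting): the old program may only be released after every Rx ring pointer has been switched to the new one.

  #include <stdio.h>
  #include <stdlib.h>

  struct toy_prog { int id; };

  #define TOY_NUM_RXQ 4

  static struct toy_prog *toy_vsi_prog;                 /* vsi->xdp_prog analogue */
  static struct toy_prog *toy_rings[TOY_NUM_RXQ];       /* rx_rings[i]->xdp_prog analogue */

  static void toy_assign_prog(struct toy_prog *prog)
  {
          struct toy_prog *old = toy_vsi_prog;  /* like xchg(&vsi->xdp_prog, prog) */
          int i;

          toy_vsi_prog = prog;

          /* 1) publish the new program to every ring first */
          for (i = 0; i < TOY_NUM_RXQ; i++)
                  toy_rings[i] = toy_vsi_prog;

          /* 2) only now drop the old program; freeing it before the loop would
           * leave rings briefly pointing at freed memory
           */
          free(old);
  }

  int main(void)
  {
          struct toy_prog *first = malloc(sizeof(*first));
          struct toy_prog *second = malloc(sizeof(*second));

          first->id = 1;
          second->id = 2;

          toy_assign_prog(first);
          toy_assign_prog(second);      /* hot swap: first is freed only after rings switch */
          printf("ring 0 now runs prog %d\n", toy_rings[0]->id);

          free(second);
          return 0;
  }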
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Conflicts:
	drivers/net/ethernet/intel/ice/ice_main.c
[conflicts due to not merged 7e753eb675f0 ("ice: Fix DMA mappings leak")]
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 drivers/net/ethernet/intel/ice/ice_main.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f4b2c59a8f55..26daf2c474bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2318,11 +2318,11 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
         int i;

         old_prog = xchg(&vsi->xdp_prog, prog);
-        if (old_prog)
-                bpf_prog_put(old_prog);
-
         ice_for_each_rxq(vsi, i)
                 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+        if (old_prog)
+                bpf_prog_put(old_prog);
 }

 /**
@@ -2527,6 +2527,12 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                 return -EOPNOTSUPP;
         }

+        /* hot swap progs and avoid toggling link */
+        if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+                ice_vsi_assign_bpf_prog(vsi, prog);
+                return 0;
+        }
+
         /* need to stop netdev while setting up the program for Rx rings */
         if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
                 ret = ice_down(vsi);
@@ -2545,13 +2551,6 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                 xdp_ring_err = ice_destroy_xdp_rings(vsi);
                 if (xdp_ring_err)
                         NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
-        } else {
-                /* safe to call even when prog == vsi->xdp_prog as
-                 * dev_xdp_install in net/core/dev.c incremented prog's
-                 * refcount so corresponding bpf_prog_put won't cause
-                 * underflow
-                 */
-                ice_vsi_assign_bpf_prog(vsi, prog);
         }

         if (if_running)
From: Larysa Zaremba <larysa.zaremba@intel.com>
mainline inclusion
from mainline-v6.11-rc7
commit 2504b8405768a57a71e660dbfd5abd59f679a03f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IARWO7
CVE: CVE-2024-46765
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
The main threat to data consistency in ice_xdp() is a possible asynchronous PF reset. It can be triggered by a user or by TX timeout handler.
XDP setup and PF reset code access the same resources in the following sections:
 * ice_vsi_close() in ice_prepare_for_reset() - already rtnl-locked
 * ice_vsi_rebuild() for the PF VSI - not protected
 * ice_vsi_open() - already rtnl-locked
With an unfortunate timing, such accesses can result in a crash such as the one below:
[ +1.999878] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 14
[ +2.002992] ice 0000:b1:00.0: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 18
[Mar15 18:17] ice 0000:b1:00.0 ens801f0np0: NETDEV WATCHDOG: CPU: 38: transmit queue 14 timed out 80692736 ms
[ +0.000093] ice 0000:b1:00.0 ens801f0np0: tx_timeout: VSI_num: 6, Q 14, NTC: 0x0, HW_HEAD: 0x0, NTU: 0x0, INT: 0x4000001
[ +0.000012] ice 0000:b1:00.0 ens801f0np0: tx_timeout recovery level 1, txqueue 14
[ +0.394718] ice 0000:b1:00.0: PTP reset successful
[ +0.006184] BUG: kernel NULL pointer dereference, address: 0000000000000098
[ +0.000045] #PF: supervisor read access in kernel mode
[ +0.000023] #PF: error_code(0x0000) - not-present page
[ +0.000023] PGD 0 P4D 0
[ +0.000018] Oops: 0000 [#1] PREEMPT SMP NOPTI
[ +0.000023] CPU: 38 PID: 7540 Comm: kworker/38:1 Not tainted 6.8.0-rc7 #1
[ +0.000031] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0014.082620210524 08/26/2021
[ +0.000036] Workqueue: ice ice_service_task [ice]
[ +0.000183] RIP: 0010:ice_clean_tx_ring+0xa/0xd0 [ice]
[...]
[ +0.000013] Call Trace:
[ +0.000016]  <TASK>
[ +0.000014]  ? __die+0x1f/0x70
[ +0.000029]  ? page_fault_oops+0x171/0x4f0
[ +0.000029]  ? schedule+0x3b/0xd0
[ +0.000027]  ? exc_page_fault+0x7b/0x180
[ +0.000022]  ? asm_exc_page_fault+0x22/0x30
[ +0.000031]  ? ice_clean_tx_ring+0xa/0xd0 [ice]
[ +0.000194]  ice_free_tx_ring+0xe/0x60 [ice]
[ +0.000186]  ice_destroy_xdp_rings+0x157/0x310 [ice]
[ +0.000151]  ice_vsi_decfg+0x53/0xe0 [ice]
[ +0.000180]  ice_vsi_rebuild+0x239/0x540 [ice]
[ +0.000186]  ice_vsi_rebuild_by_type+0x76/0x180 [ice]
[ +0.000145]  ice_rebuild+0x18c/0x840 [ice]
[ +0.000145]  ? delay_tsc+0x4a/0xc0
[ +0.000022]  ? delay_tsc+0x92/0xc0
[ +0.000020]  ice_do_reset+0x140/0x180 [ice]
[ +0.000886]  ice_service_task+0x404/0x1030 [ice]
[ +0.000824]  process_one_work+0x171/0x340
[ +0.000685]  worker_thread+0x277/0x3a0
[ +0.000675]  ? preempt_count_add+0x6a/0xa0
[ +0.000677]  ? _raw_spin_lock_irqsave+0x23/0x50
[ +0.000679]  ? __pfx_worker_thread+0x10/0x10
[ +0.000653]  kthread+0xf0/0x120
[ +0.000635]  ? __pfx_kthread+0x10/0x10
[ +0.000616]  ret_from_fork+0x2d/0x50
[ +0.000612]  ? __pfx_kthread+0x10/0x10
[ +0.000604]  ret_from_fork_asm+0x1b/0x30
[ +0.000604]  </TASK>
The previous way of handling this through returning -EBUSY is not viable, particularly when destroying AF_XDP socket, because the kernel proceeds with removal anyway.
There is plenty of code between those calls and there is no need to create a large critical section that covers all of them, same as there is no need to protect ice_vsi_rebuild() with rtnl_lock().
Add xdp_state_lock mutex to protect ice_vsi_rebuild() and ice_xdp().
Leaving unprotected sections in between would result in two states that have to be considered:
1. when the VSI is closed, but not yet rebuilt
2. when the VSI is already rebuilt, but not yet open
The latter case is already handled through the !netif_running() case; we just need to adjust the flag checking a little. The former one is not as trivial, because a lot of hardware interaction happens between ice_vsi_close() and ice_vsi_rebuild(), which can make adding/deleting rings exit with an error. Luckily, a VSI rebuild is pending and can apply the new configuration for us in a managed fashion.
Therefore, add an additional VSI state flag ICE_VSI_REBUILD_PENDING to indicate that ice_xdp() can just hot-swap the program.
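A stand-alone sketch of the resulting decision (toy types, not the driver code): under xdp_state_lock, the program is hot-swapped whenever the rings already match the requested configuration or a reset-driven rebuild is pending; only otherwise do the XDP rings have to be torn down or set up.

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_vsi {
          bool xdp_rings_cfgd;  /* rings already configured for XDP */
          bool rebuild_pending; /* ICE_VSI_REBUILD_PENDING analogue */
  };

  /* returns true when only the program pointer needs to change */
  static bool toy_hot_swap_only(const struct toy_vsi *vsi, bool have_prog)
  {
          return vsi->xdp_rings_cfgd == have_prog || vsi->rebuild_pending;
  }

  int main(void)
  {
          struct toy_vsi running  = { .xdp_rings_cfgd = true,  .rebuild_pending = false };
          struct toy_vsi in_reset = { .xdp_rings_cfgd = false, .rebuild_pending = true  };

          /* prog -> prog swap with live traffic: no link toggle needed */
          printf("running VSI, new prog:   hot swap = %d\n", toy_hot_swap_only(&running, true));

          /* PF reset in flight: store the prog, the pending rebuild sets up the rings */
          printf("reset pending, new prog: hot swap = %d\n", toy_hot_swap_only(&in_reset, true));

          /* first attach on a VSI without XDP rings: rings must be built */
          running.xdp_rings_cfgd = false;
          printf("first attach:            hot swap = %d\n", toy_hot_swap_only(&running, true));
          return 0;
  }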
Also, as ice_vsi_rebuild() flow is touched in this patch, make it more consistent by deconfiguring VSI when coalesce allocation fails.
Fixes: 2d4238f55697 ("ice: Add support for AF_XDP")
Fixes: efc2214b6047 ("ice: Add support for XDP")
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Conflicts:
	drivers/net/ethernet/intel/ice/ice.h
	drivers/net/ethernet/intel/ice/ice_lib.c
	drivers/net/ethernet/intel/ice/ice_xsk.c
[conflicts due to not merged 1a1c40df2e80 ("ice: set and release switchdev environment"),
 conflicts due to not merged b3056ae2b578 ("ice: xsk: drop power of 2 ring size restriction for AF_XDP"),
 conflicts due to not merged a696d61528f0 ("ice: stop hard coding the ICE_VSI_CTRL location"),
 conflicts due to not merged 6624e780a577 ("ice: split ice_vsi_setup into smaller functions"),
 conflicts due to not merged 0db66d20f9cf ("ice: cleanup in VSI config/deconfig code")]
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 drivers/net/ethernet/intel/ice/ice.h      |  2 ++
 drivers/net/ethernet/intel/ice/ice_lib.c  | 24 ++++++++++++++++++-----
 drivers/net/ethernet/intel/ice/ice_main.c | 19 +++++++++++++-----
 drivers/net/ethernet/intel/ice/ice_xsk.c  |  3 ++-
 4 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a4c8ba4e7791..d4ef12f506b9 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -236,6 +236,7 @@ enum ice_vsi_state {
         ICE_VSI_MMAC_FLTR_CHANGED,
         ICE_VSI_VLAN_FLTR_CHANGED,
         ICE_VSI_PROMISC_CHANGED,
+        ICE_VSI_REBUILD_PENDING,
         ICE_VSI_STATE_NBITS             /* must be last */
 };

@@ -326,6 +327,7 @@ struct ice_vsi {
         struct ice_ring **xdp_rings;    /* XDP ring array */
         u16 num_xdp_txq;                /* Used XDP queues */
         u8 xdp_mapping_mode;            /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+        struct mutex xdp_state_lock;
         struct xsk_buff_pool **xsk_pools;
         u16 num_xsk_pools_used;
         u16 num_xsk_pools;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 575f1756a662..b7779e1dbd21 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -352,6 +352,7 @@ static int ice_vsi_clear(struct ice_vsi *vsi)
         pf->next_vsi = vsi->idx;

         ice_vsi_free_arrays(vsi);
+        mutex_destroy(&vsi->xdp_state_lock);
         mutex_unlock(&pf->sw_mutex);
         devm_kfree(dev, vsi);

@@ -475,6 +476,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
                 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
                                                  pf->next_vsi);
         }
+
+        mutex_init(&vsi->xdp_state_lock);
+
         goto unlock_pf;

 err_rings:
@@ -2881,10 +2885,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
         if (vsi->type == ICE_VSI_VF)
                 vf = &pf->vf[vsi->vf_id];

+        mutex_lock(&vsi->xdp_state_lock);
+
         coalesce = kcalloc(vsi->num_q_vectors,
                            sizeof(struct ice_coalesce_stored), GFP_KERNEL);
-        if (!coalesce)
-                return -ENOMEM;
+        if (!coalesce) {
+                ret = -ENOMEM;
+                goto unlock;
+        }

         prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

@@ -3000,13 +3008,16 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
                         ret = -EIO;
                         goto err_vectors;
                 } else {
-                        return ice_schedule_reset(pf, ICE_RESET_PFR);
+                        ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+                        if (ret)
+                                goto err_vectors;
+                        goto free_coalesce;
                 }
         }
         ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
-        kfree(coalesce);
+        clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);

-        return 0;
+        goto free_coalesce;

 err_vectors:
         ice_vsi_free_q_vectors(vsi);
@@ -3020,7 +3031,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 err_vsi:
         ice_vsi_clear(vsi);
         set_bit(__ICE_RESET_FAILED, pf->state);
+free_coalesce:
         kfree(coalesce);
+unlock:
+        mutex_unlock(&vsi->xdp_state_lock);
         return ret;
 }

diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 26daf2c474bb..30da82638c90 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -476,6 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
         /* clear SW filtering DB */
         ice_clear_hw_tbls(hw);
         /* disable the VSIs and their queues that are not already DOWN */
+        set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
         ice_pf_dis_all_vsi(pf, false);

         if (hw->port_info)
@@ -2528,7 +2529,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
         }

         /* hot swap progs and avoid toggling link */
-        if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+        if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
+            test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
                 ice_vsi_assign_bpf_prog(vsi, prog);
                 return 0;
         }
@@ -2593,21 +2595,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
         struct ice_netdev_priv *np = netdev_priv(dev);
         struct ice_vsi *vsi = np->vsi;
+        int ret;

         if (vsi->type != ICE_VSI_PF) {
                 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
                 return -EINVAL;
         }

+        mutex_lock(&vsi->xdp_state_lock);
+
         switch (xdp->command) {
         case XDP_SETUP_PROG:
-                return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+                ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+                break;
         case XDP_SETUP_XSK_POOL:
-                return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
-                                          xdp->xsk.queue_id);
+                ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
+                break;
         default:
-                return -EINVAL;
+                ret = -EINVAL;
         }
+
+        mutex_unlock(&vsi->xdp_state_lock);
+        return ret;
 }

 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 59963b901be0..a5d40d5d4ee2 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -384,7 +384,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
                 goto failure;
         }

-        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+        if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
+                     ice_is_xdp_ena_vsi(vsi);

         if (if_running) {
                 ret = ice_qp_dis(vsi, qid);