From: Junxian Huang <huangjunxian6@hisilicon.com>
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I63IM5
---------------------------------------------------------------------------
Applying this patch, RoCE driver will not set bond when NIC sets bond in a mode not supported for RoCE bonding, with VF slaves or with slaves from different I/O die.
Fixes: e62a20278f18 ("RDMA/hns: support RoCE bonding")
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_bond.c | 102 +++++++++++++++++-----
 drivers/infiniband/hw/hns/hns_roce_bond.h |  10 +++
 2 files changed, 92 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.c b/drivers/infiniband/hw/hns/hns_roce_bond.c
index 8d79ae73c4ce..a5ce6d32e74f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_bond.c
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.c
@@ -513,13 +513,13 @@ static bool hns_roce_bond_upper_event(struct hns_roce_dev *hr_dev,
 				      struct netdev_notifier_changeupper_info *info)
 {
 	struct hns_roce_bond_group *bond_grp = hr_dev->bond_grp;
+	struct netdev_lag_upper_info *bond_upper_info = NULL;
 	struct net_device *upper_dev = info->upper_dev;
-	struct netdev_lag_upper_info *bond_upper_info;
-	u32 pre_slave_map = bond_grp->slave_map;
-	u8 pre_slave_num = bond_grp->slave_num;
 	bool changed = false;
+	u32 pre_slave_map;
+	u8 pre_slave_num;
 
-	if (!upper_dev || !netif_is_lag_master(upper_dev))
+	if (!bond_grp || !upper_dev || !netif_is_lag_master(upper_dev))
 		return false;
 
 	if (info->linking)
@@ -530,15 +530,11 @@ static bool hns_roce_bond_upper_event(struct hns_roce_dev *hr_dev,
 	if (bond_upper_info)
 		bond_grp->tx_type = bond_upper_info->tx_type;
 
+	pre_slave_map = bond_grp->slave_map;
+	pre_slave_num = bond_grp->slave_num;
 	hns_roce_bond_info_record(bond_grp, upper_dev);
 
 	bond_grp->bond = netdev_priv(upper_dev);
-	if (!hns_roce_bond_mode_is_supported(bond_grp->tx_type) ||
-	    bond_grp->slave_num <= 1) {
-		changed = bond_grp->bond_ready;
-		bond_grp->bond_ready = false;
-		goto out;
-	}
 
 	if (bond_grp->bond_state == HNS_ROCE_BOND_NOT_BONDED) {
 		bond_grp->bond_ready = true;
@@ -553,7 +549,6 @@ static bool hns_roce_bond_upper_event(struct hns_roce_dev *hr_dev,
 		changed = true;
 	}
 
-out:
 	mutex_unlock(&bond_grp->bond_mutex);
 
 	return changed;
@@ -610,30 +605,93 @@ static bool hns_roce_is_bond_grp_exist(struct net_device *upper_dev)
 	return false;
 }
 
+static enum bond_support_type
+	check_bond_support(struct hns_roce_dev *hr_dev,
+			   struct netdev_notifier_changeupper_info *info)
+{
+	struct netdev_lag_upper_info *bond_upper_info = NULL;
+	struct net_device *upper_dev = info->upper_dev;
+	bool bond_grp_exist = false;
+	struct net_device *net_dev;
+	bool support = true;
+	u8 slave_num = 0;
+	int bus_num = -1;
+
+	if (hr_dev->bond_grp || hns_roce_is_bond_grp_exist(upper_dev))
+		bond_grp_exist = true;
+
+	if (!info->linking && !bond_grp_exist)
+		return BOND_NOT_SUPPORT;
+
+	if (info->linking)
+		bond_upper_info = info->upper_info;
+
+	if (bond_upper_info &&
+	    !hns_roce_bond_mode_is_supported(bond_upper_info->tx_type))
+		return BOND_NOT_SUPPORT;
+
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(upper_dev, net_dev) {
+		hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
+		if (hr_dev) {
+			slave_num++;
+			if (bus_num == -1)
+				bus_num = hr_dev->pci_dev->bus->number;
+			if (hr_dev->is_vf ||
+			    bus_num != hr_dev->pci_dev->bus->number) {
+				support = false;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	if (slave_num <= 1)
+		support = false;
+
+	if (support)
+		return BOND_SUPPORT;
+
+	return bond_grp_exist ? BOND_EXISTING_NOT_SUPPORT : BOND_NOT_SUPPORT;
+}
+
 int hns_roce_bond_event(struct notifier_block *self,
 			unsigned long event, void *ptr)
 {
 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
 	struct hns_roce_dev *hr_dev =
 		container_of(self, struct hns_roce_dev, bond_nb);
+	struct netdev_notifier_changeupper_info *info;
+	enum bond_support_type support = BOND_SUPPORT;
 	struct net_device *upper_dev;
 	bool changed;
 
 	if (event != NETDEV_CHANGEUPPER && event != NETDEV_CHANGELOWERSTATE)
 		return NOTIFY_DONE;
 
-	rcu_read_lock();
-	upper_dev = netdev_master_upper_dev_get_rcu(net_dev);
-	rcu_read_unlock();
-	if (event == NETDEV_CHANGELOWERSTATE && !upper_dev &&
-	    hr_dev != hns_roce_get_hrdev_by_netdev(net_dev))
-		return NOTIFY_DONE;
-	if (upper_dev) {
+	if (event == NETDEV_CHANGEUPPER) {
+		info = ptr;
+		support = check_bond_support(hr_dev, info);
+		if (support == BOND_NOT_SUPPORT)
+			return NOTIFY_DONE;
+		upper_dev = info->upper_dev;
+	} else {
+		rcu_read_lock();
+		upper_dev = netdev_master_upper_dev_get_rcu(net_dev);
+		rcu_read_unlock();
+		if (!upper_dev &&
+		    hr_dev != hns_roce_get_hrdev_by_netdev(net_dev))
+			return NOTIFY_DONE;
+	}
+
+	if (event == NETDEV_CHANGEUPPER) {
 		if (!hns_roce_is_slave(upper_dev, hr_dev->iboe.netdevs[0]))
 			return NOTIFY_DONE;
+
 		if (!hr_dev->bond_grp) {
 			if (hns_roce_is_bond_grp_exist(upper_dev))
 				return NOTIFY_DONE;
+
 			hr_dev->bond_grp = hns_roce_alloc_bond_grp(hr_dev,
 								   upper_dev);
 			if (!hr_dev->bond_grp) {
@@ -642,12 +700,16 @@ int hns_roce_bond_event(struct notifier_block *self,
 				return NOTIFY_DONE;
 			}
 		}
-	}
 
+		if (support == BOND_EXISTING_NOT_SUPPORT) {
+			hr_dev->bond_grp->bond_ready = false;
+			hns_roce_queue_bond_work(hr_dev, HZ);
+			return NOTIFY_DONE;
+		}
+	}
 	changed = (event == NETDEV_CHANGEUPPER) ?
 		  hns_roce_bond_upper_event(hr_dev, ptr) :
 		  hns_roce_bond_lowerstate_event(hr_dev, ptr);
-
 	if (changed)
 		hns_roce_queue_bond_work(hr_dev, HZ);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.h b/drivers/infiniband/hw/hns/hns_roce_bond.h
index a222f43ab124..3c5a8a770f6c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_bond.h
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.h
@@ -17,6 +17,16 @@ enum {
 	BOND_MODE_2_4,
 };
 
+enum bond_support_type {
+	BOND_NOT_SUPPORT,
+	/*
+	 * bond_grp already exists, but in the current
+	 * conditions it's no longer supported
+	 */
+	BOND_EXISTING_NOT_SUPPORT,
+	BOND_SUPPORT,
+};
+
 enum hns_roce_bond_state {
 	HNS_ROCE_BOND_NOT_BONDED,
 	HNS_ROCE_BOND_IS_BONDED,