From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
Set the faulty device's link down so that the bond device can fail over to the backup device.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/huawei/hinic/hinic_hw.h | 4 +- .../net/ethernet/huawei/hinic/hinic_hwdev.c | 22 +++-- drivers/net/ethernet/huawei/hinic/hinic_lld.c | 83 +++++++++++++++++-- .../net/ethernet/huawei/hinic/hinic_main.c | 8 ++ 4 files changed, 100 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw.h b/drivers/net/ethernet/huawei/hinic/hinic_hw.h index 6afe31c9b80e..c4d48ccf8f2a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw.h @@ -565,7 +565,8 @@ union hinic_fault_hw_mgmt { struct hinic_fault_event { /* enum hinic_fault_type */ u8 type; - u8 rsvd0[3]; + u8 fault_level; /* sdk write fault level for uld event */ + u8 rsvd0[2]; union hinic_fault_hw_mgmt event; };
@@ -653,6 +654,7 @@ enum hinic_event_type { HINIC_EVENT_MCTP_GET_HOST_INFO, HINIC_EVENT_MULTI_HOST_MGMT, HINIC_EVENT_INIT_MIGRATE_PF, + HINIC_EVENT_MGMT_WATCHDOG_EVENT, };
struct hinic_event_info { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c index 058939d05346..2d6a547c4d34 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c @@ -3537,9 +3537,10 @@ static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in, struct hinic_cmd_fault_event *fault_event; struct hinic_event_info event_info; struct hinic_fault_info_node *fault_node; + u8 fault_level;
if (in_size != sizeof(*fault_event)) { - sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld.\n", + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld\n", in_size, sizeof(*fault_event)); return; } @@ -3547,11 +3548,16 @@ static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in, fault_event = buf_in; fault_report_show(hwdev, &fault_event->event);
+ if (fault_event->event.type == HINIC_FAULT_SRC_HW_MGMT_CHIP) + fault_level = fault_event->event.event.chip.err_level; + else + fault_level = FAULT_LEVEL_FATAL; + if (hwdev->event_callback) { event_info.type = HINIC_EVENT_FAULT; memcpy(&event_info.info, &fault_event->event, sizeof(event_info.info)); - + event_info.info.fault_level = fault_level; hwdev->event_callback(hwdev->event_pri_handle, &event_info); }
@@ -3567,11 +3573,7 @@ static void fault_event_handler(struct hinic_hwdev *hwdev, void *buf_in, else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) fault_node->info.fault_src = HINIC_FAULT_SRC_HW_PHY_FAULT;
- if (fault_node->info.fault_src == HINIC_FAULT_SRC_HW_MGMT_CHIP) - fault_node->info.fault_lev = - fault_event->event.event.chip.err_level; - else - fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + fault_node->info.fault_lev = fault_level;
memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event, sizeof(union hinic_fault_hw_mgmt)); @@ -3811,10 +3813,16 @@ static void mgmt_watchdog_timeout_event_handler(struct hinic_hwdev *hwdev, void *buf_out, u16 *out_size) { struct hinic_fault_info_node *fault_node; + struct hinic_event_info event_info = { 0 };
sw_watchdog_timeout_info_show(hwdev, buf_in, in_size, buf_out, out_size);
+ if (hwdev->event_callback) { + event_info.type = HINIC_EVENT_MGMT_WATCHDOG_EVENT; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + /* refresh history fault info */ fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); if (!fault_node) { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c index 044d0df996e5..412f4c8a93ea 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_lld.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c @@ -1793,18 +1793,11 @@ static void __multi_host_mgmt(struct hinic_pcidev *dev, } }
-void hinic_event_process(void *adapter, struct hinic_event_info *event) +static void send_uld_dev_event(struct hinic_pcidev *dev, + struct hinic_event_info *event) { - struct hinic_pcidev *dev = adapter; enum hinic_service_type type;
- if (event->type == HINIC_EVENT_FMW_ACT_NTC) - return hinic_sync_time_to_fmw(dev); - else if (event->type == HINIC_EVENT_MCTP_GET_HOST_INFO) - return __mctp_get_host_info(dev, &event->mctp_info); - else if (event->type == HINIC_EVENT_MULTI_HOST_MGMT) - return __multi_host_mgmt(dev, &event->mhost_mgmt); - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { if (test_and_set_bit(type, &dev->state)) { sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler, %s is in detach\n", @@ -1819,6 +1812,78 @@ void hinic_event_process(void *adapter, struct hinic_event_info *event) } }
+static void send_event_to_all_pf(struct hinic_pcidev *dev, + struct hinic_event_info *event) +{ + struct hinic_pcidev *des_dev = NULL; + + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + continue; + + if (hinic_func_type(des_dev->hwdev) == TYPE_VF) + continue; + + send_uld_dev_event(des_dev, event); + } + lld_dev_put(); +} + +static void send_event_to_dst_pf(struct hinic_pcidev *dev, u16 func_id, + struct hinic_event_info *event) +{ + struct hinic_pcidev *des_dev = NULL; + + lld_dev_hold(); + list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + continue; + + if (hinic_func_type(des_dev->hwdev) == TYPE_VF) + continue; + + if (hinic_global_func_id(des_dev->hwdev) == func_id) { + send_uld_dev_event(des_dev, event); + break; + } + } + lld_dev_put(); +} + +void hinic_event_process(void *adapter, struct hinic_event_info *event) +{ + struct hinic_pcidev *dev = adapter; + u16 func_id; + + switch (event->type) { + case HINIC_EVENT_FMW_ACT_NTC: + hinic_sync_time_to_fmw(dev); + break; + case HINIC_EVENT_MCTP_GET_HOST_INFO: + __mctp_get_host_info(dev, &event->mctp_info); + break; + case HINIC_EVENT_MULTI_HOST_MGMT: + __multi_host_mgmt(dev, &event->mhost_mgmt); + break; + case HINIC_EVENT_FAULT: + if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && + event->info.event.chip.func_id < HINIC_MAX_PF_NUM) { + func_id = event->info.event.chip.func_id; + send_event_to_dst_pf(adapter, func_id, event); + } else { + send_uld_dev_event(adapter, event); + } + break; + case HINIC_EVENT_MGMT_WATCHDOG_EVENT: + send_event_to_all_pf(adapter, event); + break; + default: + send_uld_dev_event(adapter, event); + break; + } +} + static int mapping_bar(struct pci_dev *pdev, struct hinic_pcidev *pci_adapter) { u32 db_dwqe_size; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c 
index 8dc7b555f0bf..e0635287bf05 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -3026,8 +3026,13 @@ void nic_event(struct hinic_lld_dev *lld_dev, void *adapter, break; case HINIC_EVENT_HEART_LOST: hinic_heart_lost(nic_dev); + hinic_link_status_change(nic_dev, false); break; case HINIC_EVENT_FAULT: + if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && + event->info.event.chip.func_id == + hinic_global_func_id(lld_dev->hwdev)) + hinic_link_status_change(nic_dev, false); break; case HINIC_EVENT_DCB_STATE_CHANGE: if (nic_dev->default_cos_id == event->dcb_state.default_cos) @@ -3047,6 +3052,9 @@ void nic_event(struct hinic_lld_dev *lld_dev, void *adapter, case HINIC_EVENT_PORT_MODULE_EVENT: hinic_port_module_event_handler(nic_dev, event); break; + case HINIC_EVENT_MGMT_WATCHDOG_EVENT: + hinic_link_status_change(nic_dev, false); + break; default: break; }
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
Hinic only supports csum offload for vxlan/ipip tunnel packets. For tunnel packets whose csum offload is unsupported, use skb_checksum_help() to perform the csum calculation in software and increment the corresponding statistics counter.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 1 + drivers/net/ethernet/huawei/hinic/hinic_tx.c | 25 +++++++++++++------ drivers/net/ethernet/huawei/hinic/hinic_tx.h | 1 + 3 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 1a745cd38afd..0b9ce861c981 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -141,6 +141,7 @@ static struct hinic_stats hinic_tx_queue_stats_extern[] = { HINIC_TXQ_STAT(map_cpy_frag_err), HINIC_TXQ_STAT(map_frag_err), HINIC_TXQ_STAT(frag_size_err), + HINIC_TXQ_STAT(unknown_tunnel_pkt), };/*lint -restore*/
#define HINIC_FUNC_STAT(_stat_item) { \ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index b242b7ddde04..b556132a72af 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -94,6 +94,7 @@ void hinic_txq_clean_stats(struct hinic_txq_stats *txq_stats) txq_stats->map_cpy_frag_err = 0; txq_stats->map_frag_err = 0; txq_stats->frag_size_err = 0; + txq_stats->unknown_tunnel_pkt = 0; u64_stats_update_end(&txq_stats->syncp); }
@@ -336,8 +337,8 @@ static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4, } }
-static int hinic_tx_csum(struct hinic_sq_task *task, u32 *queue_info, - struct sk_buff *skb) +static int hinic_tx_csum(struct hinic_txq *txq, struct hinic_sq_task *task, + u32 *queue_info, struct sk_buff *skb) { union hinic_ip ip; union hinic_l4 l4; @@ -378,19 +379,28 @@ static int hinic_tx_csum(struct hinic_sq_task *task, u32 *queue_info, hinic_task_set_outter_l3(task, l3_type, skb_network_header_len(skb));
- if (l4_proto == IPPROTO_UDP || l4_proto == IPPROTO_GRE) { + switch (l4_proto) { + case IPPROTO_UDP: l4_tunnel_len = skb_inner_network_offset(skb) - skb_transport_offset(skb); ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); network_hdr_len = skb_inner_network_header_len(skb); - } else { + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: tunnel_type = NOT_TUNNEL; l4_tunnel_len = 0;
ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_network_header_len(skb); + break; + default: + TXQ_STATS_INC(txq, unknown_tunnel_pkt); + /* Unsupport tunnel packet, disable csum offload */ + skb_checksum_help(skb); + return 0; }
hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); @@ -494,7 +504,8 @@ static int hinic_tso(struct hinic_sq_task *task, u32 *queue_info, return 1; }
-static enum tx_offload_type hinic_tx_offload(struct sk_buff *skb, +static enum tx_offload_type hinic_tx_offload(struct hinic_txq *txq, + struct sk_buff *skb, struct hinic_sq_task *task, u32 *queue_info, u8 avd_flag) { @@ -513,7 +524,7 @@ static enum tx_offload_type hinic_tx_offload(struct sk_buff *skb, } else if (tso_cs_en) { offload |= TX_OFFLOAD_TSO; } else { - tso_cs_en = hinic_tx_csum(task, queue_info, skb); + tso_cs_en = hinic_tx_csum(txq, task, queue_info, skb); if (tso_cs_en) offload |= TX_OFFLOAD_CSUM; } @@ -721,7 +732,7 @@ static netdev_tx_t hinic_send_one_skb(struct sk_buff *skb,
__get_pkt_stats(tx_info, skb);
- offload = hinic_tx_offload(skb, &wqe->task, &queue_info, avd_flag); + offload = hinic_tx_offload(txq, skb, &wqe->task, &queue_info, avd_flag); if (unlikely(offload == TX_OFFLOAD_INVALID)) { hinic_return_sq_wqe(nic_dev->hwdev, q_id, wqebb_cnt, owner); TXQ_STATS_INC(txq, offload_cow_skb_err); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index 8d95019528f9..3e7ff1538b99 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -43,6 +43,7 @@ struct hinic_txq_stats { u64 map_cpy_frag_err; u64 map_frag_err; u64 frag_size_err; + u64 unknown_tunnel_pkt;
struct u64_stats_sync syncp; };
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
When setting multiple features at a time using ethtool -K, if some commands succeed and some fail, the driver only takes the result of the last command as the overall result.
If any command fails, make the set_features interface return failure and update the netdev->features attribute to reflect only the features that were actually set.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/huawei/hinic/hinic_main.c | 152 +++++++++++++----- 1 file changed, 115 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index e0635287bf05..30a51984d9fd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1382,58 +1382,136 @@ hinic_vlan_rx_kill_vid(struct net_device *netdev, return err; }
-static int set_features(struct hinic_nic_dev *nic_dev, - netdev_features_t pre_features, - netdev_features_t features, bool force_change) +#define FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable") + +static int set_feature_tso(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) { - netdev_features_t changed = force_change ? ~0 : pre_features ^ features; - u8 rxvlan_changed = !!(changed & NETIF_F_HW_VLAN_CTAG_RX); - u8 rxvlan_en = !!(features & NETIF_F_HW_VLAN_CTAG_RX); - u32 lro_timer, lro_buf_size; - int err = 0; + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_TSO); + int err;
- if (changed & NETIF_F_TSO) { - err = hinic_set_tx_tso(nic_dev->hwdev, - !!(features & NETIF_F_TSO)); - hinic_info(nic_dev, drv, "%s tso %s\n", - (features & NETIF_F_TSO) ? "Enable" : "Disable", - err ? "failed" : "success"); + if (!(changed & NETIF_F_TSO)) + return 0; + + err = hinic_set_tx_tso(nic_dev->hwdev, en); + if (err) { + hinic_err(nic_dev, drv, "%s tso failed\n", FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_TSO; + } else { + hinic_info(nic_dev, drv, "%s tso success\n", + FEATURES_OP_STR(en)); }
- if (rxvlan_changed) { - err = hinic_set_rx_vlan_offload(nic_dev->hwdev, rxvlan_en); - hinic_info(nic_dev, drv, "%s rxvlan %s\n", - rxvlan_en ? "Enable" : "Disable", - err ? "failed" : "success"); + return err; +} + +static int set_feature_cvlan(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; + bool en = !!(wanted_features & vlan_feature); + int err; + + if (!(changed & vlan_feature)) + return 0; + + err = hinic_set_rx_vlan_offload(nic_dev->hwdev, en); + if (err) { + hinic_err(nic_dev, drv, "%s rxvlan failed\n", + FEATURES_OP_STR(en)); + *failed_features |= vlan_feature; + } else { + hinic_info(nic_dev, drv, "%s rxvlan success\n", + FEATURES_OP_STR(en)); }
- if (changed & NETIF_F_RXCSUM) { - /* hw should always enable rx csum */ - u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + return err; +} + +static int set_feature_rxcsum(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_RXCSUM); + int err; + + if (!(changed & NETIF_F_RXCSUM)) + return 0;
- err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en); - hinic_info(nic_dev, drv, "%s rx csum %s\n", - (features & NETIF_F_RXCSUM) ? "Enable" : "Disable", - err ? "failed" : "success"); + /* hw should always enable rx csum */ + err = hinic_set_rx_csum_offload(nic_dev->hwdev, + HINIC_RX_CSUM_OFFLOAD_EN); + if (err) { + hinic_err(nic_dev, drv, "%s rx csum failed\n", + FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_RXCSUM; + } else { + hinic_info(nic_dev, drv, "%s rx csum success\n", + FEATURES_OP_STR(en)); }
- if (changed & NETIF_F_LRO) { - lro_timer = nic_dev->adaptive_cfg.lro.timer; - lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size; + return err; +} + +static int set_feature_lro(struct hinic_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = !!(wanted_features & NETIF_F_LRO); + u32 lro_timer, lro_buf_size; + int err; + + if (!(changed & NETIF_F_LRO)) + return 0;
- err = hinic_set_rx_lro_state(nic_dev->hwdev, - !!(features & NETIF_F_LRO), - lro_timer, - lro_buf_size / - nic_dev->rx_buff_len); - hinic_info(nic_dev, drv, "%s lro %s\n", - (features & NETIF_F_LRO) ? "Enable" : "Disable", - err ? "failed" : "success"); + lro_timer = nic_dev->adaptive_cfg.lro.timer; + lro_buf_size = nic_dev->adaptive_cfg.lro.buffer_size; + err = hinic_set_rx_lro_state(nic_dev->hwdev, en, lro_timer, + lro_buf_size / nic_dev->rx_buff_len); + if (err) { + hinic_err(nic_dev, drv, "%s lro failed\n", FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_LRO; + } else { + hinic_info(nic_dev, drv, "%s lro success\n", + FEATURES_OP_STR(en)); }
return err; }
+static int set_features(struct hinic_nic_dev *nic_dev, + netdev_features_t pre_features, + netdev_features_t features, bool force_change) +{ + netdev_features_t failed_features = 0; + u32 err; + + err = (u32)set_feature_tso(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_cvlan(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_rxcsum(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_lro(nic_dev, features, pre_features, + &failed_features); + if (err) { + nic_dev->netdev->features = features ^ failed_features; + return -EIO; + } + + return 0; +} + static int hinic_set_features(struct net_device *netdev, netdev_features_t features) {
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
It is not allowed to send a message to the firmware during the firmware upgrade process. If a message is sent during the upgrade, it will probabilistically cause the message to time out, so sleep for 2s to wait for the firmware's message processing to complete.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/huawei/hinic/hinic_hwdev.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c index 2d6a547c4d34..ed2fce3363a8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hwdev.c @@ -849,6 +849,10 @@ static int __pf_to_mgmt_pre_handle(struct hinic_hwdev *hwdev, /* Set channel invalid, don't allowed to send other cmd */ if (mod == HINIC_MOD_COMM && cmd == HINIC_MGMT_CMD_ACTIVATE_FW) { hinic_set_mgmt_channel_status(hwdev, true); + + /* Sleep 2s wait other pf's mgmt messages to complete */ + msleep(2000); + /* stop heartbeat enhanced detection temporary, and will * restart in firmware active event when mgmt is resetted */
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
Use alloc_chrdev_region to automatically allocate a major number.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/huawei/hinic/hinic_nictool.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c index 01653891b72c..54184bdb3fc8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c @@ -36,8 +36,7 @@ #define HIADM_DEV_CLASS "nictool_class" #define HIADM_DEV_NAME "nictool_dev"
-#define MAJOR_DEV_NUM 921 -#define HINIC_CMDQ_BUF_MAX_SIZE 2048U +#define HINIC_CMDQ_BUF_MAX_SIZE 2048U #define MSG_MAX_IN_SIZE (2048 * 1024) #define MSG_MAX_OUT_SIZE (2048 * 1024)
@@ -2353,19 +2352,10 @@ int nictool_k_init(void) return 0; }
- /* Device ID: primary device ID (12bit) | - * secondary device number (20bit) - */ - g_dev_id = MKDEV(MAJOR_DEV_NUM, 0); - - /* Static device registration number */ - ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME); + ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); if (ret < 0) { - ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); - if (ret < 0) { - pr_err("Register nictool_dev fail(0x%x)\n", ret); - return ret; - } + pr_err("Register nictool_dev fail(0x%x)\n", ret); + return ret; }
/* Create equipment */
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
When the VF loses an interrupt, the driver may mistakenly conclude that the message has timed out; in that case, reschedule the event-queue work to try to obtain the ack.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/huawei/hinic/hinic_cmdq.c | 25 +++++++- drivers/net/ethernet/huawei/hinic/hinic_eqs.c | 18 ++++++ drivers/net/ethernet/huawei/hinic/hinic_eqs.h | 4 ++ .../net/ethernet/huawei/hinic/hinic_mbox.c | 58 +++++++++++++++---- 4 files changed, 93 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c index d7802caca0a6..61bf9a829d0d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_cmdq.c @@ -38,6 +38,7 @@ #include "hinic_cmdq.h"
#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ +#define CMDQ_CMD_RETRY_TIMEOUT 1000 /* millisecond */
#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) #define LOWER_8_BITS(data) ((data) & 0xFF) @@ -557,6 +558,24 @@ static void __clear_cmd_info(struct hinic_cmdq_cmd_info *cmd_info, cmd_info->direct_resp = NULL; }
+static int cmdq_retry_get_ack(struct hinic_hwdev *hwdev, + struct completion *done, u8 ceq_id) +{ + ulong timeo = msecs_to_jiffies(CMDQ_CMD_RETRY_TIMEOUT); + int err; + + init_completion(done); + + err = hinic_reschedule_eq(hwdev, HINIC_CEQ, ceq_id); + if (err) + return err; + + if (!wait_for_completion_timeout(done, timeo)) + return -ETIMEDOUT; + + return 0; +} + static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, enum hinic_ack_type ack_type, enum hinic_mod_type mod, u8 cmd, @@ -634,7 +653,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock);
timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); - if (!wait_for_completion_timeout(&done, timeo)) { + if (!wait_for_completion_timeout(&done, timeo) && + cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) { spin_lock_bh(&cmdq->cmdq_lock);
if (cmd_info->cmpt_code == &cmpt_code) @@ -759,7 +779,8 @@ static int cmdq_sync_cmd_detail_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock);
timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); - if (!wait_for_completion_timeout(&done, timeo)) { + if (!wait_for_completion_timeout(&done, timeo) && + cmdq_retry_get_ack(cmdq->hwdev, &done, HINIC_CEQ_ID_CMDQ)) { spin_lock_bh(&cmdq->cmdq_lock);
if (cmd_info->cmpt_code == &cmpt_code) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c index b8ea8ed36b5b..ccac811f948d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.c @@ -639,6 +639,24 @@ static void reschedule_eq_handler(struct hinic_eq *eq) } }
+int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type, + u16 eq_id) +{ + if (type == HINIC_AEQ) { + if (eq_id >= hwdev->aeqs->num_aeqs) + return -EINVAL; + + reschedule_eq_handler(&hwdev->aeqs->aeq[eq_id]); + } else { + if (eq_id >= hwdev->ceqs->num_ceqs) + return -EINVAL; + + reschedule_eq_handler(&hwdev->ceqs->ceq[eq_id]); + } + + return 0; +} + /** * eq_irq_handler - handler for the eq event * @data: the event queue of the event diff --git a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h index 5035f90c6ad6..dec541995ee2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_eqs.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_eqs.h @@ -20,6 +20,7 @@
#define HINIC_EQ_PAGE_SIZE 0x00001000
+#define HINIC_HW_MAX_AEQS 4 #define HINIC_MAX_AEQS 3 #define HINIC_MAX_CEQS 32
@@ -179,4 +180,7 @@ void hinic_dump_ceq_info(struct hinic_hwdev *hwdev);
void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+int hinic_reschedule_eq(struct hinic_hwdev *hwdev, enum hinic_eq_type type, + u16 eq_id); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c index bbc16cc38b33..7e77d0319931 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_mbox.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_mbox.c @@ -29,6 +29,7 @@ #include "hinic_hwdev.h" #include "hinic_csr.h" #include "hinic_hwif.h" +#include "hinic_eqs.h" #include "hinic_mbox.h"
#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 @@ -112,6 +113,7 @@ enum hinic_mbox_tx_status { #define HINIC_MBOX_SEG_LEN 48 #define HINIC_MBOX_COMP_TIME 8000U #define MBOX_MSG_POLLING_TIMEOUT 8000 +#define MBOX_MSG_RETRY_ACK_TIMEOUT 1000
#define HINIC_MBOX_DATA_SIZE 2040
@@ -975,14 +977,11 @@ static void mbox_copy_send_data(struct hinic_hwdev *hwdev, }
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, - u16 dst_func, u16 dst_aeqn, u16 seg_len, - int poll) + u16 dst_func, u16 dst_aeqn, u16 rsp_aeq, + u16 seg_len, int poll) { u32 mbox_int, mbox_ctrl;
- /* msg_len - the total mbox msg len */ - u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN; - mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) | @@ -1030,6 +1029,38 @@ static u16 get_mbox_status(struct hinic_send_mbox *mbox) return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); }
+static u16 mbox_msg_ack_aeqn(struct hinic_hwdev *hwdev, + enum hinic_hwif_direction_type seq_dir) +{ + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn; + + if (num_aeqs >= HINIC_HW_MAX_AEQS) + dst_aeqn = HINIC_MBOX_RSP_AEQN; + else + dst_aeqn = 0; + + return dst_aeqn; +} + +static int mbox_retry_get_ack(struct hinic_mbox_func_to_func *func_to_func, + struct completion *done, u16 aeq_id) +{ + ulong timeo = msecs_to_jiffies(MBOX_MSG_RETRY_ACK_TIMEOUT); + int err; + + init_completion(done); + + err = hinic_reschedule_eq(func_to_func->hwdev, HINIC_AEQ, aeq_id); + if (err) + return err; + + if (!wait_for_completion_timeout(done, timeo)) + return -ETIMEDOUT; + + return 0; +} + static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, u64 header, u16 dst_func, void *seg, u16 seg_len, int poll, void *msg_info) @@ -1037,18 +1068,20 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; struct hinic_hwdev *hwdev = func_to_func->hwdev; u8 num_aeqs = hwdev->hwif->attr.num_aeqs; - u16 dst_aeqn, wb_status = 0, errcode; + u16 dst_aeqn, wb_status = 0, errcode, rsp_aeq; u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); struct completion *done = &send_mbox->send_done; ulong jif; u32 cnt = 0;
- if (num_aeqs >= 4) + if (num_aeqs >= HINIC_HW_MAX_AEQS) dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; else dst_aeqn = 0;
+ rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN; + if (!poll) init_completion(done);
@@ -1058,7 +1091,8 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
- write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll); + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, rsp_aeq, + seg_len, poll);
wmb(); /* writing the mbox msg attributes */
@@ -1080,7 +1114,8 @@ static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, } } else { jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME); - if (!wait_for_completion_timeout(done, jif)) { + if (!wait_for_completion_timeout(done, jif) && + mbox_retry_get_ack(func_to_func, done, rsp_aeq)) { sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout\n"); dump_mox_reg(hwdev); return -ETIMEDOUT; @@ -1206,7 +1241,10 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, }
timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME); - if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) { + if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo) && + mbox_retry_get_ack(func_to_func, &mbox_for_resp->recv_done, + mbox_msg_ack_aeqn(func_to_func->hwdev, + HINIC_HWIF_DIRECT_SEND))) { set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); sdk_err(func_to_func->hwdev->dev_hdl, "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);