From: Jian Shen shenjian15@huawei.com
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
----------------------------
Currently, when the host sets a VF untrusted, the driver disables the promisc mode of that VF immediately. This may conflict with a concurrent request from the VF asking the host to set promisc mode. So fix it by changing the promisc mode for the VF asynchronously. With this change, the promisc mode of the VF can also be restored when the VF becomes trusted again.
Fixes: 5c1f0ff21058 ("net: hns3: add support for setting VF trust")
Signed-off-by: Jian Shen shenjian15@huawei.com Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../hisilicon/hns3/hns3pf/hclge_main.c | 42 +++++++++++++------ .../hisilicon/hns3/hns3pf/hclge_main.h | 6 ++- .../hisilicon/hns3/hns3pf/hclge_mbx.c | 40 ++++-------------- 3 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f3d728aa54bf3..23273f08ab50f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10639,8 +10639,6 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 new_trusted = enable ? 1 : 0; - bool en_bc_pmc; - int ret;
vport = hclge_get_vf_vport(hdev, vf); if (!vport) @@ -10649,19 +10647,11 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) if (vport->vf_info.trusted == new_trusted) return 0;
- /* Disable promisc mode for VF if it is not trusted any more. */ - if (!enable && vport->vf_info.promisc_enable) { - en_bc_pmc = hdev->pdev->revision != 0x20; - ret = hclge_set_vport_promisc_mode(vport, false, false, - en_bc_pmc); - if (ret) - return ret; - vport->vf_info.promisc_enable = 0; - hclge_inform_vf_promisc_info(vport); - } - vport->vf_info.trusted = new_trusted;
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); + return 0; }
@@ -11535,6 +11525,7 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev) u8 tmp_flags = 0; bool vlan_en; int ret; + int i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) { set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); @@ -11555,6 +11546,31 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev) hclge_enable_vlan_filter(handle, vlan_en); } } + + for (i = 1; i < hdev->num_alloc_vport; i++) { + bool uc_en = false; + bool mc_en = false; + bool bc_en; + + vport = &hdev->vport[i]; + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + continue; + + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0; + mc_en = vport->vf_info.request_mc_en > 0; + } + bc_en = vport->vf_info.request_bc_en > 0; + + ret = hclge_set_vport_promisc_mode(vport, uc_en, mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state); + return; + } + } }
static bool hclge_module_existed(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 8e7dff25cbfd5..4a664801fbe92 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -904,6 +904,7 @@ struct hclge_rss_tuple_cfg { enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_ALIVE, HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + HCLGE_VPORT_STATE_PROMISC_CHANGE, HCLGE_VPORT_STATE_MAX };
@@ -926,7 +927,9 @@ struct hclge_vf_info { u32 spoofchk; u32 max_tx_rate; u32 trusted; - u16 promisc_enable; + u8 request_uc_en; + u8 request_mc_en; + u8 request_bc_en; };
struct hclge_vport { @@ -1051,7 +1054,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc); void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type); -void hclge_inform_vf_promisc_info(struct hclge_vport *vport); void hclge_dbg_dump_rst_info(struct hclge_dev *hdev); bool hclge_vf_vlan_need_enable(struct hclge_vport *vport); int hclge_push_vf_link_status(struct hclge_vport *vport); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e998322d59aae..d2bfa5f7f22f1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -321,35 +321,17 @@ static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, return ret; }
-static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *req) +static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) { - bool en_bc = req->msg.en_bc ? true : false; - bool en_uc = req->msg.en_uc ? true : false; - bool en_mc = req->msg.en_mc ? true : false; - int ret; - - if (!vport->vf_info.trusted) { - en_uc = false; - en_mc = false; - } - - ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc); - - vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0; - - return ret; -} - -void hclge_inform_vf_promisc_info(struct hclge_vport *vport) -{ - u8 dest_vfid = (u8)vport->vport_id; - u8 msg_data[2]; + struct hclge_dev *hdev = vport->back;
- memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16)); + vport->vf_info.request_uc_en = req->msg.en_uc; + vport->vf_info.request_mc_en = req->msg.en_mc; + vport->vf_info.request_bc_en = req->msg.en_bc;
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), - HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid); + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); }
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, @@ -837,11 +819,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_PROMISC_MODE: - ret = hclge_set_vf_promisc_mode(vport, req); - if (ret) - dev_err(&hdev->pdev->dev, - "PF fail(%d) to set VF promisc mode\n", - ret); + hclge_set_vf_promisc_mode(vport, req); break; case HCLGE_MBX_SET_UNICAST: ret = hclge_set_vf_uc_mac_addr(vport, req);
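As an illustration of the deferred-update pattern the patch switches to (a minimal sketch with hypothetical names, not the driver's own structures): the request path only records what the VF asked for and marks the vport as changed, and the periodic service task later applies the request while enforcing the current trust setting.

/* kernel-style sketch; assumes <linux/bitops.h> and a hypothetical
 * apply_promisc_to_hw() helper that issues the firmware command
 */
struct vport_promisc {
        unsigned long state;                    /* bit 0: promisc change pending */
        bool trusted;
        u8 request_uc_en, request_mc_en, request_bc_en;
};

/* mailbox / untrust path: record the request, never touch hw directly */
static void record_promisc_request(struct vport_promisc *v, bool uc, bool mc, bool bc)
{
        v->request_uc_en = uc;
        v->request_mc_en = mc;
        v->request_bc_en = bc;
        set_bit(0, &v->state);                  /* picked up by the service task */
}

/* service task: apply the request, honouring the trust setting */
static int sync_promisc(struct vport_promisc *v)
{
        bool uc, mc;

        if (!test_and_clear_bit(0, &v->state))
                return 0;

        uc = v->trusted && v->request_uc_en;
        mc = v->trusted && v->request_mc_en;
        return apply_promisc_to_hw(v, uc, mc, v->request_bc_en);
}

If the hardware write fails, the pending bit can simply be set again so the next run of the service task retries, which is what the patch does in hclge_sync_promisc_mode().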
From: Jiaran Zhang zhangjiaran@huawei.com
driver inclusion
category: feature
bugzilla: NA
CVE: NA
----------------------------
The reset interface multiplexed by the flr/resume paths will be renamed reset_done in a later patch of this series, so rename the current reset_done interface to reset_end to avoid the name clash.
Signed-off-by: Jiaran Zhang zhangjiaran@huawei.com Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- .../hns3/hns3_extension/hns3pf/hclge_main_it.c | 4 ++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e9fe1e6662905..d5f5e75f43dad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -549,7 +549,7 @@ struct hnae3_ae_ops { void (*set_timer_task)(struct hnae3_handle *handle, bool enable); int (*mac_connect_phy)(struct hnae3_handle *handle); void (*mac_disconnect_phy)(struct hnae3_handle *handle); - bool (*reset_done)(struct hnae3_handle *handle, bool done); + bool (*reset_end)(struct hnae3_handle *handle, bool done); void (*handle_imp_error)(struct hnae3_handle *handle); int (*get_vf_config)(struct hnae3_handle *handle, int vf, struct ifla_vf_info *ivf); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c index 59e287848ee36..7ac18e21a8bdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_extension/hns3pf/hclge_main_it.c @@ -145,7 +145,7 @@ void hclge_reset_event_it(struct pci_dev *pdev, struct hnae3_handle *handle) } }
-bool hclge_reset_done_it(struct hnae3_handle *handle, bool done) +bool hclge_reset_end_it(struct hnae3_handle *handle, bool done) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -210,7 +210,7 @@ int hclge_init_it(void) #endif
hclge_ops.reset_event = hclge_reset_event_it; - hclge_ops.reset_done = hclge_reset_done_it; + hclge_ops.reset_end = hclge_reset_end_it; hclge_ops.handle_imp_error = hclge_handle_imp_error_it;
return hclge_init(); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 23273f08ab50f..d5b6563cce739 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3705,8 +3705,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) /* recover the handshake status when reset fail */ hclge_reset_handshake(hdev, true);
- if (handle && handle->ae_algo->ops->reset_done) - handle->ae_algo->ops->reset_done(handle, false); + if (handle && handle->ae_algo->ops->reset_end) + handle->ae_algo->ops->reset_end(handle, false);
hclge_dbg_dump_rst_info(hdev);
@@ -3859,8 +3859,8 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) if (reset_level != HNAE3_NONE_RESET) set_bit(reset_level, &hdev->reset_request);
- if (handle && handle->ae_algo->ops->reset_done) - handle->ae_algo->ops->reset_done(handle, true); + if (handle && handle->ae_algo->ops->reset_end) + handle->ae_algo->ops->reset_end(handle, true);
return 0; } @@ -4018,7 +4018,7 @@ static void hclge_reset_timer(struct timer_list *t) hclge_reset_event(hdev->pdev, NULL); }
-static bool hclge_reset_done(struct hnae3_handle *handle, bool done) +static bool hclge_reset_end(struct hnae3_handle *handle, bool done) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -11761,7 +11761,7 @@ struct hnae3_ae_ops hclge_ops = { .set_timer_task = hclge_set_timer_task, .mac_connect_phy = hclge_mac_connect_phy, .mac_disconnect_phy = hclge_mac_disconnect_phy, - .reset_done = hclge_reset_done, + .reset_end = hclge_reset_end, .get_vf_config = hclge_get_vf_config, .set_vf_link_state = hclge_set_vf_link_state, .set_vf_spoofchk = hclge_set_vf_spoofchk,
From: Jiaran Zhang zhangjiaran@huawei.com
mainline inclusion
from mainline-v5.12-rc8
commit bb1890d5f97425766a865d75f99fd556d5dc6893
category: feature
bugzilla: NA
CVE: NA
----------------------------
The flr_prepare/flr_done functions are not only used in the FLR scenario, but also in the suspend/resume scenario.

Change the function names to prepare_for_reset/rebuild_for_reset, and change flr_prepare/flr_done to reset_prepare/reset_done in hnae3_ae_ops.
Signed-off-by: Jiaran Zhang zhangjiaran@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 5 ++-- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 8 +++--- .../hisilicon/hns3/hns3pf/hclge_main.c | 27 ++++++++++--------- .../hisilicon/hns3/hns3vf/hclgevf_main.c | 27 ++++++++++--------- 4 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d5f5e75f43dad..9c3163e837222 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -388,8 +388,9 @@ struct hnae3_ae_dev { struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - void (*flr_prepare)(struct hnae3_ae_dev *ae_dev); - void (*flr_done)(struct hnae3_ae_dev *ae_dev); + void (*reset_prepare)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); + void (*reset_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 212e1ceb15675..a0e6553a2fd80 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2294,8 +2294,8 @@ static void hns3_reset_prepare(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "FLR prepare\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) - ae_dev->ops->flr_prepare(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); }
static void hns3_reset_done(struct pci_dev *pdev) @@ -2303,8 +2303,8 @@ static void hns3_reset_done(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "FLR done\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) - ae_dev->ops->flr_done(ae_dev); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); }
struct pci_error_handlers hns3_err_handler = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d5b6563cce739..41ec86a8760cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10260,10 +10260,11 @@ static void hclge_state_uninit(struct hclge_dev *hdev) cancel_delayed_work_sync(&hdev->service_task); }
-static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) +static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) { -#define HCLGE_FLR_RETRY_WAIT_MS 500 -#define HCLGE_FLR_RETRY_CNT 5 +#define HCLGE_RESET_RETRY_WAIT_MS 500 +#define HCLGE_RESET_RETRY_CNT 5
struct hclge_dev *hdev = ae_dev->priv; int retry_cnt = 0; @@ -10272,30 +10273,32 @@ static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) retry: down(&hdev->reset_sem); set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = HNAE3_FLR_RESET; + hdev->reset_type = rst_type; ret = hclge_reset_prepare(hdev); if (ret || hdev->reset_pending) { - dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", ret); if (hdev->reset_pending || - retry_cnt++ < HCLGE_FLR_RETRY_CNT) { + retry_cnt++ < HCLGE_RESET_RETRY_CNT) { dev_err(&hdev->pdev->dev, "reset_pending:0x%lx, retry_cnt:%d\n", hdev->reset_pending, retry_cnt); clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); up(&hdev->reset_sem); - msleep(HCLGE_FLR_RETRY_WAIT_MS); + msleep(HCLGE_RESET_RETRY_WAIT_MS); goto retry; } }
- /* disable misc vector before FLR done */ + /* disable misc vector before reset done */ hclge_enable_vector(&hdev->misc_vector, false); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - hdev->rst_stats.flr_rst_cnt++; + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; }
-static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) +static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; int ret; @@ -11677,8 +11680,8 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, - .flr_prepare = hclge_flr_prepare, - .flr_done = hclge_flr_done, + .reset_prepare = hclge_reset_prepare_general, + .reset_done = hclge_reset_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0ec7ac022b293..c8c076e4bebd4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2028,10 +2028,11 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) writel(en ? 1 : 0, vector->addr); }
-static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) +static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) { -#define HCLGEVF_FLR_RETRY_WAIT_MS 500 -#define HCLGEVF_FLR_RETRY_CNT 5 +#define HCLGEVF_RESET_RETRY_WAIT_MS 500 +#define HCLGEVF_RESET_RETRY_CNT 5
struct hclgevf_dev *hdev = ae_dev->priv; int retry_cnt = 0; @@ -2040,29 +2041,31 @@ static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) retry: down(&hdev->reset_sem); set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = HNAE3_FLR_RESET; + hdev->reset_type = rst_type; ret = hclgevf_reset_prepare(hdev); if (ret) { - dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", + dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", ret); if (hdev->reset_pending || - retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) { + retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { dev_err(&hdev->pdev->dev, "reset_pending:0x%lx, retry_cnt:%d\n", hdev->reset_pending, retry_cnt); clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); up(&hdev->reset_sem); - msleep(HCLGEVF_FLR_RETRY_WAIT_MS); + msleep(HCLGEVF_RESET_RETRY_WAIT_MS); goto retry; } }
- /* disable misc vector before FLR done */ + /* disable misc vector before reset done */ hclgevf_enable_vector(&hdev->misc_vector, false); - hdev->rst_stats.flr_rst_cnt++; + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; }
-static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) +static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) { struct hclgevf_dev *hdev = ae_dev->priv; int ret; @@ -3534,8 +3537,8 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .flr_prepare = hclgevf_flr_prepare, - .flr_done = hclgevf_flr_done, + .reset_prepare = hclgevf_reset_prepare_general, + .reset_done = hclgevf_reset_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start,
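To make the flattened diff above easier to follow, here is the shape of the change in isolation (a condensed sketch; the retry loop and the hclgevf variant are elided): one prepare routine now serves both FLR and suspend because the caller passes the reset type, and the FLR counter is only bumped for FLR.

static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
                                        enum hnae3_reset_type rst_type)
{
        struct hclge_dev *hdev = ae_dev->priv;

        hdev->reset_type = rst_type;    /* HNAE3_FLR_RESET or, later, HNAE3_FUNC_RESET */
        /* ... common prepare/retry logic elided ... */

        if (hdev->reset_type == HNAE3_FLR_RESET)
                hdev->rst_stats.flr_rst_cnt++;
}

The FLR error-handler path calls it with HNAE3_FLR_RESET (see hns3_reset_prepare() above), and the suspend path added later in this series calls it with HNAE3_FUNC_RESET.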
From: Yonglong Liu liuyonglong@huawei.com
driver inclusion
category: feature
bugzilla: NA
CVE: NA
----------------------------
This reverts commit 6c1eadb888e9e9c6593294c1e99038229ac55bb2.
The HNS3 driver will use pm_ops to implement the suspend/resume function, so revert the old implementation.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 - .../net/ethernet/hisilicon/hns3/hns3_enet.c | 32 --------- .../hisilicon/hns3/hns3pf/hclge_main.c | 70 ------------------- 3 files changed, 104 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 9c3163e837222..ed108e5639e28 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -565,8 +565,6 @@ struct hnae3_ae_ops { int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, u32 len, u8 *data); bool (*get_cmdq_stat)(struct hnae3_handle *handle); - int (*suspend)(struct hnae3_ae_dev *ae_dev); - int (*resume)(struct hnae3_ae_dev *ae_dev);
/* Notice! If the function is not for test, the definition must before * CONFIG_HNS3_TEST! Because RoCE will use this head file, and it won't diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index a0e6553a2fd80..993bba680408e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2208,34 +2208,6 @@ static void hns3_shutdown(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D3hot); }
-#ifdef CONFIG_PM -static int hns3_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - - if (ae_dev->ops->suspend) - ae_dev->ops->suspend(ae_dev); - - pci_save_state(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -static int hns3_resume(struct pci_dev *pdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - if (ae_dev->ops->resume) - return ae_dev->ops->resume(ae_dev); - - return 0; -} -#endif - static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -2320,10 +2292,6 @@ struct pci_driver hns3_driver = { .probe = hns3_probe, .remove = hns3_remove, .shutdown = hns3_shutdown, -#ifdef CONFIG_PM - .suspend = hns3_suspend, - .resume = hns3_resume, -#endif .sriov_configure = hns3_pci_sriov_configure, .err_handler = &hns3_err_handler, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 41ec86a8760cf..a0372f93edf08 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3883,72 +3883,6 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_reset_task_schedule(hdev); }
-#ifdef CONFIG_PM -static int hclge_suspend(struct hnae3_ae_dev *ae_dev) -{ - struct hclge_dev *hdev = ae_dev->priv; - int ret; - - ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); - if (ret) - return ret; - - rtnl_lock(); - - ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - if (ret) - goto err_reset_lock; - - ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); - if (ret) - goto err_reset_lock; - - rtnl_unlock(); - - return hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); - -err_reset_lock: - rtnl_unlock(); - return ret; -} - -static int hclge_resume(struct hnae3_ae_dev *ae_dev) -{ - struct hclge_dev *hdev = ae_dev->priv; - int ret; - - rtnl_lock(); - - ret = hclge_reset_ae_dev(hdev->ae_dev); - if (ret) - goto err_reset_lock; - - ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - goto err_reset_lock; - - rtnl_unlock(); - - ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - return ret; - - rtnl_lock(); - - ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); - if (ret) - goto err_reset_lock; - - rtnl_unlock(); - - return hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); - -err_reset_lock: - rtnl_unlock(); - return ret; -} -#endif - static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); @@ -11773,10 +11707,6 @@ struct hnae3_ae_ops hclge_ops = { .set_vf_mac = hclge_set_vf_mac, .get_module_eeprom = hclge_get_module_eeprom, .get_cmdq_stat = hclge_get_cmdq_stat, -#ifdef CONFIG_PM - .suspend = hclge_suspend, - .resume = hclge_resume, -#endif };
static struct hnae3_ae_algo ae_algo = {
From: Jiaran Zhang zhangjiaran@huawei.com
mainline inclusion
from mainline-v5.12-rc8
commit 715c58e94f0d78907bfccde12c2ca5236502c53e
category: feature
bugzilla: NA
CVE: NA
----------------------------
To implement the system suspend/resume functions, the NIC driver needs to support:
1. When the system enters suspend mode, the driver implements the suspend callback of the NIC device: quiesce the device, stop all RX/TX activity, and unmap the interrupts.
2. When the system enters resume mode, the driver implements the resume callback of the NIC device and restores the device to the state it was in before suspension.

When the system suspends or resumes, the NIC driver actually executes the PF function reset process.

When the PFs are suspending/resuming, the VFs also enter the suspend/resume state, because the PFs trigger the VFs to reset; therefore no operation is required when the VF pci_driver suspends or resumes.
Signed-off-by: Jiaran Zhang zhangjiaran@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 29 +++++++++++++++++++ .../hisilicon/hns3/hns3pf/hclge_main.c | 2 ++ 2 files changed, 31 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 993bba680408e..b49a3fda147fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2208,6 +2208,32 @@ static void hns3_shutdown(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D3hot); }
+static int __maybe_unused hns3_suspend(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to suspend.\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); + } + + return 0; +} + +static int __maybe_unused hns3_resume(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to resume.\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); + } + + return 0; +} + static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -2286,12 +2312,15 @@ struct pci_error_handlers hns3_err_handler = { .reset_done = hns3_reset_done, };
+static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); + struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, .shutdown = hns3_shutdown, + .driver.pm = &hns3_pm_ops, .sriov_configure = hns3_pci_sriov_configure, .err_handler = &hns3_err_handler, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a0372f93edf08..f1c57600f45de 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10230,6 +10230,8 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
if (hdev->reset_type == HNAE3_FLR_RESET) hdev->rst_stats.flr_rst_cnt++; + else if (hdev->reset_type == HNAE3_FUNC_RESET) + hdev->rst_stats.pf_rst_cnt++; }
static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
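For readers less familiar with the dev_pm_ops wiring this patch relies on, a generic sketch (placeholder driver names; the hns3 callbacks additionally route into the reset_prepare/reset_done ops shown in the diff):

static int __maybe_unused my_suspend(struct device *dev)
{
        /* quiesce the device: stop TX/RX, disable interrupts, save state */
        return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
        /* rebuild the device and restore it to its pre-suspend state */
        return 0;
}

/* Builds a struct dev_pm_ops holding the two system-sleep callbacks.
 * __maybe_unused avoids "defined but not used" warnings when
 * CONFIG_PM_SLEEP is disabled, removing the need for #ifdef CONFIG_PM.
 */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
        .name           = "my_dev",
        .driver.pm      = &my_pm_ops,
        /* .probe, .remove, .id_table, ... */
};

Compared with the legacy pci_driver .suspend/.resume hooks reverted in the previous patch, the PM core and PCI bus code now take care of saving/restoring PCI state and the power-state transitions.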
From: Barry Song song.bao.hua@hisilicon.com
mainline inclusion
from mainline-v5.9-rc1
commit 674a135746aa7553eff12177d11868367aa102cb
category: feature
bugzilla: NA
CVE: NA
----------------------------
Since the ae_dev is allocated with a device-managed function, it is unnecessary to free it explicitly in probe and shutdown.
Signed-off-by: Barry Song song.bao.hua@hisilicon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b49a3fda147fd..b43f5e280be24 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2141,10 +2141,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, ae_dev);
ret = hnae3_register_ae_dev(ae_dev); - if (ret) { - devm_kfree(&pdev->dev, ae_dev); + if (ret) pci_set_drvdata(pdev, NULL); - }
return ret; } @@ -2201,7 +2199,6 @@ static void hns3_shutdown(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
hnae3_unregister_ae_dev(ae_dev); - devm_kfree(&pdev->dev, ae_dev); pci_set_drvdata(pdev, NULL);
if (system_state == SYSTEM_POWER_OFF)
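For context, the contract behind this cleanup (a generic sketch, not hns3 code): memory obtained with devm_kzalloc() is released automatically by the devres core when the device is unbound, so freeing it by hand on error or teardown paths is redundant.

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct my_dev *md;
        int ret;

        md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);  /* device-managed */
        if (!md)
                return -ENOMEM;

        pci_set_drvdata(pdev, md);

        ret = my_register(md);          /* hypothetical registration helper */
        if (ret)
                /* no devm_kfree() needed: devres frees md when the driver detaches */
                pci_set_drvdata(pdev, NULL);

        return ret;
}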
From: Barry Song song.bao.hua@hisilicon.com
mainline inclusion
from mainline-v5.9-rc1
commit cb0e3e611575a3335ea0521c9e8ba84a6a7e51c6
category: feature
bugzilla: NA
CVE: NA
----------------------------
Change the type of the buffer address field (ring->va) from unsigned char * to void *, which also removes the need for the cast when assigning it in hns3_handle_rx_bd().
Signed-off-by: Barry Song song.bao.hua@hisilicon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b43f5e280be24..3672009a96fee 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3148,7 +3148,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) return -ENXIO;
if (!skb) - ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + ring->va = desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. Our L1 Cache diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index c984f0ebe1615..3178e0cd2dfe2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -426,7 +426,7 @@ struct hns3_enet_ring {
u32 pull_len; /* head length for current packet */ u32 frag_num; - unsigned char *va; /* first buffer address for current packet */ + void *va; /* first buffer address for current packet */
u32 flag; /* ring attribute */
From: Barry Song song.bao.hua@hisilicon.com
mainline inclusion
from mainline-v5.9-rc1
commit 4d2cad32127e5c83e17b0e1dfd4cd5ef6080301f
category: feature
bugzilla: NA
CVE: NA
----------------------------
Rename hns3_reserve_buffer_map() to hns3_alloc_and_map_buffer() and hns3_alloc_buffer_attach() to hns3_alloc_and_attach_buffer() to improve readability.
Signed-off-by: Barry Song song.bao.hua@hisilicon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3672009a96fee..6bde38ebae593 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2483,8 +2483,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) return 0; }
-static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) { int ret;
@@ -2504,9 +2504,9 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return ret; }
-static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) { - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
if (ret) return ret; @@ -2522,7 +2522,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) int i, j, ret;
for (i = 0; i < ring->desc_num; i++) { - ret = hns3_alloc_buffer_attach(ring, i); + ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; } @@ -2668,7 +2668,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_reuse_buffer(ring, ring->next_to_use); } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -4263,7 +4263,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) * stack, so we need to replace the buffer here. */ if (!ring->desc_cb[ring->next_to_use].reuse_flag) { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++;
From: Barry Song song.bao.hua@hisilicon.com
mainline inclusion
from mainline-v5.9-rc1
commit c2a2e1270a57696827da4ca69f78eb072e37d590
category: feature
bugzilla: NA
CVE: NA
----------------------------
Add dma_sync_single_for_cpu()/dma_sync_single_for_device() calls for the rx buffers. Right now they are empty functions on our SoC, since the hardware keeps the cache coherent, but it is still good to follow the streaming DMA API, as device drivers should not make assumptions about the SoC.
Reviewed-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Barry Song song.bao.hua@hisilicon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 6bde38ebae593..092f649aee549 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2551,6 +2551,11 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; + + dma_sync_single_for_device(ring_to_dev(ring), + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); }
static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, @@ -2996,6 +3001,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) skb = ring->tail_skb; }
+ dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); trace_hns3_rx_desc(ring); ring_ptr_move_fw(ring, next_to_clean); @@ -3147,9 +3157,15 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO;
- if (!skb) + if (!skb) { ring->va = desc_cb->buf + desc_cb->page_offset;
+ dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + } + /* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. Our L1 Cache * line size is 64B so need to prefetch twice to make it 128B. But in
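The calls added above follow the usual streaming-DMA ownership rule; as a generic sketch (dev, dma handle, lengths and the helpers are placeholders): sync the buffer for the CPU before the driver reads the received data, and sync it back for the device before the buffer is handed to the hardware again.

/* rx completion: device has written the packet, give ownership to the CPU */
dma_sync_single_for_cpu(dev, buf_dma + offset, buf_len, DMA_FROM_DEVICE);
process_packet(buf_va + offset, pkt_len);               /* hypothetical consumer */

/* buffer reuse: give ownership back to the device before re-posting it */
dma_sync_single_for_device(dev, buf_dma + offset, buf_len, DMA_FROM_DEVICE);
post_rx_buffer(ring, buf_dma + offset);                 /* hypothetical re-post helper */

On cache-coherent SoCs such as this one, both calls are effectively no-ops, so there is no runtime cost for the portability gained.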
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion
from mainline-v5.10-rc1
commit aeda9bf87a4573906d6dff5fcf7a3df0f41529da
category: feature
bugzilla: NA
CVE: NA
----------------------------
Batch the page reference count updates instead of doing them one at a time. By doing this we can improve the overall receive performance by avoiding some atomic increment operations when the rx page is reused.
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 32 ++++++++++++++----- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 1 + 2 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 092f649aee549..011190809855d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2396,6 +2396,8 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->buf = page_address(p); cb->length = hns3_page_size(ring); cb->type = DESC_TYPE_PAGE; + page_ref_add(p, USHRT_MAX - 1); + cb->pagecnt_bias = USHRT_MAX;
return 0; } @@ -2405,8 +2407,8 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring, { if (cb->type == DESC_TYPE_SKB) dev_kfree_skb_any((struct sk_buff *)cb->priv); - else if (!HNAE3_IS_TX_RING(ring)) - put_page((struct page *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); memset(cb, 0, sizeof(*cb)); }
@@ -2704,6 +2706,11 @@ static bool hns3_page_is_reusable(struct page *page) !page_is_pfmemalloc(page); }
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) +{ + return (page_count(cb->priv) - cb->pagecnt_bias) == 1; +} + static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -2712,6 +2719,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, int size = le16_to_cpu(desc->rx.size); u32 truesize = hns3_buf_size(ring);
+ desc_cb->pagecnt_bias--; skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize);
@@ -2719,20 +2727,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, * when page_offset rollback to zero, flag default unreuse */ if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) || - (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) + (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) { + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); return; + }
/* Move offset up to the next cache line */ desc_cb->page_offset += truesize;
if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) { desc_cb->reuse_flag = 1; - /* Bump ref count on page before it is given */ - get_page(desc_cb->priv); - } else if (page_count(desc_cb->priv) == 1) { + } else if (hns3_can_reuse_page(desc_cb)) { desc_cb->reuse_flag = 1; desc_cb->page_offset = 0; - get_page(desc_cb->priv); + } else if (desc_cb->pagecnt_bias) { + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); + return; + } + + if (unlikely(!desc_cb->pagecnt_bias)) { + page_ref_add(desc_cb->priv, USHRT_MAX); + desc_cb->pagecnt_bias = USHRT_MAX; } }
@@ -2940,7 +2955,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (likely(hns3_page_is_reusable(desc_cb->priv))) desc_cb->reuse_flag = 1; else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); + __page_frag_cache_drain(desc_cb->priv, + desc_cb->pagecnt_bias);
ring_ptr_move_fw(ring, next_to_clean); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 3178e0cd2dfe2..8fa37d9d1359a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -313,6 +313,7 @@ struct hns3_desc_cb {
/* desc type, used by the ring user to mark the type of the priv data */ u16 type; + u16 pagecnt_bias; };
enum hns3_pkt_l3type {
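The scheme used above is the common pagecnt_bias trick; a simplified, generic sketch (struct and helper names are made up) of how the reference accounting works:

struct rx_buf {
        struct page *page;
        unsigned int pagecnt_bias;      /* page references we still own locally */
};

static void rx_buf_init(struct rx_buf *buf, struct page *page)
{
        buf->page = page;
        /* one reference came from the allocator; take the rest in one atomic op */
        page_ref_add(page, USHRT_MAX - 1);
        buf->pagecnt_bias = USHRT_MAX;
}

/* hand one of our pre-taken references to an skb frag: no atomic op needed */
static void rx_buf_give_to_skb(struct rx_buf *buf)
{
        buf->pagecnt_bias--;
}

/* true if, apart from the frag just handed to the current skb,
 * nobody else (stack, other frags) still holds the page
 */
static bool rx_buf_can_reuse(struct rx_buf *buf)
{
        return page_count(buf->page) - buf->pagecnt_bias == 1;
}

/* on teardown, or when the page cannot be reused, drop what we still own */
static void rx_buf_free(struct rx_buf *buf)
{
        if (buf->pagecnt_bias)
                __page_frag_cache_drain(buf->page, buf->pagecnt_bias);
}

When the bias runs out, it is refilled in bulk with another page_ref_add(page, USHRT_MAX), as the patch does in hns3_nic_reuse_page().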
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion
from mainline-v5.10-rc1
commit f6061a056c8b5cc76f23e61859519ea555b9e6fc
category: feature
bugzilla: NA
CVE: NA
----------------------------
Use netdev_xmit_more() to defer the tx doorbell operation when skbs are passed to the driver back to back. By doing this we can improve the overall xmit performance by avoiding some doorbell operations.

Also, the tx_err_cnt stat is not used anymore, so rename it to the tx_more stat.
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../hisilicon/hns3/hns3_cae/hns3_cae_stat.c | 6 +-- .../hisilicon/hns3/hns3_cae/hns3_cae_stat.h | 2 +- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 51 +++++++++++++++---- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- .../ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +- 5 files changed, 47 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c index ea69639b3d864..9d4d16109bb3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c @@ -9,7 +9,7 @@ const struct ring_stats_name hns3_ring_stats_name[] = { {"seg_pkt_cnt", SEG_PKT_CNT}, {"tx_pkts", TX_PKTS}, {"tx_bytes", TX_BYTES}, - {"tx_err_cnt", TX_ERR_CNT}, + {"tx_more", TX_MORE}, {"restart_queue", RESTART_QUEUE}, {"tx_busy", TX_BUSY}, {"rx_pkts", RX_PKTS}, @@ -58,8 +58,8 @@ static int hns3_get_stat_val(struct ring_stats *r_stats, char *val_name, case TX_BYTES: *val = &r_stats->tx_bytes; break; - case TX_ERR_CNT: - *val = &r_stats->tx_err_cnt; + case TX_MORE: + *val = &r_stats->tx_more; break; case RESTART_QUEUE: *val = &r_stats->restart_queue; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h index a54ef8e34bf55..47c3faf44ebb7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.h @@ -24,7 +24,7 @@ enum stats_name_type { SEG_PKT_CNT, TX_PKTS, TX_BYTES, - TX_ERR_CNT, + TX_MORE, RESTART_QUEUE, TX_BUSY, RX_PKTS, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 011190809855d..f9783907cf46d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1384,6 +1384,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, return bd_num; }
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, + bool doorbell) +{ + ring->pending_buf += num; + + if (!doorbell) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_more++; + u64_stats_update_end(&ring->syncp); + return; + } + + if (!ring->pending_buf) + return; + + wmb(); /* Commit all data before submit */ + + hnae3_queue_xmit(ring->tqp, ring->pending_buf); + ring->pending_buf = 0; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -1395,8 +1416,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) int ret;
/* Hardware can only handle short frames above 32 bytes */ - if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); return NETDEV_TX_OK; + }
/* Prefetch the data used later */ prefetch(skb->data); @@ -1407,6 +1430,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) u64_stats_update_begin(&ring->syncp); ring->stats.tx_busy++; u64_stats_update_end(&ring->syncp); + hns3_tx_doorbell(ring, 0, true); return NETDEV_TX_BUSY; } else if (ret == -ENOMEM) { u64_stats_update_begin(&ring->syncp); @@ -1449,11 +1473,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); - netdev_tx_sent_queue(dev_queue, skb->len); - - wmb(); /* Commit all data before submit */ - - hnae3_queue_xmit(ring->tqp, bd_num); + if (!netdev_xmit_more()) { + netdev_tx_sent_queue(dev_queue, skb->len); + hns3_tx_doorbell(ring, bd_num, true); + } else { + dql_queued(&dev_queue->dql, skb->len); + hns3_tx_doorbell(ring, bd_num, + netif_tx_queue_stopped(dev_queue)); + }
return NETDEV_TX_OK;
@@ -1462,6 +1489,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
out_err_tx_ok: dev_kfree_skb_any(skb); + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); return NETDEV_TX_OK; }
@@ -1892,13 +1920,14 @@ bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) tx_ring->next_to_clean, napi->state);
netdev_info(ndev, - "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", + "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu, tx_pending: %d\n", tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, - tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); + tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt, + tx_ring->pending_buf);
netdev_info(ndev, - "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", - tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, + "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
/* When mac received many pause frames continuous, it's unable to send @@ -4282,6 +4311,8 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); } + + ring->pending_buf = 0; }
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 8fa37d9d1359a..90b991962fb99 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -377,7 +377,7 @@ struct ring_stats { struct { u64 tx_pkts; u64 tx_bytes; - u64 tx_err_cnt; + u64 tx_more; u64 restart_queue; u64 tx_busy; u64 tx_copy; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index d04a61e39b68e..d6788923e30be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), HNS3_TQP_STAT("bytes", tx_bytes), - HNS3_TQP_STAT("errors", tx_err_cnt), + HNS3_TQP_STAT("more", tx_more), HNS3_TQP_STAT("wake", restart_queue), HNS3_TQP_STAT("busy", tx_busy), HNS3_TQP_STAT("copy", tx_copy),
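The doorbell batching above is the standard netdev_xmit_more() pattern; a generic sketch (ring layout, doorbell register and the fill helpers are placeholders):

static void tx_doorbell(struct my_ring *ring, int num, bool kick)
{
        ring->pending += num;
        if (!kick || !ring->pending)
                return;

        wmb();                          /* descriptors visible before the doorbell */
        writel_relaxed(ring->pending, ring->doorbell_reg);
        ring->pending = 0;
}

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct my_ring *ring = pick_tx_ring(ndev, skb); /* hypothetical */
        int bd_num = fill_tx_descs(ring, skb);          /* hypothetical */

        /* only ring the doorbell when the stack has no follow-up skb queued,
         * or when this queue is about to be stopped and must be flushed
         */
        tx_doorbell(ring, bd_num, !netdev_xmit_more());
        return NETDEV_TX_OK;
}

Note that the error paths still call the doorbell helper with a zero count (as the patch does), so descriptors queued by earlier xmit_more skbs never sit unsubmitted.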
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion
from mainline-v5.10-rc1
commit 20d06ca2679cb1810135c70be340e5477c83a808
category: feature
bugzilla: NA
CVE: NA
----------------------------
Currently the HNS3_RING_TX_RING_HEAD_REG register is read to determine how many tx descs can be cleaned. To avoid this register read in the critical data path, use the valid bit in the tx desc to determine whether a specific tx desc can be cleaned.

The hns3 driver sets the valid bit in the tx desc before ringing a doorbell to the hw, and the hw only clears the valid bit of a tx desc after the corresponding packet has been sent out to the wire. Because next_to_use for the tx ring keeps changing while the driver is filling tx descs, reuse the field that serves as pull_len for rx rings (named last_to_use for tx rings) to record the last tx desc that has been notified to the hw, so that hns3_nic_reclaim_desc() can decide how many tx descs' valid bits need checking when reclaiming.

The io_err_cnt stat is also removed, since it is not used anymore.
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../hisilicon/hns3/hns3_cae/hns3_cae_stat.c | 4 -- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 64 +++++++++---------- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 12 ++-- .../ethernet/hisilicon/hns3/hns3_ethtool.c | 2 - 4 files changed, 33 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c index 9d4d16109bb3b..684d72953c306 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_stat.c @@ -4,7 +4,6 @@ #include "hns3_cae_stat.h"
const struct ring_stats_name hns3_ring_stats_name[] = { - {"io_err_cnt", IO_ERR_CNT}, {"sw_err_cnt", SW_ERR_CNT}, {"seg_pkt_cnt", SEG_PKT_CNT}, {"tx_pkts", TX_PKTS}, @@ -43,9 +42,6 @@ static int hns3_get_stat_val(struct ring_stats *r_stats, char *val_name, } } switch (stats_name_id) { - case IO_ERR_CNT: - *val = &r_stats->io_err_cnt; - break; case SW_ERR_CNT: *val = &r_stats->sw_err_cnt; break; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index f9783907cf46d..580ea9ef66292 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1403,6 +1403,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
hnae3_queue_xmit(ring->tqp, ring->pending_buf); ring->pending_buf = 0; + WRITE_ONCE(ring->last_to_use, ring->next_to_use); }
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) @@ -1920,10 +1921,9 @@ bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) tx_ring->next_to_clean, napi->state);
netdev_info(ndev, - "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu, tx_pending: %d\n", + "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, - tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt, - tx_ring->pending_buf); + tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
netdev_info(ndev, "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", @@ -2589,13 +2589,26 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) DMA_FROM_DEVICE); }
-static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, +static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int *bytes, int *pkts) { + /* pair with ring->last_to_use update in hns3_tx_doorbell(), + * smp_store_release() is not used in hns3_tx_doorbell() because + * the doorbell operation already have the needed barrier operation. + */ + int ltu = smp_load_acquire(&ring->last_to_use); int ntc = ring->next_to_clean; struct hns3_desc_cb *desc_cb; + bool reclaimed = false; + struct hns3_desc *desc; + + while (ltu != ntc) { + desc = &ring->desc[ntc]; + + if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & + BIT(HNS3_TXD_VLD_B)) + break;
- while (head != ntc) { desc_cb = &ring->desc_cb[ntc]; (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; @@ -2607,23 +2620,17 @@ static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
/* Issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ntc]); + reclaimed = true; }
+ if (unlikely(!reclaimed)) + return false; + /* This smp_store_release() pairs with smp_load_acquire() in * ring_space called by hns3_nic_net_xmit. */ smp_store_release(&ring->next_to_clean, ntc); -} - -static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) -{ - int u = ring->next_to_use; - int c = ring->next_to_clean; - - if (unlikely(h > ring->desc_num)) - return 0; - - return u > c ? (h > c && h <= u) : (h > c || h <= u); + return true; }
void hns3_clean_tx_ring(struct hns3_enet_ring *ring) @@ -2632,28 +2639,12 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; - int head; - - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - - if (is_ring_empty(ring) || head == ring->next_to_clean) - return; /* no data to poll */ - - rmb(); /* Make sure head is ready before touch any data */ - - if (unlikely(!is_valid_clean_head(ring, head))) { - hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); - - u64_stats_update_begin(&ring->syncp); - ring->stats.io_err_cnt++; - u64_stats_update_end(&ring->syncp); - return; - }
bytes = 0; pkts = 0; - hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); + + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts))) + return;
ring->tqp_vector->tx_group.total_bytes += bytes; ring->tqp_vector->tx_group.total_packets += pkts; @@ -3818,6 +3809,7 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->desc_num = desc_num; ring->next_to_use = 0; ring->next_to_clean = 0; + ring->last_to_use = 0; }
static void hns3_queue_to_ring(struct hnae3_queue *tqp, @@ -3897,6 +3889,7 @@ void hns3_fini_ring(struct hns3_enet_ring *ring) ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; + ring->last_to_use = 0; ring->pending_buf = 0; if (ring->skb) { dev_kfree_skb_any(ring->skb); @@ -4415,6 +4408,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) hns3_clear_tx_ring(&priv->ring[i]); priv->ring[i].next_to_clean = 0; priv->ring[i].next_to_use = 0; + priv->ring[i].last_to_use = 0;
rx_ring = &priv->ring[i + h->kinfo.num_tqps]; hns3_init_ring_hw(rx_ring); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 90b991962fb99..d98cf6085c7d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -370,7 +370,6 @@ enum hns3_pkt_ol4type { };
struct ring_stats { - u64 io_err_cnt; u64 sw_err_cnt; u64 seg_pkt_cnt; union { @@ -424,8 +423,10 @@ struct hns3_enet_ring { * next_to_use */ int next_to_clean; - - u32 pull_len; /* head length for current packet */ + union { + int last_to_use; /* last idx used by xmit */ + u32 pull_len; /* memcpy len for current rx packet */ + }; u32 frag_num; void *va; /* first buffer address for current packet */
@@ -562,11 +563,6 @@ static inline int ring_space(struct hns3_enet_ring *ring) (begin - end)) - 1; }
-static inline int is_ring_empty(struct hns3_enet_ring *ring) -{ - return ring->next_to_use == ring->next_to_clean; -} - static inline u32 hns3_read_reg(void __iomem *base, u32 reg) { return readl(base + reg); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index d6788923e30be..d9154dcfe33ea 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -31,7 +31,6 @@ struct hns3_sfp_type {
static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ - HNS3_TQP_STAT("io_err_cnt", io_err_cnt), HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", tx_pkts), @@ -50,7 +49,6 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ - HNS3_TQP_STAT("io_err_cnt", io_err_cnt), HNS3_TQP_STAT("dropped", sw_err_cnt), HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), HNS3_TQP_STAT("packets", rx_pkts),
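The memory-ordering contract that makes the lockless reclaim above safe, shown in isolation (a simplified sketch following the patch's own field names; reclaim_one_desc() is a hypothetical per-descriptor cleanup):

/* xmit side: runs after the doorbell write, which already orders the
 * descriptor and pending_buf stores, so a plain WRITE_ONCE() is enough
 */
WRITE_ONCE(ring->last_to_use, ring->next_to_use);

/* clean side */
int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;

while (ltu != ntc) {
        struct hns3_desc *desc = &ring->desc[ntc];

        /* hw clears the valid bit only after the packet is on the wire */
        if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & BIT(HNS3_TXD_VLD_B))
                break;                  /* not sent yet, stop here */

        reclaim_one_desc(ring, ntc);
        ntc = (ntc + 1) % ring->desc_num;
}

/* pairs with the smp_load_acquire() of next_to_clean in ring_space(),
 * so the xmit path never sees reclaimed space before the cleanup is visible
 */
smp_store_release(&ring->next_to_clean, ntc);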
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion
from mainline-v5.10-rc1
commit 8c30e19460e1bb935c788a077cc063fb15f1ef04
category: feature
bugzilla: NA
CVE: NA
----------------------------
Currently the HNS3_RING_RX_RING_FBDNUM_REG register is read to determine how many rx descs can be cleaned. To avoid this register read in the critical data path, use the valid bit in the rx desc to determine whether a specific rx desc can be cleaned.

The hns3 driver clears the valid bit in the rx desc before handing the rx desc back to the hw, and the hw only sets the valid bit of an rx desc after the corresponding buffer has been filled with packet data and the other fields in the rx desc have been set accordingly.

Add a hns3_rx_ring_move_fw() function which clears the valid bit in the rx desc before moving the rx ring's next_to_clean forward, to avoid cleaning an rx desc twice. Also add a dma_rmb() barrier in hns3_handle_rx_bd() to make sure the valid bit is observed as set before the other fields of the rx desc are read.
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 63 ++++++++++--------- 1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 580ea9ef66292..9f2ab98b597b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2943,6 +2943,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, } }
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) +{ + ring->desc[ring->next_to_clean].rx.bd_base_info &= + cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); + ring->next_to_clean += 1; + + if (unlikely(ring->next_to_clean == ring->desc_num)) + ring->next_to_clean = 0; +} + static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, unsigned char *va) { @@ -2978,7 +2988,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
- ring_ptr_move_fw(ring, next_to_clean); + hns3_rx_ring_move_fw(ring); return 0; } u64_stats_update_begin(&ring->syncp); @@ -2989,7 +2999,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, __skb_put(skb, ring->pull_len); hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); + hns3_rx_ring_move_fw(ring);
return 0; } @@ -3044,7 +3054,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); trace_hns3_rx_desc(ring); - ring_ptr_move_fw(ring, next_to_clean); + hns3_rx_ring_move_fw(ring); ring->pending_buf++; } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
@@ -3186,35 +3196,35 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
prefetch(desc);
- length = le16_to_cpu(desc->rx.size); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + if (!skb) { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + + /* Check valid BD */ + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) + return -ENXIO;
- /* Check valid BD */ - if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) - return -ENXIO; + dma_rmb(); + length = le16_to_cpu(desc->rx.size);
- if (!skb) { ring->va = desc_cb->buf + desc_cb->page_offset;
dma_sync_single_for_cpu(ring_to_dev(ring), desc_cb->dma + desc_cb->page_offset, hns3_buf_size(ring), DMA_FROM_DEVICE); - }
- /* Prefetch first cache line of first page - * Idea is to cache few bytes of the header of the packet. Our L1 Cache - * line size is 64B so need to prefetch twice to make it 128B. But in - * actual we can have greater size of caches with 128B Level 1 cache - * lines. In such a case, single fetch would suffice to cache in the - * relevant part of the header. - */ - prefetch(ring->va); + /* Prefetch first cache line of first page. + * Idea is to cache few bytes of the header of the packet. + * Our L1 Cache line size is 64B so need to prefetch twice to make + * it 128B. But in actual we can have greater size of caches with + * 128B Level 1 cache lines. In such a case, single fetch would + * suffice to cache in the relevant part of the header. + */ + prefetch(ring->va); #if L1_CACHE_BYTES < 128 - prefetch(ring->va + L1_CACHE_BYTES); + prefetch(ring->va + L1_CACHE_BYTES); #endif
- if (!skb) { ret = hns3_alloc_skb(ring, length, ring->va); skb = ring->skb;
@@ -3254,19 +3264,11 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int unused_count = hns3_desc_unused(ring); int recv_pkts = 0; - int recv_bds = 0; - int err, num; + int err;
- num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - num -= unused_count; unused_count -= ring->pending_buf;
- if (num <= 0) - goto out; - - rmb(); /* Make sure num taken effect before the other data is touched */ - - while (recv_pkts < budget && recv_bds < num) { + while (recv_pkts < budget) { /* Reuse or realloc buffers */ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { hns3_nic_alloc_rx_buffers(ring, unused_count); @@ -3284,7 +3286,6 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, recv_pkts++; }
- recv_bds += ring->pending_buf; unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0;
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion from mainline-v5.10-rc1 commit 48ee56fd0b3785481a521fb7daaf1465219fc3af category: feature bugzilla: NA CVE: NA
----------------------------
writel() orders I/O against memory by default when writing portable drivers, so use writel() to replace the wmb() + writel_relaxed() pairs. On ARM64, writel() is dma_wmb() + writel_relaxed(), so this is also an optimization, because dma_wmb() is a lighter barrier than wmb().
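A minimal sketch of the substitution (TAIL_REG stands in for the real doorbell register offset and is illustrative only):

    /* before: full write barrier, then a relaxed MMIO write */
    wmb();
    writel_relaxed(ring->pending_buf, base + TAIL_REG);

    /* after: writel() already orders prior normal memory writes before
     * the MMIO write; on arm64 it expands to dma_wmb() + writel_relaxed(),
     * which is cheaper than a full wmb()
     */
    writel(ring->pending_buf, base + TAIL_REG);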
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 +++----- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 3 --- 2 files changed, 3 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9f2ab98b597b5..2cf26b6ed1ed8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1399,9 +1399,8 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, if (!ring->pending_buf) return;
- wmb(); /* Commit all data before submit */ - - hnae3_queue_xmit(ring->tqp, ring->pending_buf); + writel(ring->pending_buf, + ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); ring->pending_buf = 0; WRITE_ONCE(ring->last_to_use, ring->next_to_use); } @@ -2716,8 +2715,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, ring_ptr_move_fw(ring, next_to_use); }
- wmb(); /* Make all data has been write before submit */ - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); }
static bool hns3_page_is_reusable(struct page *page) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d98cf6085c7d7..132b0553c9a6e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -588,9 +588,6 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ - (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) - #define ring_to_dev(ring) ((ring)->dev)
#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion from mainline-v5.10-rc1 commit 619ae331d1fd4cc0098800a56d55d163bd06d821 category: feature bugzilla: NA CVE: NA
----------------------------
Use napi_consume_skb() to batch-free skbs when cleaning tx descriptors in NAPI polling.
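A minimal sketch of the change in the tx free path (budget is the NAPI budget threaded down from the poll routine, as in the diff below; 0 is used by non-NAPI callers):

    if (cb->type == DESC_TYPE_SKB)
            /* budget > 0: batched, deferred free from NAPI context;
             * budget == 0: behaves like dev_kfree_skb_any()
             */
            napi_consume_skb(cb->priv, budget);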
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 27 ++++++++++--------- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- .../ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 2cf26b6ed1ed8..4d73b29fea1c6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2431,10 +2431,10 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, }
static void hns3_free_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) + struct hns3_desc_cb *cb, int budget) { if (cb->type == DESC_TYPE_SKB) - dev_kfree_skb_any((struct sk_buff *)cb->priv); + napi_consume_skb(cb->priv, budget); else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); memset(cb, 0, sizeof(*cb)); @@ -2468,7 +2468,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) ring->desc[i].addr = 0; }
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, + int budget) { struct hns3_desc_cb *cb = &ring->desc_cb[i];
@@ -2476,7 +2477,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) return;
hns3_buffer_detach(ring, i); - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, budget); }
static void hns3_free_buffers(struct hns3_enet_ring *ring) @@ -2484,7 +2485,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) int i;
for (i = 0; i < ring->desc_num; i++) - hns3_free_buffer_detach(ring, i); + hns3_free_buffer_detach(ring, i, 0); }
/* free desc along with its attached buffer */ @@ -2529,7 +2530,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, return 0;
out_with_buf: - hns3_free_buffer(ring, cb); + hns3_free_buffer(ring, cb, 0); out: return ret; } @@ -2561,7 +2562,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
out_buffer_fail: for (j = i - 1; j >= 0; j--) - hns3_free_buffer_detach(ring, j); + hns3_free_buffer_detach(ring, j, 0); return ret; }
@@ -2589,7 +2590,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) }
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, - int *bytes, int *pkts) + int *bytes, int *pkts, int budget) { /* pair with ring->last_to_use update in hns3_tx_doorbell(), * smp_store_release() is not used in hns3_tx_doorbell() because @@ -2612,7 +2613,7 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ - hns3_free_buffer_detach(ring, ntc); + hns3_free_buffer_detach(ring, ntc, budget);
if (++ntc == ring->desc_num) ntc = 0; @@ -2632,7 +2633,7 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, return true; }
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring) +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) { struct net_device *netdev = ring_to_netdev(ring); struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -2642,7 +2643,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) bytes = 0; pkts = 0;
- if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts))) + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) return;
ring->tqp_vector->tx_group.total_bytes += bytes; @@ -3452,7 +3453,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ hns3_for_each_ring(ring, tqp_vector->tx_group) - hns3_clean_tx_ring(ring); + hns3_clean_tx_ring(ring, budget);
/* make sure rx ring budget not smaller than 1 */ if (tqp_vector->num_tqps > 1) @@ -4300,7 +4301,7 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; - hns3_free_buffer_detach(ring, ring->next_to_clean); + hns3_free_buffer_detach(ring, ring->next_to_clean, 0); ring_ptr_move_fw(ring, next_to_clean); }
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 132b0553c9a6e..a0fe0d9672a08 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -625,7 +625,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch);
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring); +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index d9154dcfe33ea..c0312e24e1254 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -240,7 +240,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, for (i = start_ringid; i <= end_ringid; i++) { struct hns3_enet_ring *ring = &priv->ring[i];
- hns3_clean_tx_ring(ring); + hns3_clean_tx_ring(ring, 0); } }
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion from mainline-v5.13-rc1 commit d5d5e0193ee8f88efbbc7f1471087255657bc19a category: feature bugzilla: NA CVE: NA
----------------------------
Currently the hns3 driver only handles xmit skbs with one level of fraglist skb. Add handling for multiple levels by calling hns3_tx_bd_num() recursively when calculating the bd num and calling hns3_fill_skb_to_desc() recursively when filling the tx desc.
When the skb's fraglist reaches a recursion level of 24, the skb is simply dropped and stats.over_max_recursion is incremented to record the error. Move the stats handling from hns3_nic_net_xmit() to hns3_nic_maybe_stop_tx() in order to handle the different error stats, and add the 'over_max_recursion' and 'hw_limitation' stats.
Note that the max recursion level of 24 is chosen according to the commit below: commit 48a1df65334b ("skbuff: return -EMSGSIZE in skb_to_sgvec to prevent overflow").
We were not able to find a testcase to verify the recursive fraglist case, so a Fixes tag is not provided.
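A minimal sketch of the recursive walk with the recursion cap (count_bds() is a simplified stand-in for hns3_tx_bd_num(); the per-fragment bookkeeping is omitted):

    static unsigned int count_bds(struct sk_buff *skb, unsigned int bd_num,
                                  unsigned int level)
    {
    #define HNS3_MAX_RECURSION_LEVEL 24
            struct sk_buff *frag_skb;

            if (level >= HNS3_MAX_RECURSION_LEVEL)
                    return UINT_MAX;        /* caller drops the skb */

            bd_num += skb_shinfo(skb)->nr_frags + 1U;

            /* walk the frag list, one recursion level deeper for each
             * level of fraglist skbs
             */
            skb_walk_frags(skb, frag_skb) {
                    bd_num = count_bds(frag_skb, bd_num, level + 1);
                    if (bd_num == UINT_MAX)
                            return bd_num;
            }

            return bd_num;
    }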
Reported-by: Barry Song song.bao.hua@hisilicon.com Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 117 +++++++++++------- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 2 + .../ethernet/hisilicon/hns3/hns3_ethtool.c | 2 + 3 files changed, 76 insertions(+), 45 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4d73b29fea1c6..f85e2a1870e5f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1181,22 +1181,22 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, return bd_num; }
-static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num, + unsigned int recursion_level) { +#define HNS3_MAX_RECURSION_LEVEL 24 + struct sk_buff *frag_skb; - unsigned int bd_num = 0;
/* If the total len is within the max bd limit */ - if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && + !skb_has_frag_list(skb) && skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) return skb_shinfo(skb)->nr_frags + 1U;
- /* The below case will always be linearized, return - * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. - */ - if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || - (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE))) - return HNS3_MAX_TSO_BD_NUM + 1U; + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) + return UINT_MAX;
bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
@@ -1204,7 +1204,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) return bd_num;
skb_walk_frags(skb, frag_skb) { - bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); + bd_num = hns3_tx_bd_num(frag_skb, bd_size, bd_num, + recursion_level + 1); if (bd_num > HNS3_MAX_TSO_BD_NUM) return bd_num; } @@ -1261,6 +1262,40 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) size[i] = skb_frag_size(&shinfo->frags[i]); }
+static int hns3_skb_linearize(struct hns3_enet_ring *ring, struct sk_buff *skb, + unsigned int bd_num) +{ + /* 'bd_num == UINT_MAX' means the skb' fraglist has a + * recursion level of over HNS3_MAX_RECURSION_LEVEL. + */ + if (bd_num == UINT_MAX) { + u64_stats_update_begin(&ring->syncp); + ring->stats.over_max_recursion++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + /* The skb->len has exceeded the hw limitation, linearization + * will not help. + */ + if (skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.hw_limitation++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + if (__skb_linearize(skb)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -ENOMEM; + } + + return 0; +} + static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct net_device *netdev, struct sk_buff *skb) @@ -1269,7 +1304,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; unsigned int bd_num;
- bd_num = hns3_tx_bd_num(skb, bd_size); + bd_num = hns3_tx_bd_num(skb, bd_size, 0, 0); if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && !hns3_skb_need_linearized(skb, bd_size, bd_num)) { @@ -1277,16 +1312,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, goto out; }
- if (__skb_linearize(skb)) + if (hns3_skb_linearize(ring, skb, bd_num)) return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len); - if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || - (!skb_is_gso(skb) && - bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { - trace_hns3_over_8bd(skb); - return -ENOMEM; - }
u64_stats_update_begin(&ring->syncp); ring->stats.tx_copy++; @@ -1310,6 +1339,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, return bd_num; }
+ u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + return -EBUSY; }
@@ -1357,6 +1390,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, enum hns_desc_type type) { unsigned int size = skb_headlen(skb); + struct sk_buff *frag_skb; int i, ret, bd_num = 0;
if (size) { @@ -1381,6 +1415,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, bd_num += ret; }
+ skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, + DESC_TYPE_FRAGLIST_SKB); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + return bd_num; }
@@ -1411,8 +1454,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; struct netdev_queue *dev_queue; int pre_ntu, next_to_use_head; - struct sk_buff *frag_skb; - int bd_num = 0; int ret;
/* Hardware can only handle short frames above 32 bytes */ @@ -1427,15 +1468,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); if (unlikely(ret <= 0)) { if (ret == -EBUSY) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); hns3_tx_doorbell(ring, 0, true); return NETDEV_TX_BUSY; - } else if (ret == -ENOMEM) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); }
hns3_rl_err(netdev, "xmit error: %d!\n", ret); @@ -1448,24 +1482,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) if (unlikely(ret < 0)) goto fill_err;
+ /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); - if (unlikely(ret < 0)) + if (unlikely(ret <= 0)) goto fill_err;
- bd_num += ret; - - if (!skb_has_frag_list(skb)) - goto out; - - skb_walk_frags(skb, frag_skb) { - ret = hns3_fill_skb_to_desc(ring, frag_skb, - DESC_TYPE_FRAGLIST_SKB); - if (unlikely(ret < 0)) - goto fill_err; - - bd_num += ret; - } -out: pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : (ring->desc_num - 1); ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= @@ -1475,11 +1499,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); if (!netdev_xmit_more()) { netdev_tx_sent_queue(dev_queue, skb->len); - hns3_tx_doorbell(ring, bd_num, true); + hns3_tx_doorbell(ring, ret, true); } else { dql_queued(&dev_queue->dql, skb->len); - hns3_tx_doorbell(ring, bd_num, - netif_tx_queue_stopped(dev_queue)); + hns3_tx_doorbell(ring, ret, netif_tx_queue_stopped(dev_queue)); }
return NETDEV_TX_OK; @@ -1667,11 +1690,15 @@ static struct rtnl_link_stats64 *hns3_nic_get_stats64(struct net_device *netdev, tx_drop += ring->stats.tx_l4_proto_err; tx_drop += ring->stats.tx_l2l3l4_err; tx_drop += ring->stats.tx_tso_err; + tx_drop += ring->stats.over_max_recursion; + tx_drop += ring->stats.hw_limitation; tx_errors += ring->stats.sw_err_cnt; tx_errors += ring->stats.tx_vlan_err; tx_errors += ring->stats.tx_l4_proto_err; tx_errors += ring->stats.tx_l2l3l4_err; tx_errors += ring->stats.tx_tso_err; + tx_errors += ring->stats.over_max_recursion; + tx_errors += ring->stats.hw_limitation; } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index a0fe0d9672a08..5398adfeff1f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -384,6 +384,8 @@ struct ring_stats { u64 tx_l4_proto_err; u64 tx_l2l3l4_err; u64 tx_tso_err; + u64 over_max_recursion; + u64 hw_limitation; }; struct { u64 rx_pkts; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c0312e24e1254..bc1aa74b039fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -43,6 +43,8 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err), HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err), HNS3_TQP_STAT("tso_err", tx_tso_err), + HNS3_TQP_STAT("over_max_recursion", over_max_recursion), + HNS3_TQP_STAT("hw_limitation", hw_limitation), };
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion from mainline-v5.13-rc1 commit 811c0830eb4ca8811ed80fe40378f622b9844835 category: feature bugzilla: NA CVE: NA
----------------------------
The actual size on the wire for a TSO skb should be (gso_segs - 1) * hdr_len + skb->len instead of skb->len, which can be seen by the user with the 'ethtool -S ethX' command, and 'Byte Queue Limits' also uses the send size stat to do the queue limiting, so add a send_bytes field in the desc_cb to record the actual send size of a skb. Since send_bytes is only used for the tx desc_cb and page_offset is only used for the rx desc, let them share the same space.
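As a worked example with illustrative numbers, a TSO skb carrying ten 1448-byte segments behind a 66-byte header gives:

    /* skb->len   = 66 + 10 * 1448        = 14546
     * send_bytes = (10 - 1) * 66 + 14546 = 15140
     * i.e. ten 1514-byte frames on the wire, which is what BQL and the
     * tx byte counters should account for
     */
    *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;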
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 27 +++++++++++++------ .../net/ethernet/hisilicon/hns3/hns3_enet.h | 7 ++++- 2 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index f85e2a1870e5f..0b7a1573705c8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -642,7 +642,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) }
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, - u16 *mss, u32 *type_cs_vlan_tso) + u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) { u32 l4_offset, hdr_len; union l3_hdr_info l3; @@ -705,6 +705,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(l4_paylen));
+ *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; + /* find the txbd field values */ *paylen = skb->len - hdr_len; hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); @@ -994,7 +996,8 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, }
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc) + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) { u32 ol_type_vlan_len_msec = 0; u32 type_cs_vlan_tso = 0; @@ -1023,6 +1026,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, 1); }
+ desc_cb->send_bytes = skb->len; + if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ol4_proto, il4_proto;
@@ -1047,7 +1052,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, }
ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); + &type_cs_vlan_tso, &desc_cb->send_bytes); if (unlikely(ret < 0)) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_tso_err++; @@ -1452,6 +1457,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; int pre_ntu, next_to_use_head; int ret; @@ -1478,7 +1484,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], + desc_cb); if (unlikely(ret < 0)) goto fill_err;
@@ -1498,10 +1505,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Complete translate all packets */ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); if (!netdev_xmit_more()) { - netdev_tx_sent_queue(dev_queue, skb->len); + netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes); hns3_tx_doorbell(ring, ret, true); } else { - dql_queued(&dev_queue->dql, skb->len); + dql_queued(&dev_queue->dql, desc_cb->send_bytes); hns3_tx_doorbell(ring, ret, netif_tx_queue_stopped(dev_queue)); }
@@ -2637,8 +2644,12 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, break;
desc_cb = &ring->desc_cb[ntc]; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; + + if (desc_cb->type == DESC_TYPE_SKB) { + (*pkts)++; + (*bytes) += desc_cb->send_bytes; + } + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ hns3_free_buffer_detach(ring, ntc, budget);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5398adfeff1f3..ab5270e084a1b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -306,7 +306,12 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack */ void *priv; - u32 page_offset; + + union { + u32 page_offset; /* for rx */ + u32 send_bytes; /* for tx */ + }; + u32 length; /* length of the buffer */
u16 reuse_flag;
From: Yunsheng Lin linyunsheng@huawei.com
mainline inclusion from mainline-v5.13-rc1 commit 97b9e5c131f16e2e487139ba596f9e6df927ae87 category: feature bugzilla: NA CVE: NA
----------------------------
skb_put_padto() may fail because of a memory allocation failure. sw_err_cnt is already used to log memory failures in hns3_skb_linearize(), so use it to log the memory failure for skb_put_padto() too.
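A minimal sketch of the resulting error path (doorbell handling omitted; skb_put_padto() frees the skb itself on failure, and HNS3_MIN_TX_LEN is the existing minimum frame length):

    if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
            u64_stats_update_begin(&ring->syncp);
            ring->stats.sw_err_cnt++;   /* reuse the memory-failure counter */
            u64_stats_update_end(&ring->syncp);
            return NETDEV_TX_OK;
    }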
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 0b7a1573705c8..cfe8b0c0a1bc1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1465,6 +1465,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Hardware can only handle short frames above 32 bytes */ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + return NETDEV_TX_OK; }
From: Yonglong Liu liuyonglong@huawei.com
driver inclusion category: cleanup bugzilla: NA CVE: NA
----------------------------
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1 - 1 file changed, 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index ed108e5639e28..4acd729f396a4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -149,7 +149,6 @@ enum hnae3_reset_notify_type { HNAE3_DOWN_CLIENT, HNAE3_INIT_CLIENT, HNAE3_UNINIT_CLIENT, - HNAE3_RESTORE_CLIENT, };
enum hnae3_hw_error_type {
From: Huazhong Tan tanhuazhong@huawei.com
mainline inclusion from mainline-v5.2-rc1 commit 146e92c13fdedf43a1ae211e85acde4631bb3c71 category: feature bugzilla: NA CVE: NA
----------------------------
Since the hardware does not handle mailboxes and the hardware reset already includes the TQP reset, it is unnecessary to reset the TQPs in hclgevf_ae_stop() while doing a VF reset. It is also unnecessary to reset the remaining TQPs when resetting one of them fails.
Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: Peng Li lipeng321@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index c8c076e4bebd4..64ac2de016e92 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2572,7 +2572,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- if (!test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + if (hdev->reset_type != HNAE3_VF_RESET) hclgevf_reset_tqp(handle);
hclgevf_reset_tqp_stats(handle);
From: Colin Ian King colin.king@canonical.com
mainline inclusion from mainline-v5.13-rc1 commit d0494135f94c7ab5a9cf7a9094fbb233275c7ba6 category: bugfix bugzilla: NA CVE: NA
----------------------------
The reset_prepare and reset_done calls have a null pointer check on ae_dev; however, ae_dev is already being dereferenced via the call to hns3_is_phys_func() with the ae_dev->pdev argument. Fix this by performing the null pointer check on ae_dev first, and hence short-circuiting the dereference of ae_dev in the call to hns3_is_phys_func().
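A minimal sketch of the reordering (only the suspend path is shown and the dev_info() call is omitted; the resume path is analogous):

    /* before: ae_dev->pdev is dereferenced before ae_dev is checked */
    if (hns3_is_phys_func(ae_dev->pdev))
            if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
                    ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);

    /* after: checking ae_dev first short-circuits the dereference */
    if (ae_dev && hns3_is_phys_func(ae_dev->pdev))
            if (ae_dev->ops && ae_dev->ops->reset_prepare)
                    ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);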
Addresses-Coverity: ("Dereference before null check") Fixes: 715c58e94f0d ("net: hns3: add suspend and resume pm_ops") Signed-off-by: Colin Ian King colin.king@canonical.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index cfe8b0c0a1bc1..741cb54fded96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2276,9 +2276,9 @@ static int __maybe_unused hns3_suspend(struct device *dev) { struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
- if (hns3_is_phys_func(ae_dev->pdev)) { + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { dev_info(dev, "Begin to suspend.\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + if (ae_dev->ops && ae_dev->ops->reset_prepare) ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); }
@@ -2289,9 +2289,9 @@ static int __maybe_unused hns3_resume(struct device *dev) { struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
- if (hns3_is_phys_func(ae_dev->pdev)) { + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { dev_info(dev, "Begin to resume.\n"); - if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + if (ae_dev->ops && ae_dev->ops->reset_done) ae_dev->ops->reset_done(ae_dev); }
From: Yonglong Liu liuyonglong@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
----------------------------
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index a297653d452aa..f14022d6b2f63 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -163,9 +163,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) int cnt;
cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index); - if (cnt == 2) { - rx_index = tx_index; - } else if (cnt != 1) { + if (cnt != 1 && cnt != 2) { dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt); return -EINVAL; }
From: Yonglong Liu liuyonglong@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
----------------------------
Update the driver version to 1.9.40.24.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Reviewed-by: li yongxin liyongxin1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 4acd729f396a4..98ae2afee3b17 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -30,7 +30,7 @@ #include <linux/pci.h> #include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.9.38.12" +#define HNAE3_MOD_VERSION "1.9.40.24"
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h index 7cbcf1485eadc..f521e96789fdb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h @@ -4,7 +4,7 @@ #ifndef __HNS3_CAE_VERSION_H__ #define __HNS3_CAE_VERSION_H__
-#define HNS3_CAE_MOD_VERSION "1.9.38.12" +#define HNS3_CAE_MOD_VERSION "1.9.40.24"
#define CMT_ID_LEN 8 #define RESV_LEN 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index ab5270e084a1b..b4b176cda109d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.38.12" +#define HNS3_MOD_VERSION "1.9.40.24"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 4a664801fbe92..02f64ae33d18d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,7 +12,7 @@ #include "hclge_cmd.h" #include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.38.12" +#define HCLGE_MOD_VERSION "1.9.40.24" #define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 786d73ed7042c..dce03feb04445 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -10,7 +10,7 @@ #include "hclgevf_cmd.h" #include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.9.38.12" +#define HCLGEVF_MOD_VERSION "1.9.40.24" #define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095