[PATCH OLK-6.6] Huawei iBMA: Add support for Hi1712 chip

From: Huangjunhua <huangjunhua14@huawei.com>

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICB3EN
CVE: NA

-----------------------------------------

To meet the competitive evolution requirements ("5+1+1") of the new-generation Kunpeng ARM platform, the Tianchi architecture, and the BMC management system, the product teams, BMC, and HiSilicon jointly planned the next-generation BMC evolution chip, Hi1712. Building upon Hi1711, the Hi1712 chip enhances interfaces, computing power, and security. The Huawei iBMA driver requires adaptation to support Hi1712 for in-band and out-of-band communication.

Signed-off-by: Huangjunhua <huangjunhua14@huawei.com>
---
 MAINTAINERS | 6 + .../ethernet/huawei/bma/cdev_drv/bma_cdev.c | 2 +- .../bma/cdev_veth_drv/virtual_cdev_eth_net.c | 18 +- .../bma/cdev_veth_drv/virtual_cdev_eth_net.h | 1 - .../net/ethernet/huawei/bma/edma_drv/Makefile | 2 +- .../huawei/bma/edma_drv/bma_devintf.c | 118 ++++- .../ethernet/huawei/bma/edma_drv/bma_pci.c | 277 +++++++++-- .../ethernet/huawei/bma/edma_drv/bma_pci.h | 33 +- .../ethernet/huawei/bma/edma_drv/edma_drv.h | 340 +++++++++++++ .../ethernet/huawei/bma/edma_drv/edma_host.c | 160 +++++- .../ethernet/huawei/bma/edma_drv/edma_host.h | 14 +- .../ethernet/huawei/bma/edma_drv/edma_queue.c | 470 ++++++++++++++++++ .../ethernet/huawei/bma/edma_drv/edma_queue.h | 29 ++ .../ethernet/huawei/bma/edma_drv/edma_reg.h | 127 +++++ .../huawei/bma/include/bma_ker_intf.h | 46 ++ .../huawei/bma/kbox_drv/kbox_include.h | 2 +- .../ethernet/huawei/bma/veth_drv/veth_hb.c | 25 +- .../ethernet/huawei/bma/veth_drv/veth_hb.h | 12 +- 18 files changed, 1582 insertions(+), 100 deletions(-) create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h diff --git a/MAINTAINERS b/MAINTAINERS index 61baf2cfc4e1..446f2f49fd14 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9707,6 +9707,12 @@ F: drivers/net/ethernet/huawei/hinic3/cqm/ F: drivers/net/ethernet/huawei/hinic3/hw/ F: drivers/net/ethernet/huawei/hinic3/include/ +HUAWEI ETHERNET DRIVER +M: Huangjunhua <huangjunhua14@huawei.com> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/huawei/bma/ + HUAWEI BIFUR DRIVER M: Xiaoping zheng <zhengxiaoping5@huawei.com> L: netdev@vger.kernel.org diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c index 275c2cdfe5db..59181c829a68 100644 --- a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c +++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c @@ -28,7 +28,7 @@ #ifdef DRV_VERSION #define CDEV_VERSION MICRO_TO_STR(DRV_VERSION) #else -#define CDEV_VERSION "0.3.10" +#define CDEV_VERSION "0.4.0" #endif #define CDEV_DEFAULT_NUM 4 diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c index e6dbec7073e4..adb6dd6972f5 100644 --- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c +++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c @@ -151,6 +151,12 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth) u8 *shmq_head = NULL; u8 *shmq_head_p = NULL; struct edma_rxtx_q_s *tx_queue = NULL; + int ret = 0; + phys_addr_t veth_address = 0; + + ret =
bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address); + if (ret != 0) + return -EFAULT; tx_queue = (struct edma_rxtx_q_s *) kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL); @@ -173,7 +179,7 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth) tx_queue->pdmalbase_v = (struct edma_dmal_s *) (shmq_head + SHMDMAL_OFFSET); - tx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC + + tx_queue->pdmalbase_p = (u8 *)(veth_address + (MAX_SHAREQUEUE_SIZE * 0) + SHMDMAL_OFFSET); memset(tx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE); @@ -219,6 +225,12 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth) u8 *shmq_head = NULL; u8 *shmq_head_p = NULL; struct edma_rxtx_q_s *rx_queue = NULL; + int ret = 0; + phys_addr_t veth_address = 0; + + ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address); + if (ret != 0) + return -EFAULT; rx_queue = (struct edma_rxtx_q_s *) kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL); @@ -241,7 +253,7 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth) /* DMA address list (only used in host). */ rx_queue->pdmalbase_v = (struct edma_dmal_s *) (shmq_head + SHMDMAL_OFFSET); - rx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC + + rx_queue->pdmalbase_p = (u8 *)(veth_address + MAX_SHAREQUEUE_SIZE + SHMDMAL_OFFSET); memset(rx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE); @@ -1304,6 +1316,8 @@ int __start_dmalist_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 cnt) dma_transfer.type = DMA_LIST; dma_transfer.transfer.list.dma_addr = (dma_addr_t)prxtx_queue->pdmalbase_p; + dma_transfer.pdmalbase_v = (struct bspveth_dmal *)prxtx_queue->pdmalbase_v; + dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt; ret = bma_intf_start_dma(g_eth_edmaprivate.edma_priv, &dma_transfer); LOG(DLOG_DEBUG, "after -> %u/%u/%u/%u, ret: %d", diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h index cb7c28cb5ddd..bc4b2147272b 100644 --- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h +++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h @@ -56,7 +56,6 @@ #define BSP_ERR_AGAIN (BSP_ETH_ERR_BASE + 18) #define BSP_ERR_NOT_TO_HANDLE (BSP_ETH_ERR_BASE + 19) -#define VETH_SHAREPOOL_BASE_INBMC (0x84820000) #define VETH_SHAREPOOL_SIZE (0xdf000) #define VETH_SHAREPOOL_OFFSET (0x10000) #define MAX_SHAREQUEUE_SIZE (0x20000) diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile index 46cc51275a71..048bcb9e2bbe 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile +++ b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_BMA) += host_edma_drv.o -host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o +host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o edma_queue.o diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c index 3b5eb39d6da6..45815fdc18eb 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c @@ -31,6 +31,18 @@ static struct bma_dev_s *g_bma_dev; static ATOMIC_NOTIFIER_HEAD(bma_int_notify_list); +static enum pci_type_e g_pci_type = PCI_TYPE_UNKNOWN; + +enum pci_type_e get_pci_type(void) +{ + return g_pci_type; +} + +void set_pci_type(enum pci_type_e type) +{ + g_pci_type = type; +} + static int bma_priv_insert_priv_list(struct bma_priv_data_s *priv, u32 
type, u32 sub_type) { @@ -342,6 +354,82 @@ int bma_intf_unregister_type(void **handle) } EXPORT_SYMBOL(bma_intf_unregister_type); +int bma_intf_get_host_number(unsigned int *host_number) +{ + unsigned int devfn = 0; + + if (!host_number) + return -EFAULT; + + if (!g_bma_dev) { + BMA_LOG(DLOG_ERROR, "g_bma_dev is NULL\n"); + return -ENXIO; + } + + devfn = g_bma_dev->bma_pci_dev->pdev->devfn; + BMA_LOG(DLOG_DEBUG, "devfn is %u\n", devfn); + if (devfn == PF7 || devfn == PF10) { + *host_number = HOST_NUMBER_0; + } else if (devfn == PF4) { + *host_number = HOST_NUMBER_1; + } else { + BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn); + *host_number = HOST_NUMBER_0; + } + + return 0; +} +EXPORT_SYMBOL(bma_intf_get_host_number); + +int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr) +{ + u32 host_number = 0; + u32 devfn = 0; + u32 i = 0; + enum pci_type_e pci_type = get_pci_type(); + struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev(); + + static struct bma_map_addr_s addr_info[] = { + {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR}, + {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR}, + {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR}, + {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR}, + {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1712_HOST0_ADDR}, + {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1712_HOST0_ADDR}, + {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_EDMA_ADDR, EDMA_1712_HOST1_ADDR}, + {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_VETH_ADDR, VETH_1712_HOST1_ADDR}, + }; + + if (!bma_pci_dev) { + BMA_LOG(DLOG_ERROR, "bma_pci_dev is null\n"); + return -EFAULT; + } + + devfn = bma_pci_dev->pdev->devfn; + if (devfn == PF7 || devfn == PF10) { + host_number = HOST_NUMBER_0; + } else if (devfn == PF4) { + host_number = HOST_NUMBER_1; + } else { + BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn); + host_number = HOST_NUMBER_0; + } + + for (i = 0; i < ARRAY_SIZE(addr_info); i++) { + if (pci_type == addr_info[i].pci_type && + host_number == addr_info[i].host_number && type == addr_info[i].addr_type) { + *addr = addr_info[i].addr; + return 0; + } + } + + BMA_LOG(DLOG_DEBUG, + "Cannot find proper map address! 
pci_type: %u, host_number: %u, addr_type: %u\n", + pci_type, host_number, type); + return -EFAULT; +} +EXPORT_SYMBOL(bma_intf_get_map_address); + int bma_intf_check_edma_supported(void) { return !(!g_bma_dev); @@ -350,13 +438,30 @@ EXPORT_SYMBOL(bma_intf_check_edma_supported); int bma_intf_check_dma_status(enum dma_direction_e dir) { - return edma_host_check_dma_status(dir); + enum pci_type_e pci_type = get_pci_type(); + + if (pci_type == PCI_TYPE_UNKNOWN) { + BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n"); + return -EFAULT; + } + + return get_bma_pci_dev_handler_s()[pci_type].check_dma(dir); } EXPORT_SYMBOL(bma_intf_check_dma_status); void bma_intf_reset_dma(enum dma_direction_e dir) { - edma_host_reset_dma(&g_bma_dev->edma_host, dir); + enum pci_type_e pci_type = get_pci_type(); + + if (!g_bma_dev) + return; + + if (pci_type == PCI_TYPE_UNKNOWN) { + BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n"); + return; + } + + get_bma_pci_dev_handler_s()[pci_type].reset_dma(&g_bma_dev->edma_host, dir); } EXPORT_SYMBOL(bma_intf_reset_dma); @@ -375,10 +480,16 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer) { int ret = 0; struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle; + enum pci_type_e pci_type = get_pci_type(); if (!handle || !dma_transfer) return -EFAULT; + if (pci_type == PCI_TYPE_UNKNOWN) { + BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n"); + return -EFAULT; + } + ret = edma_host_dma_start(&g_bma_dev->edma_host, priv); if (ret) { BMA_LOG(DLOG_ERROR, @@ -386,7 +497,8 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer) return ret; } - ret = edma_host_dma_transfer(&g_bma_dev->edma_host, priv, dma_transfer); + ret = get_bma_pci_dev_handler_s()[pci_type].transfer_edma_host(&g_bma_dev->edma_host, priv, + dma_transfer); if (ret) BMA_LOG(DLOG_ERROR, "edma_host_dma_transfer failed! ret = %d\n", ret); diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c index 577acaedb0e2..0e43289e0d1a 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c @@ -27,13 +27,20 @@ #define PCI_VENDOR_ID_HUAWEI_PME 0x19e5 #define PCI_DEVICE_ID_KBOX_0_PME 0x1710 +#define PCI_DEVICE_ID_EDMA_0 0x1712 #define PCI_PME_USEABLE_SPACE (4 * 1024 * 1024) + +#define HOSTRTC_OFFSET 0x10000 +#define EDMA_OFFSET 0x20000 +#define VETH_OFFSET 0x30000 + #define PME_DEV_CHECK(device, vendor) ((device) == PCI_DEVICE_ID_KBOX_0_PME && \ (vendor) == PCI_VENDOR_ID_HUAWEI_PME) #define PCI_BAR0_PME_1710 0x85800000 #define PCI_BAR0 0 #define PCI_BAR1 1 +#define PCI_BAR2 2 #define PCI_USING_DAC_DEFAULT 0 #define GET_HIGH_ADDR(address) ((sizeof(unsigned long) == 8) ? 
\ @@ -51,15 +58,50 @@ int debug = DLOG_ERROR; MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)"); static struct bma_pci_dev_s *g_bma_pci_dev; +struct bma_pci_dev_s *get_bma_pci_dev(void) +{ + return g_bma_pci_dev; +} + +void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev) +{ + g_bma_pci_dev = bma_pci_dev; +} static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int bma_pci_resume(struct pci_dev *pdev); static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void bma_pci_remove(struct pci_dev *pdev); +static struct bma_pci_dev_handler_s g_bma_pci_dev_handler_s[] = { + {0}, + // for 1710/1711 + { + .ioremap_bar_mem = ioremap_pme_bar_mem_v1, + .iounmap_bar_mem = iounmap_bar_mem_v1, + .check_dma = edma_host_check_dma_status_v1, + .transfer_edma_host = edma_host_dma_transfer_v1, + .reset_dma = edma_host_reset_dma_v1, + }, + // for 1712 + { + .ioremap_bar_mem = ioremap_pme_bar_mem_v2, + .iounmap_bar_mem = iounmap_bar_mem_v2, + .check_dma = edma_host_check_dma_status_v2, + .transfer_edma_host = edma_host_dma_transfer_v2, + .reset_dma = edma_host_reset_dma_v2, + } +}; + +struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void) +{ + return g_bma_pci_dev_handler_s; +} + static const struct pci_device_id bma_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_FPGA, PCI_DEVICE_ID_KBOX_0)}, {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_KBOX_0_PME)}, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_EDMA_0) }, {} }; MODULE_DEVICE_TABLE(pci, bma_pci_tbl); @@ -73,7 +115,7 @@ int edma_param_get_statics(char *buf, const struct kernel_param *kp) } module_param_call(statistics, NULL, edma_param_get_statics, &debug, 0444); -MODULE_PARM_DESC(statistics, "Statistics info of edma driver,readonly"); +MODULE_PARM_DESC(statistics, "Statistics info of edma driver, readonly"); int edma_param_set_debug(const char *buf, const struct kernel_param *kp) { @@ -99,34 +141,40 @@ module_param_call(debug, &edma_param_set_debug, ¶m_get_int, &debug, 0644); void __iomem *kbox_get_base_addr(void) { - if (!g_bma_pci_dev || (!(g_bma_pci_dev->kbox_base_addr))) { + struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev(); + + if (!bma_pci_dev || (!(bma_pci_dev->kbox_base_addr))) { BMA_LOG(DLOG_ERROR, "kbox_base_addr NULL point\n"); return NULL; } - return g_bma_pci_dev->kbox_base_addr; + return bma_pci_dev->kbox_base_addr; } EXPORT_SYMBOL_GPL(kbox_get_base_addr); unsigned long kbox_get_io_len(void) { - if (!g_bma_pci_dev) { - BMA_LOG(DLOG_ERROR, "kbox_io_len is error,can not get it\n"); + struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev(); + + if (!bma_pci_dev) { + BMA_LOG(DLOG_ERROR, "kbox_io_len is error, can not get it\n"); return 0; } - return g_bma_pci_dev->kbox_base_len; + return bma_pci_dev->kbox_base_len; } EXPORT_SYMBOL_GPL(kbox_get_io_len); unsigned long kbox_get_base_phy_addr(void) { - if (!g_bma_pci_dev || !g_bma_pci_dev->kbox_base_phy_addr) { + struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev(); + + if (!bma_pci_dev || bma_pci_dev->kbox_base_phy_addr == 0) { BMA_LOG(DLOG_ERROR, "kbox_base_phy_addr NULL point\n"); return 0; } - return g_bma_pci_dev->kbox_base_phy_addr; + return bma_pci_dev->kbox_base_phy_addr; } EXPORT_SYMBOL_GPL(kbox_get_base_phy_addr); @@ -160,7 +208,7 @@ s32 __atu_config_H(struct pci_dev *pdev, unsigned int region, return 0; } -static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev) +void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev) { if (bma_pci_dev->kbox_base_addr) { 
iounmap(bma_pci_dev->kbox_base_addr); @@ -171,15 +219,84 @@ static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev) iounmap(bma_pci_dev->bma_base_addr); bma_pci_dev->bma_base_addr = NULL; bma_pci_dev->edma_swap_addr = NULL; + bma_pci_dev->veth_swap_addr = NULL; bma_pci_dev->hostrtc_viraddr = NULL; } } -static int ioremap_pme_bar1_mem(struct pci_dev *pdev, - struct bma_pci_dev_s *bma_pci_dev) +void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev) +{ + if (bma_pci_dev->kbox_base_addr) { + iounmap(bma_pci_dev->kbox_base_addr); + bma_pci_dev->kbox_base_addr = NULL; + } + + if (bma_pci_dev->bma_base_addr) { + iounmap(bma_pci_dev->bma_base_addr); + bma_pci_dev->bma_base_addr = NULL; + } + + if (bma_pci_dev->hostrtc_viraddr) { + iounmap(bma_pci_dev->hostrtc_viraddr); + bma_pci_dev->hostrtc_viraddr = NULL; + bma_pci_dev->edma_swap_addr = NULL; + bma_pci_dev->veth_swap_addr = NULL; + } +} + +static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev) +{ + enum pci_type_e pci_type = get_pci_type(); + + if (pci_type == PCI_TYPE_UNKNOWN) + return; + + g_bma_pci_dev_handler_s[pci_type].iounmap_bar_mem(bma_pci_dev); +} + +static int config_atu(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) +{ + int ret = 0; + phys_addr_t edma_address = 0; + phys_addr_t veth_address = 0; + + ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address); + if (ret != 0) + return ret; + + ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address); + if (ret != 0) + return ret; + + __atu_config_H(pdev, 0, + GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr), + (bma_pci_dev->kbox_base_phy_addr & 0xffffffff), + 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE); + + __atu_config_H(pdev, 1, + GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr), + (bma_pci_dev->hostrtc_phyaddr & 0xffffffff), + 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE); + + __atu_config_H(pdev, 2, + GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr), + (bma_pci_dev->edma_swap_phy_addr & 0xffffffff), + 0, edma_address, EDMA_SWAP_DATA_SIZE); + + __atu_config_H(pdev, 3, + GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr), + (bma_pci_dev->veth_swap_phy_addr & 0xffffffff), + 0, veth_address, VETH_SWAP_DATA_SIZE); + + return ret; +} + +// for 1710 1711 +int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) { unsigned long bar1_resource_flag = 0; u32 data = 0; + int ret; bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE; BMA_LOG(DLOG_DEBUG, "1710\n"); @@ -217,25 +334,11 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev, bma_pci_dev->edma_swap_phy_addr, bma_pci_dev->veth_swap_phy_addr); - __atu_config_H(pdev, 0, - GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr), - (bma_pci_dev->kbox_base_phy_addr & 0xffffffff), - 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE); - - __atu_config_H(pdev, 1, - GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr), - (bma_pci_dev->hostrtc_phyaddr & 0xffffffff), - 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE); - - __atu_config_H(pdev, 2, - GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr), - (bma_pci_dev->edma_swap_phy_addr & 0xffffffff), - 0, EDMA_SWAP_DATA_BASE, EDMA_SWAP_DATA_SIZE); - - __atu_config_H(pdev, 3, - GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr), - (bma_pci_dev->veth_swap_phy_addr & 0xffffffff), - 0, VETH_SWAP_DATA_BASE, VETH_SWAP_DATA_SIZE); + ret = config_atu(pdev, bma_pci_dev); + if (ret != 0) { + BMA_LOG(DLOG_DEBUG, "config atu failed.\n"); + return ret; + } if (bar1_resource_flag & IORESOURCE_CACHEABLE) { bma_pci_dev->bma_base_addr = @@ -250,7 +353,6 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev, 
if (!bma_pci_dev->bma_base_addr) { BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n"); - return -ENODEV; } @@ -270,11 +372,80 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev, return 0; } +// for 1712 +int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) +{ + unsigned long bar2_resource_flag = 0; + + bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE; + BMA_LOG(DLOG_DEBUG, "1712\n"); + + bma_pci_dev->bma_base_phy_addr = (unsigned long)pci_resource_start(pdev, PCI_BAR2); + bar2_resource_flag = (unsigned long)pci_resource_flags(pdev, PCI_BAR2); + if (!(bar2_resource_flag & IORESOURCE_MEM)) { + BMA_LOG(DLOG_ERROR, "Cannot find proper PCI device base address, aborting\n"); + return -ENODEV; + } + + bma_pci_dev->bma_base_len = (unsigned long)pci_resource_len(pdev, PCI_BAR2); + bma_pci_dev->edma_swap_len = EDMA_SWAP_DATA_SIZE; + bma_pci_dev->veth_swap_len = VETH_SWAP_DATA_SIZE; + + BMA_LOG(DLOG_DEBUG, + "bar2: bma_base_len = 0x%lx, edma_swap_len = %ld, veth_swap_len = %ld(0x%lx)\n", + bma_pci_dev->bma_base_len, bma_pci_dev->edma_swap_len, bma_pci_dev->veth_swap_len, + bma_pci_dev->veth_swap_len); + + bma_pci_dev->hostrtc_phyaddr = bma_pci_dev->bma_base_phy_addr + HOSTRTC_OFFSET; + /* edma */ + bma_pci_dev->edma_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + EDMA_OFFSET; + /* veth */ + bma_pci_dev->veth_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + VETH_OFFSET; + + BMA_LOG(DLOG_DEBUG, + "bar2: bma_base_phy_addr = 0x%lx, bma_base_len = %zu , hostrtc_phyaddr = 0x%lx, edma_swap_phy_addr = 0x%lx, veth_swap_phy_addr = 0x%lx\n", + bma_pci_dev->bma_base_phy_addr, bma_pci_dev->bma_base_len, + bma_pci_dev->hostrtc_phyaddr, bma_pci_dev->edma_swap_phy_addr, + bma_pci_dev->veth_swap_phy_addr); + + bma_pci_dev->bma_base_addr = ioremap(bma_pci_dev->bma_base_phy_addr, + bma_pci_dev->bma_base_len); + if (!bma_pci_dev->bma_base_addr) { + BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n"); + return -ENODEV; + } + + if (bar2_resource_flag & IORESOURCE_CACHEABLE) { + BMA_LOG(DLOG_DEBUG, "ioremap with cache, %d\n", IORESOURCE_CACHEABLE); + bma_pci_dev->hostrtc_viraddr = ioremap(bma_pci_dev->hostrtc_phyaddr, + bma_pci_dev->bma_base_len - HOSTRTC_OFFSET); + } else { + BMA_LOG(DLOG_DEBUG, "ioremap without cache\n"); + bma_pci_dev->hostrtc_viraddr = IOREMAP(bma_pci_dev->hostrtc_phyaddr, + bma_pci_dev->bma_base_len - HOSTRTC_OFFSET); + } + + if (!bma_pci_dev->hostrtc_viraddr) { + BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n"); + iounmap(bma_pci_dev->bma_base_addr); + bma_pci_dev->bma_base_addr = NULL; + return -ENODEV; + } + + bma_pci_dev->edma_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr + - HOSTRTC_OFFSET + EDMA_OFFSET; + bma_pci_dev->veth_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr + - HOSTRTC_OFFSET + VETH_OFFSET; + + return 0; +} + static int ioremap_bar_mem(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) { int err = 0; unsigned long bar0_resource_flag = 0; + enum pci_type_e pci_type = get_pci_type(); bar0_resource_flag = pci_resource_flags(pdev, PCI_BAR0); @@ -294,8 +465,8 @@ static int ioremap_bar_mem(struct pci_dev *pdev, bma_pci_dev->kbox_base_phy_addr, bma_pci_dev->kbox_base_len, bma_pci_dev->kbox_base_len); - if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { - err = ioremap_pme_bar1_mem(pdev, bma_pci_dev); + if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && pci_type != PCI_TYPE_UNKNOWN) { + err = g_bma_pci_dev_handler_s[pci_type].ioremap_bar_mem(pdev, bma_pci_dev); if (err != 0) return err; } @@ 
-314,11 +485,7 @@ static int ioremap_bar_mem(struct pci_dev *pdev, if (!bma_pci_dev->kbox_base_addr) { BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n"); - - iounmap(bma_pci_dev->bma_base_addr); - bma_pci_dev->bma_base_addr = NULL; - bma_pci_dev->edma_swap_addr = NULL; - bma_pci_dev->hostrtc_viraddr = NULL; + iounmap_bar_mem(bma_pci_dev); return -ENOMEM; } @@ -355,13 +522,14 @@ int pci_device_init(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev) { int err = 0; - if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { + if ((pdev->device == PCI_DEVICE_ID_KBOX_0_PME || pdev->device == PCI_DEVICE_ID_EDMA_0) && + pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME) { err = bma_devinft_init(bma_pci_dev); if (err) { BMA_LOG(DLOG_ERROR, "bma_devinft_init failed\n"); bma_devinft_cleanup(bma_pci_dev); iounmap_bar_mem(bma_pci_dev); - g_bma_pci_dev = NULL; + set_bma_pci_dev(NULL); pci_release_regions(pdev); kfree(bma_pci_dev); #ifdef CONFIG_PCI_MSI @@ -400,27 +568,25 @@ int pci_device_config(struct pci_dev *pdev) goto err_out_free_dev; } + set_bma_pci_dev(bma_pci_dev); + err = ioremap_bar_mem(pdev, bma_pci_dev); if (err) { BMA_LOG(DLOG_ERROR, "ioremap_edma_io_mem failed\n"); goto err_out_release_regions; } - g_bma_pci_dev = bma_pci_dev; - if (SET_DMA_MASK(&pdev->dev)) { BMA_LOG(DLOG_ERROR, - "No usable DMA ,configuration, aborting,goto failed2!!!\n"); + "No usable DMA, configuration, aborting, goto failed2!!!\n"); goto err_out_unmap_bar; } - g_bma_pci_dev = bma_pci_dev; - return pci_device_init(pdev, bma_pci_dev); err_out_unmap_bar: iounmap_bar_mem(bma_pci_dev); - g_bma_pci_dev = NULL; + set_bma_pci_dev(NULL); err_out_release_regions: pci_release_regions(pdev); err_out_free_dev: @@ -442,16 +608,27 @@ static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) UNUSED(ent); - if (g_bma_pci_dev) + if (get_bma_pci_dev()) return -EPERM; err = pci_enable_device(pdev); if (err) { - BMA_LOG(DLOG_ERROR, "Cannot enable PCI device,aborting\n"); + BMA_LOG(DLOG_ERROR, "Cannot enable PCI device, aborting\n"); return err; } - if (PME_DEV_CHECK(pdev->device, pdev->vendor)) { + switch (pdev->device) { + case PCI_DEVICE_ID_KBOX_0_PME: + set_pci_type(PCI_TYPE_171x); + break; + case PCI_DEVICE_ID_EDMA_0: + set_pci_type(PCI_TYPE_1712); + break; + default: + set_pci_type(PCI_TYPE_UNKNOWN); + break; + } + if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && get_pci_type() != PCI_TYPE_UNKNOWN) { err = pme_pci_enable_msi(pdev); if (err) return err; @@ -468,7 +645,7 @@ static void bma_pci_remove(struct pci_dev *pdev) struct bma_pci_dev_s *bma_pci_dev = (struct bma_pci_dev_s *)pci_get_drvdata(pdev); - g_bma_pci_dev = NULL; + set_bma_pci_dev(NULL); (void)pci_set_drvdata(pdev, NULL); if (bma_pci_dev) { diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h index a66724e2cb74..b43882997c01 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h +++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h @@ -18,6 +18,8 @@ #include "bma_devintf.h" #include "bma_include.h" +#include "../include/bma_ker_intf.h" +#include "edma_host.h" #include <linux/netdevice.h> #define EDMA_SWAP_BASE_OFFSET 0x10000 @@ -25,10 +27,8 @@ #define HOSTRTC_REG_BASE 0x2f000000 #define HOSTRTC_REG_SIZE EDMA_SWAP_BASE_OFFSET -#define EDMA_SWAP_DATA_BASE 0x84810000 #define EDMA_SWAP_DATA_SIZE 65536 -#define VETH_SWAP_DATA_BASE 0x84820000 #define VETH_SWAP_DATA_SIZE 0xdf000 #define ATU_VIEWPORT 0x900 @@ -71,7 +71,7 @@ struct bma_pci_dev_s { #ifdef DRV_VERSION #define 
BMA_VERSION MICRO_TO_STR(DRV_VERSION) #else -#define BMA_VERSION "0.3.10" +#define BMA_VERSION "0.4.0" #endif #ifdef CONFIG_ARM64 @@ -95,4 +95,31 @@ extern int debug; int edmainfo_show(char *buff); +struct bma_pci_dev_s *get_bma_pci_dev(void); +void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev); + +struct bma_pci_dev_handler_s { + int (*ioremap_bar_mem)(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev); + void (*iounmap_bar_mem)(struct bma_pci_dev_s *bma_pci_dev); + int (*check_dma)(enum dma_direction_e dir); + int (*transfer_edma_host)(struct edma_host_s *edma_host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer); + void (*reset_dma)(struct edma_host_s *edma_host, enum dma_direction_e dir); +}; + +struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void); + +int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev); +int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev); +void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev); +void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev); +int edma_host_check_dma_status_v1(enum dma_direction_e dir); +int edma_host_check_dma_status_v2(enum dma_direction_e dir); +int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer); +int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer); +void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir); +void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir); + #endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h new file mode 100644 index 000000000000..b0a09c022ba8 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2025, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef EDMA_DRV_H +#define EDMA_DRV_H + +#define DMA_STATISTICS_LEN 16 +#define DMA_CH_TAG_SIZE 64 + +#define HISILICON_VENDOR_ID 0x19e5 +#define DMA_PCIE_DEVICE_ID 0xa122 + +#define MAX_DMA_CHS 4 /* The current version supports a maximum of 2x2 channels. */ +#define DMA_CHS_EACH_PORT 2 + +#define MAX_SQ_DEPTH 0xFFFF +#define MAX_CQ_DEPTH 0xFFFF + +#define DMA_DONE_MASK 0x1 +#define DMA_DONE_UNMASK 0x0 +#define DMA_ERR_MASK 0x7FFFE +#define DMA_ERR_UNMASK 0x0 + +#define BD_SO 0 +#define BD_RO 1 + +#define SIZE_4M 0x400000 +#define SIZE_16K 0x4000 +#define SIZE_64K 0x10000 +#define SIZE_OF_U64 0x8 +#define SPD_SIZE_MAX 32 + +/* Use integer arithmetic for approximate computation instead of floating-point. 
*/ +#define US_PER_SECOND_DIV_1KB (1000000 / 1024) + +#define DMA_PHY_STORE_OFFSET (SIZE_64K - SIZE_OF_U64) +#define DMA_RMT_PHY_STORE_OFFSET (DMA_PHY_STORE_OFFSET - SIZE_OF_U64) +#define BIT_0_TO_31_MASK 0xFFFFFFFF + +#define DMA_TMOUT (2 * HZ) /* 2 seconds */ + +enum { + EP0 = 0, + EP1 = 1 +}; + +enum { + DRC_LOCAL = 0, + DRC_REMOTE = 1 +}; + +enum { + DIR_B2H = 0, + DIR_H2B = 1, +}; + +enum { + DMA_INIT = 0x0, + DMA_RESET = 0x1, + DMA_PAUSE = 0x2, + DMA_NOTIFY = 0x3, + LINKDOWN = 0x4, + LINKUP = 0x5, + FLR = 0x6 +}; + +enum { + PF0 = 0, + PF1 = 1, + PF2 = 2, + PF4 = 4, + PF7 = 7, + PF10 = 10 +}; + +enum { + RESERVED = 0x0, /* reserved */ + SMALL_PACKET = 0x1, /* SmallPacket Descriptor */ + DMA_READ = 0x2, /* Read Descriptor */ + DMA_WRITE = 0x3, /* Write Descriptor */ + DMA_LOOP = 0x4, /* Loop Descriptor */ + DMA_MIX = 0x10, /* not available, User-defined for test */ + DMA_WD_BARRIER = 0x11, /* not available, User-defined for test */ + DMA_RD_BARRIER = 0x12, /* not available, User-defined for test */ + DMA_LP_BARRIER = 0x13 /* not available, User-defined for test */ +}; + +enum { + IDLE_STATE = 0x0, /* dma channel in idle status */ + RUN_STATE = 0x1, /* dma channel in run status */ + CPL_STATE = 0x2, /* dma channel in cpld status */ + PAUSE_STATE = 0x3, /* dma channel in pause status */ + HALT_STATE = 0x4, /* dma channel in halt status */ + ABORT_STATE = 0x5, /* dma channel in abort status */ + WAIT_STATE = 0x6 /* dma channel in wait status */ +}; + +/* CQE status */ +enum { + DMA_DONE = 0x0, /* sqe done succ */ + OPCODE_ERR = 0x1, /* sqe opcode invalid */ + LEN_ERR = 0x2, /* sqe length invalid, only ocurs in smallpackt */ + DROP_EN = 0x4, /* sqe drop happen */ + WR_RMT_ERR = 0x8, /* write data to host fail */ + RD_RMT_ERR = 0x10, /* read data from host fail */ + RD_AXI_ERR = 0x20, /* read data/sqe from local fail */ + WR_AXI_ERR = 0x40, /* write data/cqe to local fail */ + POISON_CPL_ERR = 0x80, /* poison data */ + SUB_SQ_ERR = 0x100, /* read sqe with CPL TLP */ + DMA_CH_RESET = 0x200, /* dma channel should reset */ + LINK_DOWN_ERR = 0x400, /* linkdown happen */ + RECOVERY = 0x800 /* error status to be reset */ +}; + +enum { + SDI_DMA_ADDR_SIZE_16K = 0, + SDI_DMA_ADDR_SIZE_32K = 1, + SDI_DMA_ADDR_SIZE_64K = 2, + SDI_DMA_ADDR_SIZE_128K = 3 +}; + +union U_DMA_QUEUE_SQ_DEPTH { + struct { + unsigned int dma_queue_sq_depth : 16; /* [15..0] */ + unsigned int reserved_0 : 16; /* [31..16] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_CQ_DEPTH { + struct { + unsigned int dma_queue_cq_depth : 16; /* [15..0] */ + unsigned int reserved_0 : 16; /* [31..16] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_CQ_HEAD_PTR { + struct { + unsigned int dma_queue_cq_head_ptr : 16; /* [15..0] */ + unsigned int reserved_0 : 16; /* [31..16] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_CQ_TAIL_PTR { + struct { + unsigned int dma_queue_cq_tail_ptr : 16; /* [15..0] */ + unsigned int dma_queue_sqhd : 16; /* [31..16] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_SQ_TAIL_PTR { + struct { + unsigned int dma_queue_sq_tail_ptr : 16; /* [15..0] */ + unsigned int reserved_0 : 16; /* [31..16] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_CTRL0 { + struct { + unsigned int dma_queue_en : 1; /* [0] */ + unsigned int dma_queue_icg_en : 1; /* [1] */ + unsigned int reserved : 1; /* [2] */ + unsigned int dma_rst_without_cq_ack_enable : 1; /* [3] */ + unsigned int dma_queue_pause : 1; /* [4] */ + unsigned int reserved_1 : 3; /* [7..5] */ + unsigned int 
dma_queue_arb_weight : 8; /* [15..8] */ + unsigned int reserved_2 : 3; /* [18...16] */ + unsigned int dma_queue_cq_mrg_en : 1; /* [19] */ + unsigned int dma_queue_cq_mrg_time : 2; /* [21..20] */ + unsigned int dma_queue_local_err_done_int_en : 1; /* [22] */ + unsigned int dma_queue_remote_err_done_int_en : 1; /* [23] */ + unsigned int reserved_3 : 1; /* [24] */ + unsigned int dma_queue_cq_full_disable : 1; /* [25] */ + unsigned int dma_queue_cq_drct_sel : 1; /* [26] */ + unsigned int dma_queue_sq_drct_sel : 1; /* [27] */ + unsigned int dma_queue_sq_pa_lkp_err_abort_en : 1; /* [28] */ + unsigned int dma_queue_sq_proc_err_abort_en : 1; /* [29] */ + unsigned int dma_queue_sq_drop_err_abort_en : 1; /* [30] */ + unsigned int dma_queue_sq_cfg_err_abort_en : 1; /* [31] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_CTRL1 { + struct { + unsigned int dma_queue_reset : 1; /* [0] */ + unsigned int dma_queue_abort_exit : 1; /* [1] */ + unsigned int dma_va_enable : 1; /* [2] */ + unsigned int reserved_0 : 1; /* [3] */ + unsigned int dma_queue_port_num : 4; /* [7..4] */ + unsigned int dma_queue_remote_msi_x_mask : 1; /* [8] */ + unsigned int dma_va_enable_sq : 1; /* [9] */ + unsigned int dma_va_enable_cq : 1; /* [10] */ + unsigned int dma_queue_local_pfx_er : 1; /* [11] */ + unsigned int dma_queue_local_pfx_pmr : 1; /* [12] */ + unsigned int reserved_1 : 3; /* [15...13] */ + unsigned int dma_queue_qos_en : 1; /* [16] */ + unsigned int dma_queue_qos : 4; /* [20...17] */ + unsigned int dma_queue_mpam_id : 11; /* [31..21] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_FSM_STS { + struct { + unsigned int dma_queue_sts : 4; /* [3..0] */ + unsigned int dma_queue_not_work : 1; /* [4] */ + unsigned int dma_queue_wait_spd_data_sts : 1; /* [5] */ + unsigned int reserved_0 : 1; /* [6] */ + unsigned int reserved_1 : 1; /* [7] */ + unsigned int dma_queue_sub_fsm_sts : 3; /* [10..8] */ + unsigned int reserved_2 : 21; /* [31..11] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_INT_STS { + struct { + unsigned int dma_queue_done_int_sts : 1; /* [0] */ + unsigned int dma_queue_err00_int_sts : 1; /* [1] */ + unsigned int dma_queue_err01_int_sts : 1; /* [2] */ + unsigned int dma_queue_err02_int_sts : 1; /* [3] */ + unsigned int dma_queue_err03_int_sts : 1; /* [4] */ + unsigned int reserved : 1; /* [5] */ + unsigned int dma_queue_err05_int_sts : 1; /* [6] */ + unsigned int dma_queue_err06_int_sts : 1; /* [7] */ + unsigned int dma_queue_err07_int_sts : 1; /* [8] */ + unsigned int dma_queue_err08_int_sts : 1; /* [9] */ + unsigned int dma_queue_err09_int_sts : 1; /* [10] */ + unsigned int dma_queue_err10_int_sts : 1; /* [11] */ + unsigned int dma_queue_err11_int_sts : 1; /* [12] */ + unsigned int dma_queue_err12_int_sts : 1; /* [13] */ + unsigned int dma_queue_err13_int_sts : 1; /* [14] */ + unsigned int dma_queue_err14_int_sts : 1; /* [15] */ + unsigned int dma_queue_err15_int_sts : 1; /* [16] */ + unsigned int dma_queue_err16_int_sts : 1; /* [17] */ + unsigned int dma_queue_err17_int_sts : 1; /* [18] */ + unsigned int reserved_0 : 13; /* [31..19] */ + } bits; + + unsigned int u32; +}; + +union U_DMA_QUEUE_INT_MSK { + struct { + unsigned int dma_queue_done_int_msk : 1; /* [0] */ + unsigned int dma_queue_err00_int_msk : 1; /* [1] */ + unsigned int dma_queue_err01_int_msk : 1; /* [2] */ + unsigned int dma_queue_err02_int_msk : 1; /* [3] */ + unsigned int dma_queue_err03_int_msk : 1; /* [4] */ + unsigned int reserved : 1; /* [5] */ + unsigned int dma_queue_err05_int_msk : 1; /* [6] */ + 
unsigned int dma_queue_err06_int_msk : 1; /* [7] */ + unsigned int dma_queue_err07_int_msk : 1; /* [8] */ + unsigned int dma_queue_err08_int_msk : 1; /* [9] */ + unsigned int dma_queue_err09_int_msk : 1; /* [10] */ + unsigned int dma_queue_err10_int_msk : 1; /* [11] */ + unsigned int dma_queue_err11_int_msk : 1; /* [12] */ + unsigned int dma_queue_err12_int_msk : 1; /* [13] */ + unsigned int dma_queue_err13_int_msk : 1; /* [14] */ + unsigned int dma_queue_err14_int_msk : 1; /* [15] */ + unsigned int dma_queue_err15_int_msk : 1; /* [16] */ + unsigned int dma_queue_err16_int_msk : 1; /* [17] */ + unsigned int dma_queue_err17_int_msk : 1; /* [18] */ + unsigned int reserved_0 : 13 ; /* [31..19] */ + } bits; + + unsigned int u32; +}; + +struct dma_ch_sq_s { + u32 opcode : 4; /* [0~3] opcode */ + u32 drop : 1; /* [4] drop */ + u32 nw : 1; /* [5] nw */ + u32 wd_barrier : 1; /* [6] write done barrier */ + u32 rd_barrier : 1; /* [7] read done barrier */ + u32 ldie : 1; /* [8] LDIE */ + u32 rdie : 1; /* [9] rDIE */ + u32 loop_barrier : 1; /* [10] */ + u32 spd_barrier : 1; /* [11] */ + u32 attr : 3; /* [12~14] attr */ + u32 cq_disable : 1; /* [15] reserved */ + u32 addrt : 2; /* [16~17] at */ + u32 p3p4 : 2; /* [18~19] P3 P4 */ + u32 pf : 3; /* [20~22] pf */ + u32 vfen : 1; /* [23] vfen */ + u32 vf : 8; /* [24~31] vf */ + u32 pasid : 20; /* [0~19] pasid */ + u32 er : 1; /* [20] er */ + u32 pmr : 1; /* [21] pmr */ + u32 prfen : 1; /* [22] prfen */ + u32 reserved5 : 1; /* [23] reserved */ + u32 msi : 8; /* [24~31] MSI/MSI-X vector */ + u32 flow_id : 8; /* [0~7] Flow ID */ + u32 reserved6 : 8; /* [8~15] reserved */ + u32 TH : 1; /* [16] TH */ + u32 PH : 2; /* [17~18] PH */ + u32 reserved7 : 13; /* [19~31] reserved: some multiplex fields */ + u32 length; + u32 src_addr_l; + u32 src_addr_h; + u32 dst_addr_l; + u32 dst_addr_h; +}; + +struct dma_ch_cq_s { + u32 reserved1; + u32 reserved2; + u32 sqhd : 16; + u32 reserved3 : 16; + u32 reserved4 : 16; /* [0~15] reserved */ + u32 vld : 1; /* [16] vld */ + u32 status : 15; /* [17~31] status */ +}; + +#endif /* EDMA_DRV_H */ diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c index be2f732ed9ed..1bfb123e43c0 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c @@ -20,11 +20,18 @@ #include <linux/seq_file.h> #include "bma_pci.h" +#include "edma_queue.h" #include "edma_host.h" static struct edma_user_inft_s *g_user_func[TYPE_MAX] = { 0 }; static struct bma_dev_s *g_bma_dev; + +struct bma_dev_s *get_bma_dev(void) +{ + return g_bma_dev; +} + static int edma_host_dma_interrupt(struct edma_host_s *edma_host); int edmainfo_show(char *buf) @@ -231,7 +238,8 @@ void clear_int_dmab2h(struct edma_host_s *edma_host) (void)pci_write_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, data); } -int edma_host_check_dma_status(enum dma_direction_e dir) +// for 1710 1711 +int edma_host_check_dma_status_v1(enum dma_direction_e dir) { int ret = 0; @@ -259,6 +267,18 @@ int edma_host_check_dma_status(enum dma_direction_e dir) return ret; } +// for 1712 +int edma_host_check_dma_status_v2(enum dma_direction_e dir) +{ + UNUSED(dir); + if (check_dma_queue_state(CPL_STATE, TRUE) == 0 || + check_dma_queue_state(IDLE_STATE, TRUE) == 0) { + return 1; /* ok */ + } + + return 0; /* busy */ +} + #ifdef USE_DMA static int start_transfer_h2b(struct edma_host_s *edma_host, unsigned int len, @@ -633,9 +653,9 @@ void host_dma_transfer_withlist(struct edma_host_s 
*edma_host, } } -int edma_host_dma_transfer(struct edma_host_s *edma_host, - struct bma_priv_data_s *priv, - struct bma_dma_transfer_s *dma_transfer) +// for 1710 1711 +int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer) { int ret = 0; unsigned long flags = 0; @@ -673,7 +693,44 @@ int edma_host_dma_transfer(struct edma_host_s *edma_host, return ret; } -void edma_host_reset_dma(struct edma_host_s *edma_host, int dir) +// for 1712 +int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *dma_transfer) +{ + int ret = 0; + unsigned long flags = 0; + struct bma_dev_s *bma_dev = NULL; + + BMA_LOG(DLOG_DEBUG, "edma_host_dma_transfer 1712"); + + if (!edma_host || !priv || !dma_transfer) + return -EFAULT; + + bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host); + + spin_lock_irqsave(&bma_dev->priv_list_lock, flags); + + if (priv->user.dma_transfer == 0) { + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + BMA_LOG(DLOG_ERROR, "dma_transfer = %hhd\n", priv->user.dma_transfer); + return -EFAULT; + } + + BMA_LOG(DLOG_DEBUG, "transfer_edma_host 1712"); + + spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags); + + edma_host->statistics.dma_count++; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + ret = transfer_dma_queue(dma_transfer); + spin_unlock_irqrestore(&edma_host->reg_lock, flags); + + return ret; +} + +// for 1710/1711 +void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir) { u32 data = 0; u32 reg_addr = 0; @@ -717,6 +774,13 @@ void edma_host_reset_dma(struct edma_host_s *edma_host, int dir) reg_addr, count, data); } +// for 1712 +void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir) +{ + UNUSED(dir); + reset_edma_host(edma_host); +} + int edma_host_dma_stop(struct edma_host_s *edma_host, struct bma_priv_data_s *priv) { @@ -750,8 +814,8 @@ static int edma_host_send_msg(struct edma_host_s *edma_host) if (send_mbx_hdr->mbxlen > 0) { if (send_mbx_hdr->mbxlen > HOST_MAX_SEND_MBX_LEN) { /*share memory is disable */ + BMA_LOG(DLOG_DEBUG, "mbxlen is too long: %d\n", send_mbx_hdr->mbxlen); send_mbx_hdr->mbxlen = 0; - BMA_LOG(DLOG_DEBUG, "mbxlen is too long\n"); return -EFAULT; } @@ -1296,6 +1360,69 @@ int edma_host_user_unregister(u32 type) return 0; } +static void init_edma_sq_cq(struct edma_host_s *edma_host) +{ + u64 sq_phy_addr = 0; + u64 cq_phy_addr = 0; + phys_addr_t edma_address = 0; + int ret = 0; + + if (get_pci_type() != PCI_TYPE_1712) + return; + + ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address); + if (ret != 0) + return; + + edma_host->edma_sq_addr = (void *)((unsigned char *)edma_host->edma_recv_addr + + HOST_MAX_RCV_MBX_LEN); + edma_host->edma_cq_addr = (void *)((unsigned char *)edma_host->edma_sq_addr + + sizeof(struct dma_ch_sq_s) * SQ_DEPTH); + sq_phy_addr = edma_address + HOST_DMA_FLAG_LEN + HOST_MAX_SEND_MBX_LEN + + HOST_MAX_RCV_MBX_LEN; + cq_phy_addr = sq_phy_addr + sizeof(struct dma_ch_sq_s) * SQ_DEPTH; + + BMA_LOG(DLOG_DEBUG, + "sq_phy_addr = 0x%llx, SQ size = %zu, cq_phy_addr = 0x%llx, CQ size = %zu", + sq_phy_addr, sizeof(struct dma_ch_sq_s) * SQ_DEPTH, + cq_phy_addr, sizeof(struct dma_ch_cq_s) * CQ_DEPTH); + BMA_LOG(DLOG_DEBUG, "sq_addr = %pK, cq_addr = %pK", edma_host->edma_sq_addr, + edma_host->edma_cq_addr); + + (void)memset(edma_host->edma_sq_addr, 0, + sizeof(struct dma_ch_sq_s) * SQ_DEPTH + sizeof(struct dma_ch_cq_s) * CQ_DEPTH); 
+ + set_dma_queue_sq_base_l(sq_phy_addr & PCIE_ADDR_L_32_MASK); + set_dma_queue_sq_base_h((u32)(sq_phy_addr >> PCIE_ADDR_H_SHIFT_32)); + set_dma_queue_cq_base_l(cq_phy_addr & PCIE_ADDR_L_32_MASK); + set_dma_queue_cq_base_h((u32)(cq_phy_addr >> PCIE_ADDR_H_SHIFT_32)); + + reset_edma_host(edma_host); +} + +static void edma_setup_timer(struct edma_host_s *edma_host) +{ +#ifdef HAVE_TIMER_SETUP + timer_setup(&edma_host->timer, edma_host_timeout, 0); +#else + setup_timer(&edma_host->timer, edma_host_timeout, + (unsigned long)edma_host); +#endif + (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK); + +#ifdef USE_DMA + #ifdef HAVE_TIMER_SETUP + timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0); + + #else + setup_timer(&edma_host->dma_timer, edma_host_dma_timeout, + (unsigned long)edma_host); + #endif + (void)mod_timer(&edma_host->dma_timer, + jiffies_64 + DMA_TIMER_INTERVAL_CHECK); +#endif +} + int edma_host_init(struct edma_host_s *edma_host) { int ret = 0; @@ -1352,24 +1479,7 @@ int edma_host_init(struct edma_host_s *edma_host) edma_host->b2h_state = B2HSTATE_IDLE; #ifdef EDMA_TIMER - #ifdef HAVE_TIMER_SETUP - timer_setup(&edma_host->timer, edma_host_timeout, 0); - #else - setup_timer(&edma_host->timer, edma_host_timeout, - (unsigned long)edma_host); - #endif - (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK); -#ifdef USE_DMA - #ifdef HAVE_TIMER_SETUP - timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0); - - #else - setup_timer(&edma_host->dma_timer, edma_host_dma_timeout, - (unsigned long)edma_host); - #endif - (void)mod_timer(&edma_host->dma_timer, - jiffies_64 + DMA_TIMER_INTERVAL_CHECK); -#endif + edma_setup_timer(edma_host); #else init_completion(&edma_host->msg_ready); @@ -1383,6 +1493,8 @@ int edma_host_init(struct edma_host_s *edma_host) } #endif + init_edma_sq_cq(edma_host); + #ifdef HAVE_TIMER_SETUP timer_setup(&edma_host->heartbeat_timer, edma_host_heartbeat_timer, 0); diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h index cbbd86fd6602..93c81bc92286 100644 --- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h @@ -18,6 +18,8 @@ #include "bma_include.h" #include "../include/bma_ker_intf.h" +#include "edma_reg.h" +#include "edma_drv.h" #define EDMA_TIMER @@ -176,6 +178,13 @@ #define U64ADDR_H(addr) ((((u64)addr) >> 32) & 0xffffffff) #define U64ADDR_L(addr) ((addr) & 0xffffffff) +#define MAX_RESET_DMA_TIMES 10 +#define DELAY_BETWEEN_RESET_DMA 100 +#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5 +#define PCI_DEVICE_ID_EDMA_0 0x1712 +#define SQ_DEPTH 128 +#define CQ_DEPTH 128 + struct bma_register_dev_type_s { u32 type; u32 sub_type; @@ -263,6 +272,8 @@ struct edma_host_s { void __iomem *edma_flag; void __iomem *edma_send_addr; void __iomem *edma_recv_addr; + void __iomem *edma_sq_addr; + void __iomem *edma_cq_addr; #ifdef USE_DMA struct timer_list dma_timer; #endif @@ -309,6 +320,8 @@ struct edma_user_inft_s { int (*add_msg)(void *msg, size_t msg_len); }; +struct bma_dev_s *get_bma_dev(void); + int is_edma_b2h_int(struct edma_host_s *edma_host); void edma_int_to_bmc(struct edma_host_s *edma_host); int edma_host_mmap(struct edma_host_s *edma_hos, struct file *filp, @@ -336,7 +349,6 @@ int edma_host_user_unregister(u32 type); int edma_host_init(struct edma_host_s *edma_host); void edma_host_cleanup(struct edma_host_s *edma_host); int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype); 
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir); void clear_int_dmah2b(struct edma_host_s *edma_host); void clear_int_dmab2h(struct edma_host_s *edma_host); diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c new file mode 100644 index 000000000000..678262f7412c --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei iBMA driver. + * Copyright (c) 2025, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/errno.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +#include "bma_pci.h" +#include "edma_host.h" +#include "edma_queue.h" + +static u32 pcie_dma_read(u32 offset) +{ + u32 reg_val; + + reg_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset); + BMA_LOG(DLOG_DEBUG, "readl, offset 0x%x val 0x%x\n", offset, reg_val); + return reg_val; +} + +static void pcie_dma_write(u32 offset, u32 reg_val) +{ + u32 read_val; + + (void)writel(reg_val, get_bma_dev()->bma_pci_dev->bma_base_addr + offset); + read_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset); + if (read_val != reg_val) { + BMA_LOG(DLOG_DEBUG, + "writel fail, read_value: 0x%x, set_value: 0x%x, offset: 0x%x\n", + read_val, reg_val, offset); + return; + } + BMA_LOG(DLOG_DEBUG, "writel, offset 0x%x val 0x%x\n", offset, reg_val); +} + +static void set_dma_queue_int_msk(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_MSK_0_REG, val); +} + +static void set_dma_queue_err_int_msk(u32 val) +{ + union U_DMA_QUEUE_INT_MSK reg_val; + + // The least significant bit (bit 0) of this register is reserved and must be cleared, + // while the remaining bits should retain their original values. 
+ reg_val.u32 = val & 0xFFFFFFFE; + (void)pcie_dma_write(PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG, reg_val.u32); +} + +static void set_dma_queue_int_sts(u32 val) +{ + union U_DMA_QUEUE_INT_STS reg_val; + + reg_val.u32 = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_STS_0_REG, reg_val.u32); +} + +static void get_dma_queue_int_sts(u32 *val) +{ + union U_DMA_QUEUE_INT_STS reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_INT_STS_0_REG); + *val = reg_val.u32; +} + +static void get_dma_queue_fsm_sts(u32 *val) +{ + union U_DMA_QUEUE_FSM_STS reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_FSM_STS_0_REG); + *val = reg_val.bits.dma_queue_sts; +} + +static void pause_dma_queue(u32 val) +{ + union U_DMA_QUEUE_CTRL0 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG); + reg_val.bits.dma_queue_pause = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32); +} + +static void enable_dma_queue(u32 val) +{ + union U_DMA_QUEUE_CTRL0 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG); + reg_val.bits.dma_queue_en = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32); +} + +static void reset_dma_queue(u32 val) +{ + union U_DMA_QUEUE_CTRL1 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL1_0_REG); + reg_val.bits.dma_queue_reset = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL1_0_REG, reg_val.u32); +} + +static void set_dma_queue_sq_tail(u32 val) +{ + union U_DMA_QUEUE_SQ_TAIL_PTR reg_val; + + reg_val.bits.dma_queue_sq_tail_ptr = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG, reg_val.u32); +} + +static void set_dma_queue_cq_head(u32 val) +{ + union U_DMA_QUEUE_CQ_HEAD_PTR reg_val; + + reg_val.bits.dma_queue_cq_head_ptr = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG, reg_val.u32); +} + +void set_dma_queue_sq_base_l(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_L_0_REG, val); +} + +void set_dma_queue_sq_base_h(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_H_0_REG, val); +} + +void set_dma_queue_cq_base_l(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_L_0_REG, val); +} + +void set_dma_queue_cq_base_h(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_H_0_REG, val); +} + +static void set_dma_queue_sq_depth(u32 val) +{ + union U_DMA_QUEUE_SQ_DEPTH reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG); + reg_val.bits.dma_queue_sq_depth = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG, reg_val.u32); +} + +static void set_dma_queue_cq_depth(u32 val) +{ + union U_DMA_QUEUE_CQ_DEPTH reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG); + reg_val.bits.dma_queue_cq_depth = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG, reg_val.u32); +} + +static void set_dma_queue_arb_weight(u32 val) +{ + union U_DMA_QUEUE_CTRL0 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG); + reg_val.bits.dma_queue_arb_weight = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32); +} + +static void set_dma_queue_drct_sel(u32 val) +{ + union U_DMA_QUEUE_CTRL0 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG); + reg_val.bits.dma_queue_cq_drct_sel = val; + reg_val.bits.dma_queue_sq_drct_sel = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32); +} + +static void get_dma_queue_sq_tail(u32 *val) +{ + union U_DMA_QUEUE_SQ_TAIL_PTR reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG); + *val = reg_val.bits.dma_queue_sq_tail_ptr; +} + +static void 
get_dma_queue_cq_tail(u32 *val) +{ + union U_DMA_QUEUE_CQ_TAIL_PTR reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG); + *val = reg_val.bits.dma_queue_cq_tail_ptr; +} + +static void get_dma_queue_sq_head(u32 *val) +{ + u32 reg_val; + + reg_val = pcie_dma_read(PCIE_DMA_QUEUE_SQ_STS_0_REG); + /* dma_queue_sq_head_ptr bit[15:0] */ + *val = reg_val & 0xFFFF; +} + +static void set_dma_queue_err_abort(u32 val) +{ + union U_DMA_QUEUE_CTRL0 reg_val; + + reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG); + reg_val.bits.dma_queue_sq_pa_lkp_err_abort_en = val; + reg_val.bits.dma_queue_sq_proc_err_abort_en = val; + reg_val.bits.dma_queue_sq_drop_err_abort_en = val; + reg_val.bits.dma_queue_sq_cfg_err_abort_en = val; + (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32); +} + +static void set_dma_queue_flr_disable(u32 val) +{ + (void)pcie_dma_write(PCIE_DMA_FLR_DISABLE_REG, val); +} + +static void clear_dma_queue_int_chk(u32 mask) +{ + u32 int_sts; + + (void)get_dma_queue_int_sts(&int_sts); + if (int_sts & mask) + (void)set_dma_queue_int_sts(mask); +} + +s32 check_dma_queue_state(u32 state, u32 flag) +{ + u32 dma_state = 0; + unsigned long timeout; + + BMA_LOG(DLOG_DEBUG, "state:%u, flag:%u\n", state, flag); + + timeout = jiffies + TIMER_INTERVAL_CHECK; + + while (1) { + get_dma_queue_fsm_sts(&dma_state); + BMA_LOG(DLOG_DEBUG, "DMA stats[%u]\n", dma_state); + // Flag is 0 and state does not equal to target value + // OR Flag is 1 and state is equal to target value + if ((!flag && dma_state != state) || (flag && dma_state == state)) + break; + + if (time_after(jiffies, timeout)) { + BMA_LOG(DLOG_DEBUG, "Wait stats[%u] fail\n", state); + return -ETIMEDOUT; + } + udelay(1); + } + return 0; +} + +static s32 reset_dma(void) +{ + u32 dma_state = 0; + + /* get dma channel fsm */ + check_dma_queue_state(WAIT_STATE, FALSE); + get_dma_queue_fsm_sts(&dma_state); + BMA_LOG(DLOG_DEBUG, "dma_state:%u\n", dma_state); + switch (dma_state) { + /* idle status, dma channel need no reset */ + case IDLE_STATE: + return 0; + case RUN_STATE: + pause_dma_queue(ENABLE); + fallthrough; + case ABORT_STATE: + case CPL_STATE: + enable_dma_queue(DISABLE); + if (check_dma_queue_state(RUN_STATE, FALSE)) + return -ETIMEDOUT; + fallthrough; + case PAUSE_STATE: + case HALT_STATE: + set_dma_queue_sq_tail(0); + set_dma_queue_cq_head(0); + reset_dma_queue(ENABLE); + pause_dma_queue(DISABLE); + if (check_dma_queue_state(IDLE_STATE, TRUE)) + return -ETIMEDOUT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void init_dma(void) +{ + /* set dma channel sq tail */ + set_dma_queue_sq_tail(0); + /* set dma channel cq head */ + set_dma_queue_cq_head(0); + /* set dma queue drct sel */ + set_dma_queue_drct_sel(DRC_LOCAL); + /* set dma channel sq depth */ + set_dma_queue_sq_depth(SQ_DEPTH - 1); + /* set dma channel cq depth */ + set_dma_queue_cq_depth(CQ_DEPTH - 1); + /* dma not process FLR , only cpu process FLR */ + set_dma_queue_flr_disable(0x1); + /* set dma queue arb weight */ + set_dma_queue_arb_weight(0x1F); + /* clear dma queue int status */ + set_dma_queue_int_sts(0x1FFF); + /* set dma queue int mask */ + set_dma_queue_err_int_msk(0x0); + set_dma_queue_int_msk(0x0); + /* set dma queue abort err en */ + set_dma_queue_err_abort(ENABLE); + /* enable dma channel en */ + enable_dma_queue(ENABLE); +} + +s32 wait_done_dma_queue(unsigned long timeout) +{ + struct dma_ch_cq_s *p_cur_last_cq; + struct dma_ch_cq_s *p_dma_cq; + unsigned long end; + u32 sq_tail; + u32 sq_valid; + u32 
cq_tail; + u32 cq_valid; + + p_dma_cq = (struct dma_ch_cq_s *)((&get_bma_dev()->edma_host)->edma_cq_addr); + end = jiffies + timeout; + + while (time_before(jiffies, end)) { + (void)get_dma_queue_sq_tail(&sq_tail); + (void)get_dma_queue_cq_tail(&cq_tail); + + cq_valid = (cq_tail + CQ_DEPTH - 1) % (CQ_DEPTH); + p_cur_last_cq = &p_dma_cq[cq_valid]; + sq_valid = (sq_tail + SQ_DEPTH - 1) % (SQ_DEPTH); + BMA_LOG(DLOG_DEBUG, + "sq_tail %d, cq_tail %d, cq_valid %d, sq_valid %d, p_cur_last_cq->sqhd %d\n", + sq_tail, cq_tail, cq_valid, sq_valid, p_cur_last_cq->sqhd); + if (p_cur_last_cq->sqhd == sq_valid) { + set_dma_queue_cq_head(cq_valid); + return 0; + } + } + + return -ETIMEDOUT; +} + +static s32 submit_dma_queue_sq(u32 dir, struct bspveth_dmal pdmalbase_v, u32 pf) +{ + u32 sq_tail; + u32 sq_head; + u32 sq_availble; + struct dma_ch_sq_s sq_submit; + struct dma_ch_sq_s *p_dma_sq; + + p_dma_sq = (struct dma_ch_sq_s *)((&get_bma_dev()->edma_host)->edma_sq_addr); + (void)get_dma_queue_sq_tail(&sq_tail); + (void)get_dma_queue_sq_head(&sq_head); + sq_availble = SQ_DEPTH - 1 - (((sq_tail - sq_head) + SQ_DEPTH) % SQ_DEPTH); + if (sq_availble < 1) { + BMA_LOG(DLOG_DEBUG, "cannot process %u descriptors, try again later\n", 1); + return -1; + } + + BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail get %d, sq_head %d, sq_availble %d\n", + sq_tail, sq_head, sq_availble); + + (void)memset(&sq_submit, 0, sizeof(sq_submit)); + if (dir == DIR_H2B) + sq_submit.opcode = DMA_READ; + else + sq_submit.opcode = DMA_WRITE; + + BMA_LOG(DLOG_DEBUG, "PF: %u\n", pf); + sq_submit.ldie = ENABLE; + sq_submit.rdie = ENABLE; + sq_submit.attr &= (~0x2); /* SO(Strong Ordering) */ + sq_submit.pf = pf & 0x7; /* 0x7 */ + sq_submit.p3p4 = (pf >> 3) & 0x3; /* 0x3 */ + sq_submit.length = pdmalbase_v.len; + sq_submit.src_addr_l = pdmalbase_v.slow; + sq_submit.src_addr_h = pdmalbase_v.shi; + sq_submit.dst_addr_l = pdmalbase_v.dlow; + sq_submit.dst_addr_h = pdmalbase_v.dhi; + + BMA_LOG(DLOG_DEBUG, "submit dma queue sq, dir %d, op %d, length %d\n", dir, + sq_submit.opcode, sq_submit.length); + + memcpy(p_dma_sq + sq_tail, &sq_submit, sizeof(sq_submit)); + sq_tail = (sq_tail + 1) % SQ_DEPTH; + + BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail change %d,\n", sq_tail); + wmb(); /* memory barriers. 
*/ + + (void)set_dma_queue_sq_tail(sq_tail); + + return 0; +} + +s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer) +{ + struct bspveth_dmal *pdmalbase_v; + u32 dmal_cnt; + s32 ret; + int i; + + if (!dma_transfer) { + BMA_LOG(DLOG_DEBUG, "dma_transfer is NULL.\n"); + return -EFAULT; + } + + BMA_LOG(DLOG_DEBUG, "transfer dma queue.\n"); + + /* clear local done int */ + clear_dma_queue_int_chk(DMA_DONE_MASK); + + pdmalbase_v = dma_transfer->pdmalbase_v; + dmal_cnt = dma_transfer->dmal_cnt; + for (i = 0; i < dmal_cnt; i++) + submit_dma_queue_sq(dma_transfer->dir, pdmalbase_v[i], + get_bma_dev()->bma_pci_dev->pdev->devfn); + + (void)set_dma_queue_int_msk(DMA_DONE_UNMASK); + (void)set_dma_queue_err_int_msk(DMA_ERR_UNMASK); + (void)enable_dma_queue(ENABLE); + + ret = wait_done_dma_queue(DMA_TMOUT); + if (ret) + BMA_LOG(DLOG_DEBUG, "EP DMA: dma wait timeout"); + + return ret; +} + +void reset_edma_host(struct edma_host_s *edma_host) +{ + unsigned long flags = 0; + int count = 0; + + if (!edma_host) + return; + + spin_lock_irqsave(&edma_host->reg_lock, flags); + + while (count++ < MAX_RESET_DMA_TIMES) { + if (reset_dma() == 0) { + BMA_LOG(DLOG_DEBUG, "reset dma successfully\n"); + init_dma(); + break; + } + + mdelay(DELAY_BETWEEN_RESET_DMA); + } + + spin_unlock_irqrestore(&edma_host->reg_lock, flags); + BMA_LOG(DLOG_DEBUG, "reset dma count=%d\n", count); +} diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h new file mode 100644 index 000000000000..0cf449c0ae00 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2025, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef EDMA_QUEUE_H +#define EDMA_QUEUE_H +#include "edma_host.h" + +s32 check_dma_queue_state(u32 state, u32 flag); +void set_dma_queue_sq_base_l(u32 val); +void set_dma_queue_sq_base_h(u32 val); +void set_dma_queue_cq_base_l(u32 val); +void set_dma_queue_cq_base_h(u32 val); +void reset_edma_host(struct edma_host_s *edma_host); +int transfer_edma_host(struct edma_host_s *host, struct bma_priv_data_s *priv, + struct bma_dma_transfer_s *transfer); +s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer); +#endif diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h new file mode 100644 index 000000000000..c4e056a92bc8 --- /dev/null +++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei iBMA driver. + * Copyright (c) 2025, Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef EDMA_REG_H +#define EDMA_REG_H + +#define PORT_EP 0 +#define PORT_RP 1 + +#define ENABLE 1 +#define DISABLE 0 + +#define TRUE 1 +#define FALSE 0 + +/* core0:x2/x1 core1:x1 */ +#define PCIE_CORE_NUM 2 +#define PCIE_REG_OFFSET 0x100000U +#define PCIE_REG_SIZE 0x100000 + +#define GEN1 0x1 +#define GEN2 0x2 +#define GEN3 0x3 +#define GEN4 0x4 + +#define PCIE_ADDR_H_SHIFT_32 32 +#define PCIE_ADDR_L_32_MASK 0xFFFFFFFF + +#define AP_DMA_BIT BIT(5) +#define AP_MASK_ALL 0x3FF +#define AP_DMA_CHAN_REG_SIZE 0x100 + +/********************************************************************************************/ +/* PCIE reg base */ +/********************************************************************************************/ +#define PCIE_BASE_ADDR 0x1E100000U +#define AP_DMA_REG 0x10000U +#define AP_IOB_TX_REG_BASE 0x0U +#define AP_IOB_RX_REG_BASE 0x4000U +#define AP_GLOBAL_REG_BASE 0x8000U + +/********************************************************************************************/ +/* PCIE AP DMA REG */ +/********************************************************************************************/ +#define PCIE_DMA_EP_INT_MSK_REG 0x24 /* DMA_EP_INT_MSK */ +#define PCIE_DMA_EP_INT_REG 0x28 /* DMA_EP_INT */ +#define PCIE_DMA_EP_INT_STS_REG 0x2C /* DMA_EP_INT_STS */ +#define PCIE_DMA_FLR_DISABLE_REG 0xA00 /* DMA_FLR_DISABLE */ +#define PCIE_DMA_QUEUE_SQ_BASE_L_0_REG 0x2000 /* DMA Queue SQ Base Address Low Register */ +#define PCIE_DMA_QUEUE_SQ_BASE_H_0_REG 0x2004 /* DMA Queue SQ Base Address High Register */ +#define PCIE_DMA_QUEUE_SQ_DEPTH_0_REG 0x2008 /* DMA Queue SQ Depth */ +#define PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG 0x200C /* DMA Queue SQ Tail Pointer Register */ +#define PCIE_DMA_QUEUE_CQ_BASE_L_0_REG 0x2010 /* DMA Queue CQ Base Address Low Register */ +#define PCIE_DMA_QUEUE_CQ_BASE_H_0_REG 0x2014 /* DMA Queue CQ Base Address High Register */ +#define PCIE_DMA_QUEUE_CQ_DEPTH_0_REG 0x2018 /* DMA Queue CQ Depth */ +#define PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG 0x201C /* DMA Queue CQ Head Pointer Register */ +#define PCIE_DMA_QUEUE_CTRL0_0_REG 0x2020 /* DMA Queue control Register 0 */ +#define PCIE_DMA_QUEUE_CTRL1_0_REG 0x2024 /* DMA Queue control Register 1 */ +#define PCIE_DMA_QUEUE_FSM_STS_0_REG 0x2030 /* DMA Queue FSM Status Register */ +#define PCIE_DMA_QUEUE_SQ_STS_0_REG 0x2034 /* DMA Queue SQ and CQ status Register */ +#define PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG 0x203C /* DMA Queue CQ Tail Pointer Register */ +#define PCIE_DMA_QUEUE_INT_STS_0_REG 0x2040 /* DMA Queue Interrupt Status */ +#define PCIE_DMA_QUEUE_INT_MSK_0_REG 0x2044 /* DMA Queue Interrupt Mask Register */ +#define PCIE_DMA_QUEUE_ERR_INT_STS_0_REG 0x2048 /* DMA Queue Err Interrupt Status */ +#define PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG 0x204C /* DMA Queue Err Interrupt Mask Register */ +#define PCIE_DMA_QUEUE_INT_RO_0_REG 0x206C /* DMA Queue Interrupt RO Register */ + +/********************************************************************************************/ +/* PCIE AP_GLOBAL_REG */ +/********************************************************************************************/ +#define PCIE_CE_ENA 0x0008 +#define PCIE_UNF_ENA 0x0010 +#define PCIE_UF_ENA 0x0018 + +#define PCIE_MSI_MASK 0x00F4 +#define PORT_INTX_ASSERT_MASK 0x01B0 +#define PORT_INTX_DEASSERT_MASK 0x01B4 
+ +#define PCIE_AP_NI_ENA 0x0100 +#define PCIE_AP_CE_ENA 0x0104 +#define PCIE_AP_UNF_ENA 0x0108 +#define PCIE_AP_UF_ENA 0x010c +#define PCIE_AP_NI_MASK 0x0110 +#define PCIE_AP_CE_MASK 0x0114 +#define PCIE_AP_UNF_MASK 0x0118 +#define PCIE_AP_UF_MASK 0x011C +#define PCIE_AP_NI_STATUS 0x0120 +#define PCIE_AP_CE_STATUS 0x0124 +#define PCIE_AP_UNF_STATUS 0x0128 +#define PCIE_AP_UF_STATUS 0x012C +#define PCIE_CORE_NI_ENA 0x0160 +#define PCIE_CORE_CE_ENA 0x0164 +#define PCIE_CORE_UNF_ENA 0x0168 +#define PCIE_CORE_UF_ENA 0x016c + +#define AP_PORT_EN_REG 0x0800 +#define AP_APB_SYN_RST 0x0810 +#define AP_AXI_SYN_RST 0x0814 +#define AP_IDLE 0x0C08 + +/********************************************************************************************/ +/* PCIE AP_IOB_RX_COM_REG Reg */ +/********************************************************************************************/ +#define IOB_RX_AML_SNOOP 0x1AAC +#define IOB_RX_MSI_INT_CTRL 0x1040 + +#define IOB_RX_MSI_INT_ADDR_HIGH 0x1044 +#define IOB_RX_MSI_INT_ADDR_LOW 0x1048 + +#define IOB_RX_PAB_SMMU_BYPASS_CTRL 0x2004 + +#define IOB_RX_DMA_REG_REMAP_0 0x0E30 +#define IOB_RX_DMA_REG_REMAP_1 0x0E34 + +#endif /* EDMA_REG_H */ diff --git a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h index d1df99b0c9fd..8d284d5f6e62 100644 --- a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h +++ b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h @@ -47,6 +47,17 @@ enum intr_mod { INTR_ENABLE = 1, }; +enum addr_type { + TYPE_EDMA_ADDR = 0, + TYPE_VETH_ADDR = 1, +}; + +enum pci_type_e { + PCI_TYPE_UNKNOWN, + PCI_TYPE_171x, + PCI_TYPE_1712 +}; + struct bma_dma_addr_s { dma_addr_t dma_addr; u32 dma_data_len; @@ -66,10 +77,28 @@ union transfer_u { struct dmalist_transfer_s list; }; +struct bspveth_dmal { + u32 chl; + u32 len; + u32 slow; + u32 shi; + u32 dlow; + u32 dhi; +}; + struct bma_dma_transfer_s { enum dma_type_e type; enum dma_direction_e dir; union transfer_u transfer; + struct bspveth_dmal *pdmalbase_v; + u32 dmal_cnt; +}; + +struct bma_map_addr_s { + enum pci_type_e pci_type; + u32 host_number; + enum addr_type addr_type; + u32 addr; }; int bma_intf_register_int_notifier(struct notifier_block *nb); @@ -91,4 +120,21 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len); unsigned int bma_cdev_check_recv(void *handle); void *bma_cdev_get_wait_queue(void *handle); int bma_intf_check_edma_supported(void); + +enum pci_type_e get_pci_type(void); +void set_pci_type(enum pci_type_e type); + +int bma_intf_get_host_number(unsigned int *host_number); +int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr); + +#define HOST_NUMBER_0 0 +#define HOST_NUMBER_1 1 + +#define EDMA_1711_HOST0_ADDR 0x84810000 +#define VETH_1711_HOST0_ADDR 0x84820000 +#define EDMA_1712_HOST0_ADDR 0x85400000 +#define VETH_1712_HOST0_ADDR 0x85410000 +#define EDMA_1712_HOST1_ADDR 0x87400000 +#define VETH_1712_HOST1_ADDR 0x87410000 + #endif diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h index 0d82ee6f7c83..745d83b431f8 100644 --- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h +++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h @@ -23,7 +23,7 @@ #ifdef DRV_VERSION #define KBOX_VERSION MICRO_TO_STR(DRV_VERSION) #else -#define KBOX_VERSION "0.3.10" +#define KBOX_VERSION "0.4.0" #endif #define UNUSED(x) (x = x) diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c 
b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c index 9d918edae703..774229ae8dd1 100644 --- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c +++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c @@ -495,6 +495,11 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev) int err = 0; u8 *shmq_head_p = NULL; struct bspveth_shmq_hd *shmq_head = NULL; + phys_addr_t veth_address = 0; + + err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address); + if (err != 0) + goto failed; if (!pvethdev) return BSP_ERR_NULL_POINTER; @@ -526,7 +531,7 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev) (struct bspveth_dmal *)((BSP_VETH_T)(shmq_head) + SHMDMAL_OFFSET); pvethdev->ptx_queue[qid]->pdmalbase_p = - (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC + + (u8 *)(u64)(veth_address + MAX_SHAREQUEUE_SIZE * qid + SHMDMAL_OFFSET); @@ -851,6 +856,11 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev) int qid, i, err = 0; struct bspveth_shmq_hd *shmq_head = NULL; u8 *shmq_head_p = NULL; + phys_addr_t veth_address = 0; + + err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address); + if (err != 0) + goto failed; if (!pvethdev) return BSP_ERR_NULL_POINTER; @@ -885,7 +895,7 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev) (struct bspveth_dmal *)((BSP_VETH_T)(shmq_head) + SHMDMAL_OFFSET); pvethdev->prx_queue[qid]->pdmalbase_p = - (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC + (u8 *)(u64)(veth_address + MAX_SHAREQUEUE_SIZE * (qid + 1) + SHMDMAL_OFFSET); memset(pvethdev->prx_queue[qid]->pdmalbase_v, 0, @@ -1236,6 +1246,8 @@ void veth_netdev_func_init(struct net_device *dev) { struct tag_pcie_comm_priv *priv = (struct tag_pcie_comm_priv *)netdev_priv(dev); + u32 host_number = 0; + int ret = 0; /*9C:7D:A3:28:6F:F9*/ unsigned char veth_mac[ETH_ALEN] = {0x9C, 0x7D, 0xA3, 0x28, 0x6F, 0xF9}; @@ -1243,6 +1255,12 @@ void veth_netdev_func_init(struct net_device *dev) ether_setup(dev); + ret = bma_intf_get_host_number(&host_number); + if (ret < 0) { + VETH_LOG(DLOG_ERROR, "bma_intf_get_host_number failed!\n"); + return; + } + dev->netdev_ops = &veth_ops; dev->watchdog_timeo = BSPVETH_NET_TIMEOUT; @@ -1257,6 +1275,7 @@ void veth_netdev_func_init(struct net_device *dev) memset(priv, 0, sizeof(struct tag_pcie_comm_priv)); strncpy(priv->net_type, MODULE_NAME, NET_TYPE_LEN); + veth_mac[ETH_ALEN - 1] = (host_number == 0 ? 
0xF9 : 0xFB); eth_hw_addr_set(dev, veth_mac); VETH_LOG(DLOG_DEBUG, "set veth MAC addr OK\n"); @@ -2226,6 +2245,8 @@ s32 __start_dmalist_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, u32 type) dma_transfer.type = DMA_LIST; dma_transfer.transfer.list.dma_addr = (dma_addr_t)prxtx_queue->pdmalbase_p; + dma_transfer.pdmalbase_v = prxtx_queue->pdmalbase_v; + dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt; ret = bma_intf_start_dma(g_bspveth_dev.bma_priv, &dma_transfer); if (ret < 0) diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h index 242d3ec128d3..f8b7e2f8d604 100644 --- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h +++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h @@ -31,7 +31,7 @@ extern "C" { #ifdef DRV_VERSION #define VETH_VERSION MICRO_TO_STR(DRV_VERSION) #else -#define VETH_VERSION "0.3.10" +#define VETH_VERSION "0.4.0" #endif #define MODULE_NAME "veth" @@ -67,7 +67,6 @@ extern "C" { #define SYSCTL_REG_SIZE (0x1000) #define PCIE1_REG_BASE (0x29000000) #define PCIE1_REG_SIZE (0x1000) -#define VETH_SHAREPOOL_BASE_INBMC (0x84820000) #define VETH_SHAREPOOL_SIZE (0xdf000) #define VETH_SHAREPOOL_OFFSET (0x10000) #define MAX_SHAREQUEUE_SIZE (0x20000) @@ -261,15 +260,6 @@ struct bspveth_dma_bd { u32 off; }; -struct bspveth_dmal { - u32 chl; - u32 len; - u32 slow; - u32 shi; - u32 dlow; - u32 dhi; -}; - struct bspveth_rxtx_q { #ifndef VETH_BMC struct bspveth_dma_bd *pbdbase_v; -- 2.33.0

FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/17507
Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/STB...