11 Aug '25
From: Huangjunhua <huangjunhua14(a)huawei.com>
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICB3EN
CVE: NA
-----------------------------------------
To meet the competitive evolution requirements ("5+1+1") of the
new-generation Kunpeng ARM platform, the Tianchi architecture, and the
BMC management system, the product teams, BMC, and HiSilicon jointly
planned Hi1712, the next-generation BMC chip. Building on Hi1711,
Hi1712 enhances interfaces, computing power, and security. The Huawei
iBMA driver needs to be adapted to support Hi1712 for in-band and
out-of-band communication.
Signed-off-by: Huangjunhua <huangjunhua14(a)huawei.com>
---
MAINTAINERS | 6 +
.../ethernet/huawei/bma/cdev_drv/bma_cdev.c | 2 +-
.../bma/cdev_veth_drv/virtual_cdev_eth_net.c | 18 +-
.../bma/cdev_veth_drv/virtual_cdev_eth_net.h | 1 -
.../net/ethernet/huawei/bma/edma_drv/Makefile | 2 +-
.../huawei/bma/edma_drv/bma_devintf.c | 118 ++++-
.../ethernet/huawei/bma/edma_drv/bma_pci.c | 277 +++++++++--
.../ethernet/huawei/bma/edma_drv/bma_pci.h | 33 +-
.../ethernet/huawei/bma/edma_drv/edma_drv.h | 340 +++++++++++++
.../ethernet/huawei/bma/edma_drv/edma_host.c | 160 +++++-
.../ethernet/huawei/bma/edma_drv/edma_host.h | 14 +-
.../ethernet/huawei/bma/edma_drv/edma_queue.c | 470 ++++++++++++++++++
.../ethernet/huawei/bma/edma_drv/edma_queue.h | 29 ++
.../ethernet/huawei/bma/edma_drv/edma_reg.h | 127 +++++
.../huawei/bma/include/bma_ker_intf.h | 46 ++
.../huawei/bma/kbox_drv/kbox_include.h | 2 +-
.../ethernet/huawei/bma/veth_drv/veth_hb.c | 25 +-
.../ethernet/huawei/bma/veth_drv/veth_hb.h | 12 +-
18 files changed, 1582 insertions(+), 100 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
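
Most of the churn in this series comes from one structural change: chip-specific
behaviour (BAR mapping, DMA status checks, transfers and resets) is now reached
through a handler table indexed by the PCI type detected at probe time, instead
of being open-coded for the 1710/1711 only. A minimal, compile-able sketch of
that pattern follows; the names are simplified and the bodies are stubs, the
real table is g_bma_pci_dev_handler_s in bma_pci.c below.

#include <stdio.h>

enum pci_type_e { PCI_TYPE_UNKNOWN, PCI_TYPE_171X, PCI_TYPE_1712 };

struct dev_ops {
	int (*check_dma)(int dir);   /* one slot per chip-specific operation */
};

static int check_dma_v1(int dir) { (void)dir; return 1; } /* 1710/1711 path */
static int check_dma_v2(int dir) { (void)dir; return 1; } /* 1712 path */

static const struct dev_ops ops[] = {
	[PCI_TYPE_171X] = { .check_dma = check_dma_v1 },
	[PCI_TYPE_1712] = { .check_dma = check_dma_v2 },
};

int main(void)
{
	enum pci_type_e type = PCI_TYPE_1712;  /* set once from the PCI device ID */

	if (type != PCI_TYPE_UNKNOWN)
		printf("dma ready: %d\n", ops[type].check_dma(0));
	return 0;
}
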
diff --git a/MAINTAINERS b/MAINTAINERS
index 61baf2cfc4e1..446f2f49fd14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9707,6 +9707,12 @@ F: drivers/net/ethernet/huawei/hinic3/cqm/
F: drivers/net/ethernet/huawei/hinic3/hw/
F: drivers/net/ethernet/huawei/hinic3/include/
+HUAWEI ETHERNET DRIVER
+M: Huangjunhua <huangjunhua14(a)huawei.com>
+L: netdev(a)vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/huawei/bma/
+
HUAWEI BIFUR DRIVER
M: Xiaoping zheng <zhengxiaoping5(a)huawei.com>
L: netdev(a)vger.kernel.org
diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
index 275c2cdfe5db..59181c829a68 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
@@ -28,7 +28,7 @@
#ifdef DRV_VERSION
#define CDEV_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define CDEV_VERSION "0.3.10"
+#define CDEV_VERSION "0.4.0"
#endif
#define CDEV_DEFAULT_NUM 4
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
index e6dbec7073e4..adb6dd6972f5 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
@@ -151,6 +151,12 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
u8 *shmq_head = NULL;
u8 *shmq_head_p = NULL;
struct edma_rxtx_q_s *tx_queue = NULL;
+ int ret = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return -EFAULT;
tx_queue = (struct edma_rxtx_q_s *)
kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -173,7 +179,7 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
tx_queue->pdmalbase_v = (struct edma_dmal_s *)
(shmq_head + SHMDMAL_OFFSET);
- tx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+ tx_queue->pdmalbase_p = (u8 *)(veth_address +
(MAX_SHAREQUEUE_SIZE * 0) + SHMDMAL_OFFSET);
memset(tx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
@@ -219,6 +225,12 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
u8 *shmq_head = NULL;
u8 *shmq_head_p = NULL;
struct edma_rxtx_q_s *rx_queue = NULL;
+ int ret = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return -EFAULT;
rx_queue = (struct edma_rxtx_q_s *)
kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -241,7 +253,7 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
/* DMA address list (only used in host). */
rx_queue->pdmalbase_v = (struct edma_dmal_s *)
(shmq_head + SHMDMAL_OFFSET);
- rx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+ rx_queue->pdmalbase_p = (u8 *)(veth_address +
MAX_SHAREQUEUE_SIZE + SHMDMAL_OFFSET);
memset(rx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
@@ -1304,6 +1316,8 @@ int __start_dmalist_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 cnt)
dma_transfer.type = DMA_LIST;
dma_transfer.transfer.list.dma_addr =
(dma_addr_t)prxtx_queue->pdmalbase_p;
+ dma_transfer.pdmalbase_v = (struct bspveth_dmal *)prxtx_queue->pdmalbase_v;
+ dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
ret = bma_intf_start_dma(g_eth_edmaprivate.edma_priv, &dma_transfer);
LOG(DLOG_DEBUG, "after -> %u/%u/%u/%u, ret: %d",
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
index cb7c28cb5ddd..bc4b2147272b 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
@@ -56,7 +56,6 @@
#define BSP_ERR_AGAIN (BSP_ETH_ERR_BASE + 18)
#define BSP_ERR_NOT_TO_HANDLE (BSP_ETH_ERR_BASE + 19)
-#define VETH_SHAREPOOL_BASE_INBMC (0x84820000)
#define VETH_SHAREPOOL_SIZE (0xdf000)
#define VETH_SHAREPOOL_OFFSET (0x10000)
#define MAX_SHAREQUEUE_SIZE (0x20000)
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
index 46cc51275a71..048bcb9e2bbe 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_BMA) += host_edma_drv.o
-host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o
+host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o edma_queue.o
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
index 3b5eb39d6da6..45815fdc18eb 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
@@ -31,6 +31,18 @@ static struct bma_dev_s *g_bma_dev;
static ATOMIC_NOTIFIER_HEAD(bma_int_notify_list);
+static enum pci_type_e g_pci_type = PCI_TYPE_UNKNOWN;
+
+enum pci_type_e get_pci_type(void)
+{
+ return g_pci_type;
+}
+
+void set_pci_type(enum pci_type_e type)
+{
+ g_pci_type = type;
+}
+
static int bma_priv_insert_priv_list(struct bma_priv_data_s *priv, u32 type,
u32 sub_type)
{
@@ -342,6 +354,82 @@ int bma_intf_unregister_type(void **handle)
}
EXPORT_SYMBOL(bma_intf_unregister_type);
+int bma_intf_get_host_number(unsigned int *host_number)
+{
+ unsigned int devfn = 0;
+
+ if (!host_number)
+ return -EFAULT;
+
+ if (!g_bma_dev) {
+ BMA_LOG(DLOG_ERROR, "g_bma_dev is NULL\n");
+ return -ENXIO;
+ }
+
+ devfn = g_bma_dev->bma_pci_dev->pdev->devfn;
+ BMA_LOG(DLOG_DEBUG, "devfn is %u\n", devfn);
+ if (devfn == PF7 || devfn == PF10) {
+ *host_number = HOST_NUMBER_0;
+ } else if (devfn == PF4) {
+ *host_number = HOST_NUMBER_1;
+ } else {
+ BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+ *host_number = HOST_NUMBER_0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bma_intf_get_host_number);
+
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr)
+{
+ u32 host_number = 0;
+ u32 devfn = 0;
+ u32 i = 0;
+ enum pci_type_e pci_type = get_pci_type();
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ static struct bma_map_addr_s addr_info[] = {
+ {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+ {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+ {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+ {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1712_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1712_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_EDMA_ADDR, EDMA_1712_HOST1_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_VETH_ADDR, VETH_1712_HOST1_ADDR},
+ };
+
+ if (!bma_pci_dev) {
+ BMA_LOG(DLOG_ERROR, "bma_pci_dev is null\n");
+ return -EFAULT;
+ }
+
+ devfn = bma_pci_dev->pdev->devfn;
+ if (devfn == PF7 || devfn == PF10) {
+ host_number = HOST_NUMBER_0;
+ } else if (devfn == PF4) {
+ host_number = HOST_NUMBER_1;
+ } else {
+ BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+ host_number = HOST_NUMBER_0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(addr_info); i++) {
+ if (pci_type == addr_info[i].pci_type &&
+ host_number == addr_info[i].host_number && type == addr_info[i].addr_type) {
+ *addr = addr_info[i].addr;
+ return 0;
+ }
+ }
+
+ BMA_LOG(DLOG_DEBUG,
+ "Cannot find proper map address! pci_type: %u, host_number: %u, addr_type: %u\n",
+ pci_type, host_number, type);
+ return -EFAULT;
+}
+EXPORT_SYMBOL(bma_intf_get_map_address);
+
int bma_intf_check_edma_supported(void)
{
return !(!g_bma_dev);
@@ -350,13 +438,30 @@ EXPORT_SYMBOL(bma_intf_check_edma_supported);
int bma_intf_check_dma_status(enum dma_direction_e dir)
{
- return edma_host_check_dma_status(dir);
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return -EFAULT;
+ }
+
+ return get_bma_pci_dev_handler_s()[pci_type].check_dma(dir);
}
EXPORT_SYMBOL(bma_intf_check_dma_status);
void bma_intf_reset_dma(enum dma_direction_e dir)
{
- edma_host_reset_dma(&g_bma_dev->edma_host, dir);
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (!g_bma_dev)
+ return;
+
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return;
+ }
+
+ get_bma_pci_dev_handler_s()[pci_type].reset_dma(&g_bma_dev->edma_host, dir);
}
EXPORT_SYMBOL(bma_intf_reset_dma);
@@ -375,10 +480,16 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
{
int ret = 0;
struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle;
+ enum pci_type_e pci_type = get_pci_type();
if (!handle || !dma_transfer)
return -EFAULT;
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return -EFAULT;
+ }
+
ret = edma_host_dma_start(&g_bma_dev->edma_host, priv);
if (ret) {
BMA_LOG(DLOG_ERROR,
@@ -386,7 +497,8 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
return ret;
}
- ret = edma_host_dma_transfer(&g_bma_dev->edma_host, priv, dma_transfer);
+ ret = get_bma_pci_dev_handler_s()[pci_type].transfer_edma_host(&g_bma_dev->edma_host, priv,
+ dma_transfer);
if (ret)
BMA_LOG(DLOG_ERROR,
"edma_host_dma_transfer failed! ret = %d\n", ret);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
index 577acaedb0e2..0e43289e0d1a 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
@@ -27,13 +27,20 @@
#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5
#define PCI_DEVICE_ID_KBOX_0_PME 0x1710
+#define PCI_DEVICE_ID_EDMA_0 0x1712
#define PCI_PME_USEABLE_SPACE (4 * 1024 * 1024)
+
+#define HOSTRTC_OFFSET 0x10000
+#define EDMA_OFFSET 0x20000
+#define VETH_OFFSET 0x30000
+
#define PME_DEV_CHECK(device, vendor) ((device) == PCI_DEVICE_ID_KBOX_0_PME && \
(vendor) == PCI_VENDOR_ID_HUAWEI_PME)
#define PCI_BAR0_PME_1710 0x85800000
#define PCI_BAR0 0
#define PCI_BAR1 1
+#define PCI_BAR2 2
#define PCI_USING_DAC_DEFAULT 0
#define GET_HIGH_ADDR(address) ((sizeof(unsigned long) == 8) ? \
@@ -51,15 +58,50 @@ int debug = DLOG_ERROR;
MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)");
static struct bma_pci_dev_s *g_bma_pci_dev;
+struct bma_pci_dev_s *get_bma_pci_dev(void)
+{
+ return g_bma_pci_dev;
+}
+
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev)
+{
+ g_bma_pci_dev = bma_pci_dev;
+}
static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state);
static int bma_pci_resume(struct pci_dev *pdev);
static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void bma_pci_remove(struct pci_dev *pdev);
+static struct bma_pci_dev_handler_s g_bma_pci_dev_handler_s[] = {
+ {0},
+ // for 1710/1711
+ {
+ .ioremap_bar_mem = ioremap_pme_bar_mem_v1,
+ .iounmap_bar_mem = iounmap_bar_mem_v1,
+ .check_dma = edma_host_check_dma_status_v1,
+ .transfer_edma_host = edma_host_dma_transfer_v1,
+ .reset_dma = edma_host_reset_dma_v1,
+ },
+ // for 1712
+ {
+ .ioremap_bar_mem = ioremap_pme_bar_mem_v2,
+ .iounmap_bar_mem = iounmap_bar_mem_v2,
+ .check_dma = edma_host_check_dma_status_v2,
+ .transfer_edma_host = edma_host_dma_transfer_v2,
+ .reset_dma = edma_host_reset_dma_v2,
+ }
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void)
+{
+ return g_bma_pci_dev_handler_s;
+}
+
static const struct pci_device_id bma_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_FPGA, PCI_DEVICE_ID_KBOX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_KBOX_0_PME)},
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_EDMA_0) },
{}
};
MODULE_DEVICE_TABLE(pci, bma_pci_tbl);
@@ -73,7 +115,7 @@ int edma_param_get_statics(char *buf, const struct kernel_param *kp)
}
module_param_call(statistics, NULL, edma_param_get_statics, &debug, 0444);
-MODULE_PARM_DESC(statistics, "Statistics info of edma driver,readonly");
+MODULE_PARM_DESC(statistics, "Statistics info of edma driver, readonly");
int edma_param_set_debug(const char *buf, const struct kernel_param *kp)
{
@@ -99,34 +141,40 @@ module_param_call(debug, &edma_param_set_debug, &param_get_int, &debug, 0644);
void __iomem *kbox_get_base_addr(void)
{
- if (!g_bma_pci_dev || (!(g_bma_pci_dev->kbox_base_addr))) {
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev || (!(bma_pci_dev->kbox_base_addr))) {
BMA_LOG(DLOG_ERROR, "kbox_base_addr NULL point\n");
return NULL;
}
- return g_bma_pci_dev->kbox_base_addr;
+ return bma_pci_dev->kbox_base_addr;
}
EXPORT_SYMBOL_GPL(kbox_get_base_addr);
unsigned long kbox_get_io_len(void)
{
- if (!g_bma_pci_dev) {
- BMA_LOG(DLOG_ERROR, "kbox_io_len is error,can not get it\n");
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev) {
+ BMA_LOG(DLOG_ERROR, "kbox_io_len is error, can not get it\n");
return 0;
}
- return g_bma_pci_dev->kbox_base_len;
+ return bma_pci_dev->kbox_base_len;
}
EXPORT_SYMBOL_GPL(kbox_get_io_len);
unsigned long kbox_get_base_phy_addr(void)
{
- if (!g_bma_pci_dev || !g_bma_pci_dev->kbox_base_phy_addr) {
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev || bma_pci_dev->kbox_base_phy_addr == 0) {
BMA_LOG(DLOG_ERROR, "kbox_base_phy_addr NULL point\n");
return 0;
}
- return g_bma_pci_dev->kbox_base_phy_addr;
+ return bma_pci_dev->kbox_base_phy_addr;
}
EXPORT_SYMBOL_GPL(kbox_get_base_phy_addr);
@@ -160,7 +208,7 @@ s32 __atu_config_H(struct pci_dev *pdev, unsigned int region,
return 0;
}
-static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev)
{
if (bma_pci_dev->kbox_base_addr) {
iounmap(bma_pci_dev->kbox_base_addr);
@@ -171,15 +219,84 @@ static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
iounmap(bma_pci_dev->bma_base_addr);
bma_pci_dev->bma_base_addr = NULL;
bma_pci_dev->edma_swap_addr = NULL;
+ bma_pci_dev->veth_swap_addr = NULL;
bma_pci_dev->hostrtc_viraddr = NULL;
}
}
-static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
- struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev)
+{
+ if (bma_pci_dev->kbox_base_addr) {
+ iounmap(bma_pci_dev->kbox_base_addr);
+ bma_pci_dev->kbox_base_addr = NULL;
+ }
+
+ if (bma_pci_dev->bma_base_addr) {
+ iounmap(bma_pci_dev->bma_base_addr);
+ bma_pci_dev->bma_base_addr = NULL;
+ }
+
+ if (bma_pci_dev->hostrtc_viraddr) {
+ iounmap(bma_pci_dev->hostrtc_viraddr);
+ bma_pci_dev->hostrtc_viraddr = NULL;
+ bma_pci_dev->edma_swap_addr = NULL;
+ bma_pci_dev->veth_swap_addr = NULL;
+ }
+}
+
+static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+{
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (pci_type == PCI_TYPE_UNKNOWN)
+ return;
+
+ g_bma_pci_dev_handler_s[pci_type].iounmap_bar_mem(bma_pci_dev);
+}
+
+static int config_atu(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+ int ret = 0;
+ phys_addr_t edma_address = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+ if (ret != 0)
+ return ret;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return ret;
+
+ __atu_config_H(pdev, 0,
+ GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
+ (bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
+ 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
+
+ __atu_config_H(pdev, 1,
+ GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
+ (bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
+ 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
+
+ __atu_config_H(pdev, 2,
+ GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
+ (bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
+ 0, edma_address, EDMA_SWAP_DATA_SIZE);
+
+ __atu_config_H(pdev, 3,
+ GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
+ (bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
+ 0, veth_address, VETH_SWAP_DATA_SIZE);
+
+ return ret;
+}
+
+// for 1710 1711
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
{
unsigned long bar1_resource_flag = 0;
u32 data = 0;
+ int ret;
bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
BMA_LOG(DLOG_DEBUG, "1710\n");
@@ -217,25 +334,11 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
bma_pci_dev->edma_swap_phy_addr,
bma_pci_dev->veth_swap_phy_addr);
- __atu_config_H(pdev, 0,
- GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
- (bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
- 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
-
- __atu_config_H(pdev, 1,
- GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
- (bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
- 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
-
- __atu_config_H(pdev, 2,
- GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
- (bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
- 0, EDMA_SWAP_DATA_BASE, EDMA_SWAP_DATA_SIZE);
-
- __atu_config_H(pdev, 3,
- GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
- (bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
- 0, VETH_SWAP_DATA_BASE, VETH_SWAP_DATA_SIZE);
+ ret = config_atu(pdev, bma_pci_dev);
+ if (ret != 0) {
+ BMA_LOG(DLOG_DEBUG, "config atu failed.\n");
+ return ret;
+ }
if (bar1_resource_flag & IORESOURCE_CACHEABLE) {
bma_pci_dev->bma_base_addr =
@@ -250,7 +353,6 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
if (!bma_pci_dev->bma_base_addr) {
BMA_LOG(DLOG_ERROR,
"Cannot map device registers, aborting\n");
-
return -ENODEV;
}
@@ -270,11 +372,80 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
return 0;
}
+// for 1712
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+ unsigned long bar2_resource_flag = 0;
+
+ bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
+ BMA_LOG(DLOG_DEBUG, "1712\n");
+
+ bma_pci_dev->bma_base_phy_addr = (unsigned long)pci_resource_start(pdev, PCI_BAR2);
+ bar2_resource_flag = (unsigned long)pci_resource_flags(pdev, PCI_BAR2);
+ if (!(bar2_resource_flag & IORESOURCE_MEM)) {
+ BMA_LOG(DLOG_ERROR, "Cannot find proper PCI device base address, aborting\n");
+ return -ENODEV;
+ }
+
+ bma_pci_dev->bma_base_len = (unsigned long)pci_resource_len(pdev, PCI_BAR2);
+ bma_pci_dev->edma_swap_len = EDMA_SWAP_DATA_SIZE;
+ bma_pci_dev->veth_swap_len = VETH_SWAP_DATA_SIZE;
+
+ BMA_LOG(DLOG_DEBUG,
+ "bar2: bma_base_len = 0x%lx, edma_swap_len = %ld, veth_swap_len = %ld(0x%lx)\n",
+ bma_pci_dev->bma_base_len, bma_pci_dev->edma_swap_len, bma_pci_dev->veth_swap_len,
+ bma_pci_dev->veth_swap_len);
+
+ bma_pci_dev->hostrtc_phyaddr = bma_pci_dev->bma_base_phy_addr + HOSTRTC_OFFSET;
+ /* edma */
+ bma_pci_dev->edma_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + EDMA_OFFSET;
+ /* veth */
+ bma_pci_dev->veth_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + VETH_OFFSET;
+
+ BMA_LOG(DLOG_DEBUG,
+ "bar2: bma_base_phy_addr = 0x%lx, bma_base_len = %zu , hostrtc_phyaddr = 0x%lx, edma_swap_phy_addr = 0x%lx, veth_swap_phy_addr = 0x%lx\n",
+ bma_pci_dev->bma_base_phy_addr, bma_pci_dev->bma_base_len,
+ bma_pci_dev->hostrtc_phyaddr, bma_pci_dev->edma_swap_phy_addr,
+ bma_pci_dev->veth_swap_phy_addr);
+
+ bma_pci_dev->bma_base_addr = ioremap(bma_pci_dev->bma_base_phy_addr,
+ bma_pci_dev->bma_base_len);
+ if (!bma_pci_dev->bma_base_addr) {
+ BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+ return -ENODEV;
+ }
+
+ if (bar2_resource_flag & IORESOURCE_CACHEABLE) {
+ BMA_LOG(DLOG_DEBUG, "ioremap with cache, %d\n", IORESOURCE_CACHEABLE);
+ bma_pci_dev->hostrtc_viraddr = ioremap(bma_pci_dev->hostrtc_phyaddr,
+ bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+ } else {
+ BMA_LOG(DLOG_DEBUG, "ioremap without cache\n");
+ bma_pci_dev->hostrtc_viraddr = IOREMAP(bma_pci_dev->hostrtc_phyaddr,
+ bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+ }
+
+ if (!bma_pci_dev->hostrtc_viraddr) {
+ BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+ iounmap(bma_pci_dev->bma_base_addr);
+ bma_pci_dev->bma_base_addr = NULL;
+ return -ENODEV;
+ }
+
+ bma_pci_dev->edma_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+ - HOSTRTC_OFFSET + EDMA_OFFSET;
+ bma_pci_dev->veth_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+ - HOSTRTC_OFFSET + VETH_OFFSET;
+
+ return 0;
+}
+
static int ioremap_bar_mem(struct pci_dev *pdev,
struct bma_pci_dev_s *bma_pci_dev)
{
int err = 0;
unsigned long bar0_resource_flag = 0;
+ enum pci_type_e pci_type = get_pci_type();
bar0_resource_flag = pci_resource_flags(pdev, PCI_BAR0);
@@ -294,8 +465,8 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
bma_pci_dev->kbox_base_phy_addr, bma_pci_dev->kbox_base_len,
bma_pci_dev->kbox_base_len);
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
- err = ioremap_pme_bar1_mem(pdev, bma_pci_dev);
+ if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && pci_type != PCI_TYPE_UNKNOWN) {
+ err = g_bma_pci_dev_handler_s[pci_type].ioremap_bar_mem(pdev, bma_pci_dev);
if (err != 0)
return err;
}
@@ -314,11 +485,7 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
if (!bma_pci_dev->kbox_base_addr) {
BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
-
- iounmap(bma_pci_dev->bma_base_addr);
- bma_pci_dev->bma_base_addr = NULL;
- bma_pci_dev->edma_swap_addr = NULL;
- bma_pci_dev->hostrtc_viraddr = NULL;
+ iounmap_bar_mem(bma_pci_dev);
return -ENOMEM;
}
@@ -355,13 +522,14 @@ int pci_device_init(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
{
int err = 0;
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+ if ((pdev->device == PCI_DEVICE_ID_KBOX_0_PME || pdev->device == PCI_DEVICE_ID_EDMA_0) &&
+ pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME) {
err = bma_devinft_init(bma_pci_dev);
if (err) {
BMA_LOG(DLOG_ERROR, "bma_devinft_init failed\n");
bma_devinft_cleanup(bma_pci_dev);
iounmap_bar_mem(bma_pci_dev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
pci_release_regions(pdev);
kfree(bma_pci_dev);
#ifdef CONFIG_PCI_MSI
@@ -400,27 +568,25 @@ int pci_device_config(struct pci_dev *pdev)
goto err_out_free_dev;
}
+ set_bma_pci_dev(bma_pci_dev);
+
err = ioremap_bar_mem(pdev, bma_pci_dev);
if (err) {
BMA_LOG(DLOG_ERROR, "ioremap_edma_io_mem failed\n");
goto err_out_release_regions;
}
- g_bma_pci_dev = bma_pci_dev;
-
if (SET_DMA_MASK(&pdev->dev)) {
BMA_LOG(DLOG_ERROR,
- "No usable DMA ,configuration, aborting,goto failed2!!!\n");
+ "No usable DMA, configuration, aborting, goto failed2!!!\n");
goto err_out_unmap_bar;
}
- g_bma_pci_dev = bma_pci_dev;
-
return pci_device_init(pdev, bma_pci_dev);
err_out_unmap_bar:
iounmap_bar_mem(bma_pci_dev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
err_out_release_regions:
pci_release_regions(pdev);
err_out_free_dev:
@@ -442,16 +608,27 @@ static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
UNUSED(ent);
- if (g_bma_pci_dev)
+ if (get_bma_pci_dev())
return -EPERM;
err = pci_enable_device(pdev);
if (err) {
- BMA_LOG(DLOG_ERROR, "Cannot enable PCI device,aborting\n");
+ BMA_LOG(DLOG_ERROR, "Cannot enable PCI device, aborting\n");
return err;
}
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_KBOX_0_PME:
+ set_pci_type(PCI_TYPE_171x);
+ break;
+ case PCI_DEVICE_ID_EDMA_0:
+ set_pci_type(PCI_TYPE_1712);
+ break;
+ default:
+ set_pci_type(PCI_TYPE_UNKNOWN);
+ break;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && get_pci_type() != PCI_TYPE_UNKNOWN) {
err = pme_pci_enable_msi(pdev);
if (err)
return err;
@@ -468,7 +645,7 @@ static void bma_pci_remove(struct pci_dev *pdev)
struct bma_pci_dev_s *bma_pci_dev =
(struct bma_pci_dev_s *)pci_get_drvdata(pdev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
(void)pci_set_drvdata(pdev, NULL);
if (bma_pci_dev) {
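
On the 1712, ioremap_pme_bar_mem_v2() above maps BAR2 once and carves it up at
fixed offsets (HOSTRTC at +0x10000, EDMA swap at +0x20000, VETH swap at
+0x30000) rather than relying on the 1710-era fixed BMC addresses. A short
sketch of that arithmetic, using an arbitrary BAR2 base purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define HOSTRTC_OFFSET 0x10000
#define EDMA_OFFSET    0x20000
#define VETH_OFFSET    0x30000

int main(void)
{
	/* example only; the driver reads the base from pci_resource_start(pdev, 2) */
	uint64_t bar2_base = 0xe0000000ULL;

	printf("hostrtc: 0x%llx\n", (unsigned long long)(bar2_base + HOSTRTC_OFFSET));
	printf("edma   : 0x%llx\n", (unsigned long long)(bar2_base + EDMA_OFFSET));
	printf("veth   : 0x%llx\n", (unsigned long long)(bar2_base + VETH_OFFSET));
	return 0;
}
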
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
index a66724e2cb74..b43882997c01 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
@@ -18,6 +18,8 @@
#include "bma_devintf.h"
#include "bma_include.h"
+#include "../include/bma_ker_intf.h"
+#include "edma_host.h"
#include <linux/netdevice.h>
#define EDMA_SWAP_BASE_OFFSET 0x10000
@@ -25,10 +27,8 @@
#define HOSTRTC_REG_BASE 0x2f000000
#define HOSTRTC_REG_SIZE EDMA_SWAP_BASE_OFFSET
-#define EDMA_SWAP_DATA_BASE 0x84810000
#define EDMA_SWAP_DATA_SIZE 65536
-#define VETH_SWAP_DATA_BASE 0x84820000
#define VETH_SWAP_DATA_SIZE 0xdf000
#define ATU_VIEWPORT 0x900
@@ -71,7 +71,7 @@ struct bma_pci_dev_s {
#ifdef DRV_VERSION
#define BMA_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define BMA_VERSION "0.3.10"
+#define BMA_VERSION "0.4.0"
#endif
#ifdef CONFIG_ARM64
@@ -95,4 +95,31 @@ extern int debug;
int edmainfo_show(char *buff);
+struct bma_pci_dev_s *get_bma_pci_dev(void);
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev);
+
+struct bma_pci_dev_handler_s {
+ int (*ioremap_bar_mem)(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+ void (*iounmap_bar_mem)(struct bma_pci_dev_s *bma_pci_dev);
+ int (*check_dma)(enum dma_direction_e dir);
+ int (*transfer_edma_host)(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+ void (*reset_dma)(struct edma_host_s *edma_host, enum dma_direction_e dir);
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void);
+
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev);
+int edma_host_check_dma_status_v1(enum dma_direction_e dir);
+int edma_host_check_dma_status_v2(enum dma_direction_e dir);
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir);
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir);
+
#endif
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
new file mode 100644
index 000000000000..b0a09c022ba8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_DRV_H
+#define EDMA_DRV_H
+
+#define DMA_STATISTICS_LEN 16
+#define DMA_CH_TAG_SIZE 64
+
+#define HISILICON_VENDOR_ID 0x19e5
+#define DMA_PCIE_DEVICE_ID 0xa122
+
+#define MAX_DMA_CHS 4 /* The current version supports a maximum of 2x2 channels. */
+#define DMA_CHS_EACH_PORT 2
+
+#define MAX_SQ_DEPTH 0xFFFF
+#define MAX_CQ_DEPTH 0xFFFF
+
+#define DMA_DONE_MASK 0x1
+#define DMA_DONE_UNMASK 0x0
+#define DMA_ERR_MASK 0x7FFFE
+#define DMA_ERR_UNMASK 0x0
+
+#define BD_SO 0
+#define BD_RO 1
+
+#define SIZE_4M 0x400000
+#define SIZE_16K 0x4000
+#define SIZE_64K 0x10000
+#define SIZE_OF_U64 0x8
+#define SPD_SIZE_MAX 32
+
+/* Use integer arithmetic for approximate computation instead of floating-point. */
+#define US_PER_SECOND_DIV_1KB (1000000 / 1024)
+
+#define DMA_PHY_STORE_OFFSET (SIZE_64K - SIZE_OF_U64)
+#define DMA_RMT_PHY_STORE_OFFSET (DMA_PHY_STORE_OFFSET - SIZE_OF_U64)
+#define BIT_0_TO_31_MASK 0xFFFFFFFF
+
+#define DMA_TMOUT (2 * HZ) /* 2 seconds */
+
+enum {
+ EP0 = 0,
+ EP1 = 1
+};
+
+enum {
+ DRC_LOCAL = 0,
+ DRC_REMOTE = 1
+};
+
+enum {
+ DIR_B2H = 0,
+ DIR_H2B = 1,
+};
+
+enum {
+ DMA_INIT = 0x0,
+ DMA_RESET = 0x1,
+ DMA_PAUSE = 0x2,
+ DMA_NOTIFY = 0x3,
+ LINKDOWN = 0x4,
+ LINKUP = 0x5,
+ FLR = 0x6
+};
+
+enum {
+ PF0 = 0,
+ PF1 = 1,
+ PF2 = 2,
+ PF4 = 4,
+ PF7 = 7,
+ PF10 = 10
+};
+
+enum {
+ RESERVED = 0x0, /* reserved */
+ SMALL_PACKET = 0x1, /* SmallPacket Descriptor */
+ DMA_READ = 0x2, /* Read Descriptor */
+ DMA_WRITE = 0x3, /* Write Descriptor */
+ DMA_LOOP = 0x4, /* Loop Descriptor */
+ DMA_MIX = 0x10, /* not available, User-defined for test */
+ DMA_WD_BARRIER = 0x11, /* not available, User-defined for test */
+ DMA_RD_BARRIER = 0x12, /* not available, User-defined for test */
+ DMA_LP_BARRIER = 0x13 /* not available, User-defined for test */
+};
+
+enum {
+ IDLE_STATE = 0x0, /* dma channel in idle status */
+ RUN_STATE = 0x1, /* dma channel in run status */
+ CPL_STATE = 0x2, /* dma channel in cpld status */
+ PAUSE_STATE = 0x3, /* dma channel in pause status */
+ HALT_STATE = 0x4, /* dma channel in halt status */
+ ABORT_STATE = 0x5, /* dma channel in abort status */
+ WAIT_STATE = 0x6 /* dma channel in wait status */
+};
+
+/* CQE status */
+enum {
+ DMA_DONE = 0x0, /* sqe done succ */
+ OPCODE_ERR = 0x1, /* sqe opcode invalid */
+ LEN_ERR = 0x2, /* sqe length invalid, only ocurs in smallpackt */
+ DROP_EN = 0x4, /* sqe drop happen */
+ WR_RMT_ERR = 0x8, /* write data to host fail */
+ RD_RMT_ERR = 0x10, /* read data from host fail */
+ RD_AXI_ERR = 0x20, /* read data/sqe from local fail */
+ WR_AXI_ERR = 0x40, /* write data/cqe to local fail */
+ POISON_CPL_ERR = 0x80, /* poison data */
+ SUB_SQ_ERR = 0x100, /* read sqe with CPL TLP */
+ DMA_CH_RESET = 0x200, /* dma channel should reset */
+ LINK_DOWN_ERR = 0x400, /* linkdown happen */
+ RECOVERY = 0x800 /* error status to be reset */
+};
+
+enum {
+ SDI_DMA_ADDR_SIZE_16K = 0,
+ SDI_DMA_ADDR_SIZE_32K = 1,
+ SDI_DMA_ADDR_SIZE_64K = 2,
+ SDI_DMA_ADDR_SIZE_128K = 3
+};
+
+union U_DMA_QUEUE_SQ_DEPTH {
+ struct {
+ unsigned int dma_queue_sq_depth : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_DEPTH {
+ struct {
+ unsigned int dma_queue_cq_depth : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_HEAD_PTR {
+ struct {
+ unsigned int dma_queue_cq_head_ptr : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_TAIL_PTR {
+ struct {
+ unsigned int dma_queue_cq_tail_ptr : 16; /* [15..0] */
+ unsigned int dma_queue_sqhd : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_SQ_TAIL_PTR {
+ struct {
+ unsigned int dma_queue_sq_tail_ptr : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CTRL0 {
+ struct {
+ unsigned int dma_queue_en : 1; /* [0] */
+ unsigned int dma_queue_icg_en : 1; /* [1] */
+ unsigned int reserved : 1; /* [2] */
+ unsigned int dma_rst_without_cq_ack_enable : 1; /* [3] */
+ unsigned int dma_queue_pause : 1; /* [4] */
+ unsigned int reserved_1 : 3; /* [7..5] */
+ unsigned int dma_queue_arb_weight : 8; /* [15..8] */
+ unsigned int reserved_2 : 3; /* [18...16] */
+ unsigned int dma_queue_cq_mrg_en : 1; /* [19] */
+ unsigned int dma_queue_cq_mrg_time : 2; /* [21..20] */
+ unsigned int dma_queue_local_err_done_int_en : 1; /* [22] */
+ unsigned int dma_queue_remote_err_done_int_en : 1; /* [23] */
+ unsigned int reserved_3 : 1; /* [24] */
+ unsigned int dma_queue_cq_full_disable : 1; /* [25] */
+ unsigned int dma_queue_cq_drct_sel : 1; /* [26] */
+ unsigned int dma_queue_sq_drct_sel : 1; /* [27] */
+ unsigned int dma_queue_sq_pa_lkp_err_abort_en : 1; /* [28] */
+ unsigned int dma_queue_sq_proc_err_abort_en : 1; /* [29] */
+ unsigned int dma_queue_sq_drop_err_abort_en : 1; /* [30] */
+ unsigned int dma_queue_sq_cfg_err_abort_en : 1; /* [31] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CTRL1 {
+ struct {
+ unsigned int dma_queue_reset : 1; /* [0] */
+ unsigned int dma_queue_abort_exit : 1; /* [1] */
+ unsigned int dma_va_enable : 1; /* [2] */
+ unsigned int reserved_0 : 1; /* [3] */
+ unsigned int dma_queue_port_num : 4; /* [7..4] */
+ unsigned int dma_queue_remote_msi_x_mask : 1; /* [8] */
+ unsigned int dma_va_enable_sq : 1; /* [9] */
+ unsigned int dma_va_enable_cq : 1; /* [10] */
+ unsigned int dma_queue_local_pfx_er : 1; /* [11] */
+ unsigned int dma_queue_local_pfx_pmr : 1; /* [12] */
+ unsigned int reserved_1 : 3; /* [15...13] */
+ unsigned int dma_queue_qos_en : 1; /* [16] */
+ unsigned int dma_queue_qos : 4; /* [20...17] */
+ unsigned int dma_queue_mpam_id : 11; /* [31..21] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_FSM_STS {
+ struct {
+ unsigned int dma_queue_sts : 4; /* [3..0] */
+ unsigned int dma_queue_not_work : 1; /* [4] */
+ unsigned int dma_queue_wait_spd_data_sts : 1; /* [5] */
+ unsigned int reserved_0 : 1; /* [6] */
+ unsigned int reserved_1 : 1; /* [7] */
+ unsigned int dma_queue_sub_fsm_sts : 3; /* [10..8] */
+ unsigned int reserved_2 : 21; /* [31..11] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_INT_STS {
+ struct {
+ unsigned int dma_queue_done_int_sts : 1; /* [0] */
+ unsigned int dma_queue_err00_int_sts : 1; /* [1] */
+ unsigned int dma_queue_err01_int_sts : 1; /* [2] */
+ unsigned int dma_queue_err02_int_sts : 1; /* [3] */
+ unsigned int dma_queue_err03_int_sts : 1; /* [4] */
+ unsigned int reserved : 1; /* [5] */
+ unsigned int dma_queue_err05_int_sts : 1; /* [6] */
+ unsigned int dma_queue_err06_int_sts : 1; /* [7] */
+ unsigned int dma_queue_err07_int_sts : 1; /* [8] */
+ unsigned int dma_queue_err08_int_sts : 1; /* [9] */
+ unsigned int dma_queue_err09_int_sts : 1; /* [10] */
+ unsigned int dma_queue_err10_int_sts : 1; /* [11] */
+ unsigned int dma_queue_err11_int_sts : 1; /* [12] */
+ unsigned int dma_queue_err12_int_sts : 1; /* [13] */
+ unsigned int dma_queue_err13_int_sts : 1; /* [14] */
+ unsigned int dma_queue_err14_int_sts : 1; /* [15] */
+ unsigned int dma_queue_err15_int_sts : 1; /* [16] */
+ unsigned int dma_queue_err16_int_sts : 1; /* [17] */
+ unsigned int dma_queue_err17_int_sts : 1; /* [18] */
+ unsigned int reserved_0 : 13; /* [31..19] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_INT_MSK {
+ struct {
+ unsigned int dma_queue_done_int_msk : 1; /* [0] */
+ unsigned int dma_queue_err00_int_msk : 1; /* [1] */
+ unsigned int dma_queue_err01_int_msk : 1; /* [2] */
+ unsigned int dma_queue_err02_int_msk : 1; /* [3] */
+ unsigned int dma_queue_err03_int_msk : 1; /* [4] */
+ unsigned int reserved : 1; /* [5] */
+ unsigned int dma_queue_err05_int_msk : 1; /* [6] */
+ unsigned int dma_queue_err06_int_msk : 1; /* [7] */
+ unsigned int dma_queue_err07_int_msk : 1; /* [8] */
+ unsigned int dma_queue_err08_int_msk : 1; /* [9] */
+ unsigned int dma_queue_err09_int_msk : 1; /* [10] */
+ unsigned int dma_queue_err10_int_msk : 1; /* [11] */
+ unsigned int dma_queue_err11_int_msk : 1; /* [12] */
+ unsigned int dma_queue_err12_int_msk : 1; /* [13] */
+ unsigned int dma_queue_err13_int_msk : 1; /* [14] */
+ unsigned int dma_queue_err14_int_msk : 1; /* [15] */
+ unsigned int dma_queue_err15_int_msk : 1; /* [16] */
+ unsigned int dma_queue_err16_int_msk : 1; /* [17] */
+ unsigned int dma_queue_err17_int_msk : 1; /* [18] */
+ unsigned int reserved_0 : 13 ; /* [31..19] */
+ } bits;
+
+ unsigned int u32;
+};
+
+struct dma_ch_sq_s {
+ u32 opcode : 4; /* [0~3] opcode */
+ u32 drop : 1; /* [4] drop */
+ u32 nw : 1; /* [5] nw */
+ u32 wd_barrier : 1; /* [6] write done barrier */
+ u32 rd_barrier : 1; /* [7] read done barrier */
+ u32 ldie : 1; /* [8] LDIE */
+ u32 rdie : 1; /* [9] rDIE */
+ u32 loop_barrier : 1; /* [10] */
+ u32 spd_barrier : 1; /* [11] */
+ u32 attr : 3; /* [12~14] attr */
+ u32 cq_disable : 1; /* [15] reserved */
+ u32 addrt : 2; /* [16~17] at */
+ u32 p3p4 : 2; /* [18~19] P3 P4 */
+ u32 pf : 3; /* [20~22] pf */
+ u32 vfen : 1; /* [23] vfen */
+ u32 vf : 8; /* [24~31] vf */
+ u32 pasid : 20; /* [0~19] pasid */
+ u32 er : 1; /* [20] er */
+ u32 pmr : 1; /* [21] pmr */
+ u32 prfen : 1; /* [22] prfen */
+ u32 reserved5 : 1; /* [23] reserved */
+ u32 msi : 8; /* [24~31] MSI/MSI-X vector */
+ u32 flow_id : 8; /* [0~7] Flow ID */
+ u32 reserved6 : 8; /* [8~15] reserved */
+ u32 TH : 1; /* [16] TH */
+ u32 PH : 2; /* [17~18] PH */
+ u32 reserved7 : 13; /* [19~31] reserved: some multiplex fields */
+ u32 length;
+ u32 src_addr_l;
+ u32 src_addr_h;
+ u32 dst_addr_l;
+ u32 dst_addr_h;
+};
+
+struct dma_ch_cq_s {
+ u32 reserved1;
+ u32 reserved2;
+ u32 sqhd : 16;
+ u32 reserved3 : 16;
+ u32 reserved4 : 16; /* [0~15] reserved */
+ u32 vld : 1; /* [16] vld */
+ u32 status : 15; /* [17~31] status */
+};
+
+#endif /* EDMA_DRV_H */
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
index be2f732ed9ed..1bfb123e43c0 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
@@ -20,11 +20,18 @@
#include <linux/seq_file.h>
#include "bma_pci.h"
+#include "edma_queue.h"
#include "edma_host.h"
static struct edma_user_inft_s *g_user_func[TYPE_MAX] = { 0 };
static struct bma_dev_s *g_bma_dev;
+
+struct bma_dev_s *get_bma_dev(void)
+{
+ return g_bma_dev;
+}
+
static int edma_host_dma_interrupt(struct edma_host_s *edma_host);
int edmainfo_show(char *buf)
@@ -231,7 +238,8 @@ void clear_int_dmab2h(struct edma_host_s *edma_host)
(void)pci_write_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, data);
}
-int edma_host_check_dma_status(enum dma_direction_e dir)
+// for 1710 1711
+int edma_host_check_dma_status_v1(enum dma_direction_e dir)
{
int ret = 0;
@@ -259,6 +267,18 @@ int edma_host_check_dma_status(enum dma_direction_e dir)
return ret;
}
+// for 1712
+int edma_host_check_dma_status_v2(enum dma_direction_e dir)
+{
+ UNUSED(dir);
+ if (check_dma_queue_state(CPL_STATE, TRUE) == 0 ||
+ check_dma_queue_state(IDLE_STATE, TRUE) == 0) {
+ return 1; /* ok */
+ }
+
+ return 0; /* busy */
+}
+
#ifdef USE_DMA
static int start_transfer_h2b(struct edma_host_s *edma_host, unsigned int len,
@@ -633,9 +653,9 @@ void host_dma_transfer_withlist(struct edma_host_s *edma_host,
}
}
-int edma_host_dma_transfer(struct edma_host_s *edma_host,
- struct bma_priv_data_s *priv,
- struct bma_dma_transfer_s *dma_transfer)
+// for 1710 1711
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer)
{
int ret = 0;
unsigned long flags = 0;
@@ -673,7 +693,44 @@ int edma_host_dma_transfer(struct edma_host_s *edma_host,
return ret;
}
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
+// for 1712
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+ struct bma_dev_s *bma_dev = NULL;
+
+ BMA_LOG(DLOG_DEBUG, "edma_host_dma_transfer 1712");
+
+ if (!edma_host || !priv || !dma_transfer)
+ return -EFAULT;
+
+ bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host);
+
+ spin_lock_irqsave(&bma_dev->priv_list_lock, flags);
+
+ if (priv->user.dma_transfer == 0) {
+ spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+ BMA_LOG(DLOG_ERROR, "dma_transfer = %hhd\n", priv->user.dma_transfer);
+ return -EFAULT;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "transfer_edma_host 1712");
+
+ spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+
+ edma_host->statistics.dma_count++;
+
+ spin_lock_irqsave(&edma_host->reg_lock, flags);
+ ret = transfer_dma_queue(dma_transfer);
+ spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+
+ return ret;
+}
+
+// for 1710/1711
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir)
{
u32 data = 0;
u32 reg_addr = 0;
@@ -717,6 +774,13 @@ void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
reg_addr, count, data);
}
+// for 1712
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir)
+{
+ UNUSED(dir);
+ reset_edma_host(edma_host);
+}
+
int edma_host_dma_stop(struct edma_host_s *edma_host,
struct bma_priv_data_s *priv)
{
@@ -750,8 +814,8 @@ static int edma_host_send_msg(struct edma_host_s *edma_host)
if (send_mbx_hdr->mbxlen > 0) {
if (send_mbx_hdr->mbxlen > HOST_MAX_SEND_MBX_LEN) {
/*share memory is disable */
+ BMA_LOG(DLOG_DEBUG, "mbxlen is too long: %d\n", send_mbx_hdr->mbxlen);
send_mbx_hdr->mbxlen = 0;
- BMA_LOG(DLOG_DEBUG, "mbxlen is too long\n");
return -EFAULT;
}
@@ -1296,6 +1360,69 @@ int edma_host_user_unregister(u32 type)
return 0;
}
+static void init_edma_sq_cq(struct edma_host_s *edma_host)
+{
+ u64 sq_phy_addr = 0;
+ u64 cq_phy_addr = 0;
+ phys_addr_t edma_address = 0;
+ int ret = 0;
+
+ if (get_pci_type() != PCI_TYPE_1712)
+ return;
+
+ ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+ if (ret != 0)
+ return;
+
+ edma_host->edma_sq_addr = (void *)((unsigned char *)edma_host->edma_recv_addr
+ + HOST_MAX_RCV_MBX_LEN);
+ edma_host->edma_cq_addr = (void *)((unsigned char *)edma_host->edma_sq_addr
+ + sizeof(struct dma_ch_sq_s) * SQ_DEPTH);
+ sq_phy_addr = edma_address + HOST_DMA_FLAG_LEN + HOST_MAX_SEND_MBX_LEN
+ + HOST_MAX_RCV_MBX_LEN;
+ cq_phy_addr = sq_phy_addr + sizeof(struct dma_ch_sq_s) * SQ_DEPTH;
+
+ BMA_LOG(DLOG_DEBUG,
+ "sq_phy_addr = 0x%llx, SQ size = %zu, cq_phy_addr = 0x%llx, CQ size = %zu",
+ sq_phy_addr, sizeof(struct dma_ch_sq_s) * SQ_DEPTH,
+ cq_phy_addr, sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+ BMA_LOG(DLOG_DEBUG, "sq_addr = %pK, cq_addr = %pK", edma_host->edma_sq_addr,
+ edma_host->edma_cq_addr);
+
+ (void)memset(edma_host->edma_sq_addr, 0,
+ sizeof(struct dma_ch_sq_s) * SQ_DEPTH + sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+
+ set_dma_queue_sq_base_l(sq_phy_addr & PCIE_ADDR_L_32_MASK);
+ set_dma_queue_sq_base_h((u32)(sq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+ set_dma_queue_cq_base_l(cq_phy_addr & PCIE_ADDR_L_32_MASK);
+ set_dma_queue_cq_base_h((u32)(cq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+
+ reset_edma_host(edma_host);
+}
+
+static void edma_setup_timer(struct edma_host_s *edma_host)
+{
+#ifdef HAVE_TIMER_SETUP
+ timer_setup(&edma_host->timer, edma_host_timeout, 0);
+#else
+ setup_timer(&edma_host->timer, edma_host_timeout,
+ (unsigned long)edma_host);
+#endif
+ (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
+
+#ifdef USE_DMA
+ #ifdef HAVE_TIMER_SETUP
+ timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
+
+ #else
+ setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
+ (unsigned long)edma_host);
+ #endif
+ (void)mod_timer(&edma_host->dma_timer,
+ jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
+#endif
+}
+
int edma_host_init(struct edma_host_s *edma_host)
{
int ret = 0;
@@ -1352,24 +1479,7 @@ int edma_host_init(struct edma_host_s *edma_host)
edma_host->b2h_state = B2HSTATE_IDLE;
#ifdef EDMA_TIMER
- #ifdef HAVE_TIMER_SETUP
- timer_setup(&edma_host->timer, edma_host_timeout, 0);
- #else
- setup_timer(&edma_host->timer, edma_host_timeout,
- (unsigned long)edma_host);
- #endif
- (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
-#ifdef USE_DMA
- #ifdef HAVE_TIMER_SETUP
- timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
-
- #else
- setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
- (unsigned long)edma_host);
- #endif
- (void)mod_timer(&edma_host->dma_timer,
- jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
-#endif
+ edma_setup_timer(edma_host);
#else
init_completion(&edma_host->msg_ready);
@@ -1383,6 +1493,8 @@ int edma_host_init(struct edma_host_s *edma_host)
}
#endif
+ init_edma_sq_cq(edma_host);
+
#ifdef HAVE_TIMER_SETUP
timer_setup(&edma_host->heartbeat_timer,
edma_host_heartbeat_timer, 0);
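
init_edma_sq_cq() above lays the 1712 submission and completion queues out in
the shared EDMA region directly behind the existing mailboxes: the SQ base is
edma_address + HOST_DMA_FLAG_LEN + HOST_MAX_SEND_MBX_LEN + HOST_MAX_RCV_MBX_LEN,
and the CQ starts after SQ_DEPTH (128) SQ entries. A sketch of the same
arithmetic with placeholder sizes (the real flag and mailbox lengths come from
edma_host.h and are not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define SQ_DEPTH 128
#define CQ_DEPTH 128

struct dma_ch_sq_s { uint32_t w[8]; };   /* 8 x u32 per SQ entry, as in edma_drv.h */
struct dma_ch_cq_s { uint32_t w[4]; };   /* 4 x u32 per CQ entry */

int main(void)
{
	/* placeholder sizes; the driver uses HOST_DMA_FLAG_LEN and the mailbox lengths */
	uint64_t edma_address = 0x10000000ULL;
	uint64_t flag_len = 0x1000, send_mbx = 0x4000, recv_mbx = 0x4000;

	uint64_t sq_phy = edma_address + flag_len + send_mbx + recv_mbx;
	uint64_t cq_phy = sq_phy + sizeof(struct dma_ch_sq_s) * SQ_DEPTH;

	printf("SQ at 0x%llx (%zu bytes), CQ at 0x%llx (%zu bytes)\n",
	       (unsigned long long)sq_phy, sizeof(struct dma_ch_sq_s) * SQ_DEPTH,
	       (unsigned long long)cq_phy, sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
	return 0;
}
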
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
index cbbd86fd6602..93c81bc92286 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
@@ -18,6 +18,8 @@
#include "bma_include.h"
#include "../include/bma_ker_intf.h"
+#include "edma_reg.h"
+#include "edma_drv.h"
#define EDMA_TIMER
@@ -176,6 +178,13 @@
#define U64ADDR_H(addr) ((((u64)addr) >> 32) & 0xffffffff)
#define U64ADDR_L(addr) ((addr) & 0xffffffff)
+#define MAX_RESET_DMA_TIMES 10
+#define DELAY_BETWEEN_RESET_DMA 100
+#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5
+#define PCI_DEVICE_ID_EDMA_0 0x1712
+#define SQ_DEPTH 128
+#define CQ_DEPTH 128
+
struct bma_register_dev_type_s {
u32 type;
u32 sub_type;
@@ -263,6 +272,8 @@ struct edma_host_s {
void __iomem *edma_flag;
void __iomem *edma_send_addr;
void __iomem *edma_recv_addr;
+ void __iomem *edma_sq_addr;
+ void __iomem *edma_cq_addr;
#ifdef USE_DMA
struct timer_list dma_timer;
#endif
@@ -309,6 +320,8 @@ struct edma_user_inft_s {
int (*add_msg)(void *msg, size_t msg_len);
};
+struct bma_dev_s *get_bma_dev(void);
+
int is_edma_b2h_int(struct edma_host_s *edma_host);
void edma_int_to_bmc(struct edma_host_s *edma_host);
int edma_host_mmap(struct edma_host_s *edma_hos, struct file *filp,
@@ -336,7 +349,6 @@ int edma_host_user_unregister(u32 type);
int edma_host_init(struct edma_host_s *edma_host);
void edma_host_cleanup(struct edma_host_s *edma_host);
int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype);
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir);
void clear_int_dmah2b(struct edma_host_s *edma_host);
void clear_int_dmab2h(struct edma_host_s *edma_host);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
new file mode 100644
index 000000000000..678262f7412c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "bma_pci.h"
+#include "edma_host.h"
+#include "edma_queue.h"
+
+static u32 pcie_dma_read(u32 offset)
+{
+ u32 reg_val;
+
+ reg_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ BMA_LOG(DLOG_DEBUG, "readl, offset 0x%x val 0x%x\n", offset, reg_val);
+ return reg_val;
+}
+
+static void pcie_dma_write(u32 offset, u32 reg_val)
+{
+ u32 read_val;
+
+ (void)writel(reg_val, get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ read_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ if (read_val != reg_val) {
+ BMA_LOG(DLOG_DEBUG,
+ "writel fail, read_value: 0x%x, set_value: 0x%x, offset: 0x%x\n",
+ read_val, reg_val, offset);
+ return;
+ }
+ BMA_LOG(DLOG_DEBUG, "writel, offset 0x%x val 0x%x\n", offset, reg_val);
+}
+
+static void set_dma_queue_int_msk(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_MSK_0_REG, val);
+}
+
+static void set_dma_queue_err_int_msk(u32 val)
+{
+ union U_DMA_QUEUE_INT_MSK reg_val;
+
+ // The least significant bit (bit 0) of this register is reserved and must be cleared,
+ // while the remaining bits should retain their original values.
+ reg_val.u32 = val & 0xFFFFFFFE;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_int_sts(u32 val)
+{
+ union U_DMA_QUEUE_INT_STS reg_val;
+
+ reg_val.u32 = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_STS_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_int_sts(u32 *val)
+{
+ union U_DMA_QUEUE_INT_STS reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_INT_STS_0_REG);
+ *val = reg_val.u32;
+}
+
+static void get_dma_queue_fsm_sts(u32 *val)
+{
+ union U_DMA_QUEUE_FSM_STS reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_FSM_STS_0_REG);
+ *val = reg_val.bits.dma_queue_sts;
+}
+
+static void pause_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_pause = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void enable_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_en = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void reset_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL1 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL1_0_REG);
+ reg_val.bits.dma_queue_reset = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL1_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_sq_tail(u32 val)
+{
+ union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+ reg_val.bits.dma_queue_sq_tail_ptr = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_head(u32 val)
+{
+ union U_DMA_QUEUE_CQ_HEAD_PTR reg_val;
+
+ reg_val.bits.dma_queue_cq_head_ptr = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG, reg_val.u32);
+}
+
+void set_dma_queue_sq_base_l(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_sq_base_h(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_H_0_REG, val);
+}
+
+void set_dma_queue_cq_base_l(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_cq_base_h(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_H_0_REG, val);
+}
+
+static void set_dma_queue_sq_depth(u32 val)
+{
+ union U_DMA_QUEUE_SQ_DEPTH reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG);
+ reg_val.bits.dma_queue_sq_depth = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_depth(u32 val)
+{
+ union U_DMA_QUEUE_CQ_DEPTH reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG);
+ reg_val.bits.dma_queue_cq_depth = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_arb_weight(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_arb_weight = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_drct_sel(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_cq_drct_sel = val;
+ reg_val.bits.dma_queue_sq_drct_sel = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_sq_tail(u32 *val)
+{
+ union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG);
+ *val = reg_val.bits.dma_queue_sq_tail_ptr;
+}
+
+static void get_dma_queue_cq_tail(u32 *val)
+{
+ union U_DMA_QUEUE_CQ_TAIL_PTR reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG);
+ *val = reg_val.bits.dma_queue_cq_tail_ptr;
+}
+
+static void get_dma_queue_sq_head(u32 *val)
+{
+ u32 reg_val;
+
+ reg_val = pcie_dma_read(PCIE_DMA_QUEUE_SQ_STS_0_REG);
+ /* dma_queue_sq_head_ptr bit[15:0] */
+ *val = reg_val & 0xFFFF;
+}
+
+static void set_dma_queue_err_abort(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_sq_pa_lkp_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_proc_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_drop_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_cfg_err_abort_en = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_flr_disable(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_FLR_DISABLE_REG, val);
+}
+
+static void clear_dma_queue_int_chk(u32 mask)
+{
+ u32 int_sts;
+
+ (void)get_dma_queue_int_sts(&int_sts);
+ if (int_sts & mask)
+ (void)set_dma_queue_int_sts(mask);
+}
+
+s32 check_dma_queue_state(u32 state, u32 flag)
+{
+ u32 dma_state = 0;
+ unsigned long timeout;
+
+ BMA_LOG(DLOG_DEBUG, "state:%u, flag:%u\n", state, flag);
+
+ timeout = jiffies + TIMER_INTERVAL_CHECK;
+
+ while (1) {
+ get_dma_queue_fsm_sts(&dma_state);
+ BMA_LOG(DLOG_DEBUG, "DMA stats[%u]\n", dma_state);
+ // Flag is 0 and state does not equal to target value
+ // OR Flag is 1 and state is equal to target value
+ if ((!flag && dma_state != state) || (flag && dma_state == state))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ BMA_LOG(DLOG_DEBUG, "Wait stats[%u] fail\n", state);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static s32 reset_dma(void)
+{
+ u32 dma_state = 0;
+
+ /* get dma channel fsm */
+ check_dma_queue_state(WAIT_STATE, FALSE);
+ get_dma_queue_fsm_sts(&dma_state);
+ BMA_LOG(DLOG_DEBUG, "dma_state:%u\n", dma_state);
+ switch (dma_state) {
+ /* idle status, dma channel need no reset */
+ case IDLE_STATE:
+ return 0;
+ case RUN_STATE:
+ pause_dma_queue(ENABLE);
+ fallthrough;
+ case ABORT_STATE:
+ case CPL_STATE:
+ enable_dma_queue(DISABLE);
+ if (check_dma_queue_state(RUN_STATE, FALSE))
+ return -ETIMEDOUT;
+ fallthrough;
+ case PAUSE_STATE:
+ case HALT_STATE:
+ set_dma_queue_sq_tail(0);
+ set_dma_queue_cq_head(0);
+ reset_dma_queue(ENABLE);
+ pause_dma_queue(DISABLE);
+ if (check_dma_queue_state(IDLE_STATE, TRUE))
+ return -ETIMEDOUT;
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void init_dma(void)
+{
+ /* set dma channel sq tail */
+ set_dma_queue_sq_tail(0);
+ /* set dma channel cq head */
+ set_dma_queue_cq_head(0);
+ /* set dma queue drct sel */
+ set_dma_queue_drct_sel(DRC_LOCAL);
+ /* set dma channel sq depth */
+ set_dma_queue_sq_depth(SQ_DEPTH - 1);
+ /* set dma channel cq depth */
+ set_dma_queue_cq_depth(CQ_DEPTH - 1);
+ /* DMA does not process FLR; only the CPU handles FLR */
+ set_dma_queue_flr_disable(0x1);
+ /* set dma queue arb weight */
+ set_dma_queue_arb_weight(0x1F);
+ /* clear dma queue int status */
+ set_dma_queue_int_sts(0x1FFF);
+ /* set dma queue int mask */
+ set_dma_queue_err_int_msk(0x0);
+ set_dma_queue_int_msk(0x0);
+ /* set dma queue abort err en */
+ set_dma_queue_err_abort(ENABLE);
+ /* enable dma channel en */
+ enable_dma_queue(ENABLE);
+}
+
+s32 wait_done_dma_queue(unsigned long timeout)
+{
+ struct dma_ch_cq_s *p_cur_last_cq;
+ struct dma_ch_cq_s *p_dma_cq;
+ unsigned long end;
+ u32 sq_tail;
+ u32 sq_valid;
+ u32 cq_tail;
+ u32 cq_valid;
+
+ p_dma_cq = (struct dma_ch_cq_s *)((&get_bma_dev()->edma_host)->edma_cq_addr);
+ end = jiffies + timeout;
+
+ while (time_before(jiffies, end)) {
+ (void)get_dma_queue_sq_tail(&sq_tail);
+ (void)get_dma_queue_cq_tail(&cq_tail);
+
+ cq_valid = (cq_tail + CQ_DEPTH - 1) % (CQ_DEPTH);
+ p_cur_last_cq = &p_dma_cq[cq_valid];
+ sq_valid = (sq_tail + SQ_DEPTH - 1) % (SQ_DEPTH);
+ BMA_LOG(DLOG_DEBUG,
+ "sq_tail %d, cq_tail %d, cq_valid %d, sq_valid %d, p_cur_last_cq->sqhd %d\n",
+ sq_tail, cq_tail, cq_valid, sq_valid, p_cur_last_cq->sqhd);
+ if (p_cur_last_cq->sqhd == sq_valid) {
+ set_dma_queue_cq_head(cq_valid);
+ return 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static s32 submit_dma_queue_sq(u32 dir, struct bspveth_dmal pdmalbase_v, u32 pf)
+{
+ u32 sq_tail;
+ u32 sq_head;
+ u32 sq_available;
+ struct dma_ch_sq_s sq_submit;
+ struct dma_ch_sq_s *p_dma_sq;
+
+ p_dma_sq = (struct dma_ch_sq_s *)((&get_bma_dev()->edma_host)->edma_sq_addr);
+ (void)get_dma_queue_sq_tail(&sq_tail);
+ (void)get_dma_queue_sq_head(&sq_head);
+ sq_available = SQ_DEPTH - 1 - (((sq_tail - sq_head) + SQ_DEPTH) % SQ_DEPTH);
+ if (sq_available < 1) {
+ BMA_LOG(DLOG_DEBUG, "cannot process %u descriptors, try again later\n", 1);
+ return -1;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail get %d, sq_head %d, sq_availble %d\n",
+ sq_tail, sq_head, sq_availble);
+
+ (void)memset(&sq_submit, 0, sizeof(sq_submit));
+ if (dir == DIR_H2B)
+ sq_submit.opcode = DMA_READ;
+ else
+ sq_submit.opcode = DMA_WRITE;
+
+ BMA_LOG(DLOG_DEBUG, "PF: %u\n", pf);
+ sq_submit.ldie = ENABLE;
+ sq_submit.rdie = ENABLE;
+ sq_submit.attr &= (~0x2); /* SO(Strong Ordering) */
+ sq_submit.pf = pf & 0x7; /* 0x7 */
+ sq_submit.p3p4 = (pf >> 3) & 0x3; /* 0x3 */
+ sq_submit.length = pdmalbase_v.len;
+ sq_submit.src_addr_l = pdmalbase_v.slow;
+ sq_submit.src_addr_h = pdmalbase_v.shi;
+ sq_submit.dst_addr_l = pdmalbase_v.dlow;
+ sq_submit.dst_addr_h = pdmalbase_v.dhi;
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, dir %d, op %d, length %d\n", dir,
+ sq_submit.opcode, sq_submit.length);
+
+ memcpy(p_dma_sq + sq_tail, &sq_submit, sizeof(sq_submit));
+ sq_tail = (sq_tail + 1) % SQ_DEPTH;
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail change %d,\n", sq_tail);
+ wmb(); /* memory barriers. */
+
+ (void)set_dma_queue_sq_tail(sq_tail);
+
+ return 0;
+}
+
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer)
+{
+ struct bspveth_dmal *pdmalbase_v;
+ u32 dmal_cnt;
+ s32 ret;
+ int i;
+
+ if (!dma_transfer) {
+ BMA_LOG(DLOG_DEBUG, "dma_transfer is NULL.\n");
+ return -EFAULT;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "transfer dma queue.\n");
+
+ /* clear local done int */
+ clear_dma_queue_int_chk(DMA_DONE_MASK);
+
+ pdmalbase_v = dma_transfer->pdmalbase_v;
+ dmal_cnt = dma_transfer->dmal_cnt;
+ for (i = 0; i < dmal_cnt; i++)
+ submit_dma_queue_sq(dma_transfer->dir, pdmalbase_v[i],
+ get_bma_dev()->bma_pci_dev->pdev->devfn);
+
+ (void)set_dma_queue_int_msk(DMA_DONE_UNMASK);
+ (void)set_dma_queue_err_int_msk(DMA_ERR_UNMASK);
+ (void)enable_dma_queue(ENABLE);
+
+ ret = wait_done_dma_queue(DMA_TMOUT);
+ if (ret)
+ BMA_LOG(DLOG_DEBUG, "EP DMA: dma wait timeout");
+
+ return ret;
+}
+
+void reset_edma_host(struct edma_host_s *edma_host)
+{
+ unsigned long flags = 0;
+ int count = 0;
+
+ if (!edma_host)
+ return;
+
+ spin_lock_irqsave(&edma_host->reg_lock, flags);
+
+ while (count++ < MAX_RESET_DMA_TIMES) {
+ if (reset_dma() == 0) {
+ BMA_LOG(DLOG_DEBUG, "reset dma successfully\n");
+ init_dma();
+ break;
+ }
+
+ mdelay(DELAY_BETWEEN_RESET_DMA);
+ }
+
+ spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+ BMA_LOG(DLOG_DEBUG, "reset dma count=%d\n", count);
+}
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
new file mode 100644
index 000000000000..0cf449c0ae00
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_QUEUE_H
+#define EDMA_QUEUE_H
+#include "edma_host.h"
+
+s32 check_dma_queue_state(u32 state, u32 flag);
+void set_dma_queue_sq_base_l(u32 val);
+void set_dma_queue_sq_base_h(u32 val);
+void set_dma_queue_cq_base_l(u32 val);
+void set_dma_queue_cq_base_h(u32 val);
+void reset_edma_host(struct edma_host_s *edma_host);
+int transfer_edma_host(struct edma_host_s *host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *transfer);
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer);
+#endif
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
new file mode 100644
index 000000000000..c4e056a92bc8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_REG_H
+#define EDMA_REG_H
+
+#define PORT_EP 0
+#define PORT_RP 1
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define TRUE 1
+#define FALSE 0
+
+/* core0:x2/x1 core1:x1 */
+#define PCIE_CORE_NUM 2
+#define PCIE_REG_OFFSET 0x100000U
+#define PCIE_REG_SIZE 0x100000
+
+#define GEN1 0x1
+#define GEN2 0x2
+#define GEN3 0x3
+#define GEN4 0x4
+
+#define PCIE_ADDR_H_SHIFT_32 32
+#define PCIE_ADDR_L_32_MASK 0xFFFFFFFF
+
+#define AP_DMA_BIT BIT(5)
+#define AP_MASK_ALL 0x3FF
+#define AP_DMA_CHAN_REG_SIZE 0x100
+
+/********************************************************************************************/
+/* PCIE reg base */
+/********************************************************************************************/
+#define PCIE_BASE_ADDR 0x1E100000U
+#define AP_DMA_REG 0x10000U
+#define AP_IOB_TX_REG_BASE 0x0U
+#define AP_IOB_RX_REG_BASE 0x4000U
+#define AP_GLOBAL_REG_BASE 0x8000U
+
+/********************************************************************************************/
+/* PCIE AP DMA REG */
+/********************************************************************************************/
+#define PCIE_DMA_EP_INT_MSK_REG 0x24 /* DMA_EP_INT_MSK */
+#define PCIE_DMA_EP_INT_REG 0x28 /* DMA_EP_INT */
+#define PCIE_DMA_EP_INT_STS_REG 0x2C /* DMA_EP_INT_STS */
+#define PCIE_DMA_FLR_DISABLE_REG 0xA00 /* DMA_FLR_DISABLE */
+#define PCIE_DMA_QUEUE_SQ_BASE_L_0_REG 0x2000 /* DMA Queue SQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_SQ_BASE_H_0_REG 0x2004 /* DMA Queue SQ Base Address High Register */
+#define PCIE_DMA_QUEUE_SQ_DEPTH_0_REG 0x2008 /* DMA Queue SQ Depth */
+#define PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG 0x200C /* DMA Queue SQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_L_0_REG 0x2010 /* DMA Queue CQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_H_0_REG 0x2014 /* DMA Queue CQ Base Address High Register */
+#define PCIE_DMA_QUEUE_CQ_DEPTH_0_REG 0x2018 /* DMA Queue CQ Depth */
+#define PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG 0x201C /* DMA Queue CQ Head Pointer Register */
+#define PCIE_DMA_QUEUE_CTRL0_0_REG 0x2020 /* DMA Queue control Register 0 */
+#define PCIE_DMA_QUEUE_CTRL1_0_REG 0x2024 /* DMA Queue control Register 1 */
+#define PCIE_DMA_QUEUE_FSM_STS_0_REG 0x2030 /* DMA Queue FSM Status Register */
+#define PCIE_DMA_QUEUE_SQ_STS_0_REG 0x2034 /* DMA Queue SQ and CQ status Register */
+#define PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG 0x203C /* DMA Queue CQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_INT_STS_0_REG 0x2040 /* DMA Queue Interrupt Status */
+#define PCIE_DMA_QUEUE_INT_MSK_0_REG 0x2044 /* DMA Queue Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_ERR_INT_STS_0_REG 0x2048 /* DMA Queue Err Interrupt Status */
+#define PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG 0x204C /* DMA Queue Err Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_INT_RO_0_REG 0x206C /* DMA Queue Interrupt RO Register */
+
+/********************************************************************************************/
+/* PCIE AP_GLOBAL_REG */
+/********************************************************************************************/
+#define PCIE_CE_ENA 0x0008
+#define PCIE_UNF_ENA 0x0010
+#define PCIE_UF_ENA 0x0018
+
+#define PCIE_MSI_MASK 0x00F4
+#define PORT_INTX_ASSERT_MASK 0x01B0
+#define PORT_INTX_DEASSERT_MASK 0x01B4
+
+#define PCIE_AP_NI_ENA 0x0100
+#define PCIE_AP_CE_ENA 0x0104
+#define PCIE_AP_UNF_ENA 0x0108
+#define PCIE_AP_UF_ENA 0x010c
+#define PCIE_AP_NI_MASK 0x0110
+#define PCIE_AP_CE_MASK 0x0114
+#define PCIE_AP_UNF_MASK 0x0118
+#define PCIE_AP_UF_MASK 0x011C
+#define PCIE_AP_NI_STATUS 0x0120
+#define PCIE_AP_CE_STATUS 0x0124
+#define PCIE_AP_UNF_STATUS 0x0128
+#define PCIE_AP_UF_STATUS 0x012C
+#define PCIE_CORE_NI_ENA 0x0160
+#define PCIE_CORE_CE_ENA 0x0164
+#define PCIE_CORE_UNF_ENA 0x0168
+#define PCIE_CORE_UF_ENA 0x016c
+
+#define AP_PORT_EN_REG 0x0800
+#define AP_APB_SYN_RST 0x0810
+#define AP_AXI_SYN_RST 0x0814
+#define AP_IDLE 0x0C08
+
+/********************************************************************************************/
+/* PCIE AP_IOB_RX_COM_REG Reg */
+/********************************************************************************************/
+#define IOB_RX_AML_SNOOP 0x1AAC
+#define IOB_RX_MSI_INT_CTRL 0x1040
+
+#define IOB_RX_MSI_INT_ADDR_HIGH 0x1044
+#define IOB_RX_MSI_INT_ADDR_LOW 0x1048
+
+#define IOB_RX_PAB_SMMU_BYPASS_CTRL 0x2004
+
+#define IOB_RX_DMA_REG_REMAP_0 0x0E30
+#define IOB_RX_DMA_REG_REMAP_1 0x0E34
+
+#endif /* EDMA_REG_H */
diff --git a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
index d1df99b0c9fd..8d284d5f6e62 100644
--- a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
+++ b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
@@ -47,6 +47,17 @@ enum intr_mod {
INTR_ENABLE = 1,
};
+enum addr_type {
+ TYPE_EDMA_ADDR = 0,
+ TYPE_VETH_ADDR = 1,
+};
+
+enum pci_type_e {
+ PCI_TYPE_UNKNOWN,
+ PCI_TYPE_171x,
+ PCI_TYPE_1712
+};
+
struct bma_dma_addr_s {
dma_addr_t dma_addr;
u32 dma_data_len;
@@ -66,10 +77,28 @@ union transfer_u {
struct dmalist_transfer_s list;
};
+struct bspveth_dmal {
+ u32 chl;
+ u32 len;
+ u32 slow;
+ u32 shi;
+ u32 dlow;
+ u32 dhi;
+};
+
struct bma_dma_transfer_s {
enum dma_type_e type;
enum dma_direction_e dir;
union transfer_u transfer;
+ struct bspveth_dmal *pdmalbase_v;
+ u32 dmal_cnt;
+};
+
+struct bma_map_addr_s {
+ enum pci_type_e pci_type;
+ u32 host_number;
+ enum addr_type addr_type;
+ u32 addr;
};
int bma_intf_register_int_notifier(struct notifier_block *nb);
@@ -91,4 +120,21 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len);
unsigned int bma_cdev_check_recv(void *handle);
void *bma_cdev_get_wait_queue(void *handle);
int bma_intf_check_edma_supported(void);
+
+enum pci_type_e get_pci_type(void);
+void set_pci_type(enum pci_type_e type);
+
+int bma_intf_get_host_number(unsigned int *host_number);
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr);
+
+#define HOST_NUMBER_0 0
+#define HOST_NUMBER_1 1
+
+#define EDMA_1711_HOST0_ADDR 0x84810000
+#define VETH_1711_HOST0_ADDR 0x84820000
+#define EDMA_1712_HOST0_ADDR 0x85400000
+#define VETH_1712_HOST0_ADDR 0x85410000
+#define EDMA_1712_HOST1_ADDR 0x87400000
+#define VETH_1712_HOST1_ADDR 0x87410000
+
#endif
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
index 0d82ee6f7c83..745d83b431f8 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
@@ -23,7 +23,7 @@
#ifdef DRV_VERSION
#define KBOX_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define KBOX_VERSION "0.3.10"
+#define KBOX_VERSION "0.4.0"
#endif
#define UNUSED(x) (x = x)
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
index 9d918edae703..774229ae8dd1 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
@@ -495,6 +495,11 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
int err = 0;
u8 *shmq_head_p = NULL;
struct bspveth_shmq_hd *shmq_head = NULL;
+ phys_addr_t veth_address = 0;
+
+ err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (err != 0)
+ goto failed;
if (!pvethdev)
return BSP_ERR_NULL_POINTER;
@@ -526,7 +531,7 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
+ SHMDMAL_OFFSET);
pvethdev->ptx_queue[qid]->pdmalbase_p =
- (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC +
+ (u8 *)(u64)(veth_address +
MAX_SHAREQUEUE_SIZE * qid +
SHMDMAL_OFFSET);
@@ -851,6 +856,11 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
int qid, i, err = 0;
struct bspveth_shmq_hd *shmq_head = NULL;
u8 *shmq_head_p = NULL;
+ phys_addr_t veth_address = 0;
+
+ err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (err != 0)
+ goto failed;
if (!pvethdev)
return BSP_ERR_NULL_POINTER;
@@ -885,7 +895,7 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
+ SHMDMAL_OFFSET);
pvethdev->prx_queue[qid]->pdmalbase_p =
- (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC
+ (u8 *)(u64)(veth_address
+ MAX_SHAREQUEUE_SIZE * (qid + 1)
+ SHMDMAL_OFFSET);
memset(pvethdev->prx_queue[qid]->pdmalbase_v, 0,
@@ -1236,6 +1246,8 @@ void veth_netdev_func_init(struct net_device *dev)
{
struct tag_pcie_comm_priv *priv =
(struct tag_pcie_comm_priv *)netdev_priv(dev);
+ u32 host_number = 0;
+ int ret = 0;
/*9C:7D:A3:28:6F:F9*/
unsigned char veth_mac[ETH_ALEN] = {0x9C, 0x7D, 0xA3, 0x28, 0x6F, 0xF9};
@@ -1243,6 +1255,12 @@ void veth_netdev_func_init(struct net_device *dev)
ether_setup(dev);
+ ret = bma_intf_get_host_number(&host_number);
+ if (ret < 0) {
+ VETH_LOG(DLOG_ERROR, "bma_intf_get_host_number failed!\n");
+ return;
+ }
+
dev->netdev_ops = &veth_ops;
dev->watchdog_timeo = BSPVETH_NET_TIMEOUT;
@@ -1257,6 +1275,7 @@ void veth_netdev_func_init(struct net_device *dev)
memset(priv, 0, sizeof(struct tag_pcie_comm_priv));
strncpy(priv->net_type, MODULE_NAME, NET_TYPE_LEN);
+ veth_mac[ETH_ALEN - 1] = (host_number == 0 ? 0xF9 : 0xFB);
eth_hw_addr_set(dev, veth_mac);
VETH_LOG(DLOG_DEBUG, "set veth MAC addr OK\n");
@@ -2226,6 +2245,8 @@ s32 __start_dmalist_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, u32 type)
dma_transfer.type = DMA_LIST;
dma_transfer.transfer.list.dma_addr =
(dma_addr_t)prxtx_queue->pdmalbase_p;
+ dma_transfer.pdmalbase_v = prxtx_queue->pdmalbase_v;
+ dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
ret = bma_intf_start_dma(g_bspveth_dev.bma_priv, &dma_transfer);
if (ret < 0)
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
index 242d3ec128d3..f8b7e2f8d604 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
@@ -31,7 +31,7 @@ extern "C" {
#ifdef DRV_VERSION
#define VETH_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define VETH_VERSION "0.3.10"
+#define VETH_VERSION "0.4.0"
#endif
#define MODULE_NAME "veth"
@@ -67,7 +67,6 @@ extern "C" {
#define SYSCTL_REG_SIZE (0x1000)
#define PCIE1_REG_BASE (0x29000000)
#define PCIE1_REG_SIZE (0x1000)
-#define VETH_SHAREPOOL_BASE_INBMC (0x84820000)
#define VETH_SHAREPOOL_SIZE (0xdf000)
#define VETH_SHAREPOOL_OFFSET (0x10000)
#define MAX_SHAREQUEUE_SIZE (0x20000)
@@ -261,15 +260,6 @@ struct bspveth_dma_bd {
u32 off;
};
-struct bspveth_dmal {
- u32 chl;
- u32 len;
- u32 slow;
- u32 shi;
- u32 dlow;
- u32 dhi;
-};
-
struct bspveth_rxtx_q {
#ifndef VETH_BMC
struct bspveth_dma_bd *pbdbase_v;
--
2.33.0

[openeuler:OLK-6.6 2666/2666] drivers/usb/dwc3/core.c:1174:16: warning: variable 'hw_mode' set but not used
by kernel test robot 11 Aug '25
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 09d0a2e6b14e15a741ad6b479c7dced36608a9ff
commit: 381994c39e251eeb2cecf06200ff200fdd5e06ff [2666/2666] usb: dwc3: core: Prevent phy suspend during init
config: x86_64-buildonly-randconfig-2004-20250811 (https://download.01.org/0day-ci/archive/20250811/202508111054.kROi2W5N-lkp@…)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250811/202508111054.kROi2W5N-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508111054.kROi2W5N-lkp@intel.com/
All warnings (new ones prefixed by >>):
In file included from drivers/usb/dwc3/core.c:24:
In file included from include/linux/dma-mapping.h:11:
In file included from include/linux/scatterlist.h:8:
In file included from include/linux/mm.h:2235:
include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
522 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
| ~~~~~~~~~~~ ^ ~~~
>> drivers/usb/dwc3/core.c:1174:16: warning: variable 'hw_mode' set but not used [-Wunused-but-set-variable]
1174 | unsigned int hw_mode;
| ^
2 warnings generated.
vim +/hw_mode +1174 drivers/usb/dwc3/core.c
0a0e16cf4375dc Stanley Chang 2023-12-03 1165
941f918ecfa757 Felipe Balbi 2016-10-14 1166 /**
941f918ecfa757 Felipe Balbi 2016-10-14 1167 * dwc3_core_init - Low-level initialization of DWC3 Core
941f918ecfa757 Felipe Balbi 2016-10-14 1168 * @dwc: Pointer to our controller context structure
941f918ecfa757 Felipe Balbi 2016-10-14 1169 *
941f918ecfa757 Felipe Balbi 2016-10-14 1170 * Returns 0 on success otherwise negative errno.
941f918ecfa757 Felipe Balbi 2016-10-14 1171 */
941f918ecfa757 Felipe Balbi 2016-10-14 1172 static int dwc3_core_init(struct dwc3 *dwc)
941f918ecfa757 Felipe Balbi 2016-10-14 1173 {
9ba3aca8fe8231 Thinh Nguyen 2019-08-09 @1174 unsigned int hw_mode;
941f918ecfa757 Felipe Balbi 2016-10-14 1175 u32 reg;
941f918ecfa757 Felipe Balbi 2016-10-14 1176 int ret;
941f918ecfa757 Felipe Balbi 2016-10-14 1177
9ba3aca8fe8231 Thinh Nguyen 2019-08-09 1178 hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
9ba3aca8fe8231 Thinh Nguyen 2019-08-09 1179
941f918ecfa757 Felipe Balbi 2016-10-14 1180 /*
941f918ecfa757 Felipe Balbi 2016-10-14 1181 * Write Linux Version Code to our GUID register so it's easy to figure
941f918ecfa757 Felipe Balbi 2016-10-14 1182 * out which kernel version a bug was found.
941f918ecfa757 Felipe Balbi 2016-10-14 1183 */
941f918ecfa757 Felipe Balbi 2016-10-14 1184 dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
941f918ecfa757 Felipe Balbi 2016-10-14 1185
98112041bcca16 Roger Quadros 2018-02-12 1186 ret = dwc3_phy_setup(dwc);
941f918ecfa757 Felipe Balbi 2016-10-14 1187 if (ret)
d2f197822d5807 Johan Hovold 2023-04-04 1188 return ret;
941f918ecfa757 Felipe Balbi 2016-10-14 1189
98112041bcca16 Roger Quadros 2018-02-12 1190 if (!dwc->ulpi_ready) {
98112041bcca16 Roger Quadros 2018-02-12 1191 ret = dwc3_core_ulpi_init(dwc);
63130462c919ec Ferry Toth 2022-12-05 1192 if (ret) {
63130462c919ec Ferry Toth 2022-12-05 1193 if (ret == -ETIMEDOUT) {
63130462c919ec Ferry Toth 2022-12-05 1194 dwc3_core_soft_reset(dwc);
63130462c919ec Ferry Toth 2022-12-05 1195 ret = -EPROBE_DEFER;
63130462c919ec Ferry Toth 2022-12-05 1196 }
d2f197822d5807 Johan Hovold 2023-04-04 1197 return ret;
63130462c919ec Ferry Toth 2022-12-05 1198 }
98112041bcca16 Roger Quadros 2018-02-12 1199 dwc->ulpi_ready = true;
98112041bcca16 Roger Quadros 2018-02-12 1200 }
4878a02898bab1 Sebastian Andrzej Siewior 2011-10-31 1201
98112041bcca16 Roger Quadros 2018-02-12 1202 if (!dwc->phys_ready) {
98112041bcca16 Roger Quadros 2018-02-12 1203 ret = dwc3_core_get_phy(dwc);
f54edb539c1167 Felipe Balbi 2017-06-05 1204 if (ret)
d2f197822d5807 Johan Hovold 2023-04-04 1205 goto err_exit_ulpi;
98112041bcca16 Roger Quadros 2018-02-12 1206 dwc->phys_ready = true;
98112041bcca16 Roger Quadros 2018-02-12 1207 }
98112041bcca16 Roger Quadros 2018-02-12 1208
1d72fab476567a Johan Hovold 2023-04-04 1209 ret = dwc3_phy_init(dwc);
1d72fab476567a Johan Hovold 2023-04-04 1210 if (ret)
1d72fab476567a Johan Hovold 2023-04-04 1211 goto err_exit_ulpi;
8cfac9a6744fcb Li Jun 2021-09-08 1212
98112041bcca16 Roger Quadros 2018-02-12 1213 ret = dwc3_core_soft_reset(dwc);
98112041bcca16 Roger Quadros 2018-02-12 1214 if (ret)
1d72fab476567a Johan Hovold 2023-04-04 1215 goto err_exit_phy;
f54edb539c1167 Felipe Balbi 2017-06-05 1216
941f918ecfa757 Felipe Balbi 2016-10-14 1217 dwc3_core_setup_global_control(dwc);
c499ff71ff2a28 Felipe Balbi 2016-05-16 1218 dwc3_core_num_eps(dwc);
0ffcaf3798bfd8 Felipe Balbi 2013-12-19 1219
3497b9a5c8c3d4 Li Jun 2022-06-07 1220 /* Set power down scale of suspend_clk */
3497b9a5c8c3d4 Li Jun 2022-06-07 1221 dwc3_set_power_down_clk_scale(dwc);
3497b9a5c8c3d4 Li Jun 2022-06-07 1222
c499ff71ff2a28 Felipe Balbi 2016-05-16 1223 /* Adjust Frame Length */
c499ff71ff2a28 Felipe Balbi 2016-05-16 1224 dwc3_frame_length_adjustment(dwc);
c499ff71ff2a28 Felipe Balbi 2016-05-16 1225
7bee3188388901 Balaji Prakash J 2021-08-31 1226 /* Adjust Reference Clock Period */
7bee3188388901 Balaji Prakash J 2021-08-31 1227 dwc3_ref_clk_period(dwc);
7bee3188388901 Balaji Prakash J 2021-08-31 1228
d9612c2f0449e2 Pengbo Mu 2018-07-23 1229 dwc3_set_incr_burst_type(dwc);
d9612c2f0449e2 Pengbo Mu 2018-07-23 1230
8018018d9c5647 Dan Carpenter 2023-05-04 1231 ret = dwc3_phy_power_on(dwc);
1d72fab476567a Johan Hovold 2023-04-04 1232 if (ret)
1d72fab476567a Johan Hovold 2023-04-04 1233 goto err_exit_phy;
c499ff71ff2a28 Felipe Balbi 2016-05-16 1234
c499ff71ff2a28 Felipe Balbi 2016-05-16 1235 ret = dwc3_event_buffers_setup(dwc);
c499ff71ff2a28 Felipe Balbi 2016-05-16 1236 if (ret) {
c499ff71ff2a28 Felipe Balbi 2016-05-16 1237 dev_err(dwc->dev, "failed to setup event buffers\n");
1d72fab476567a Johan Hovold 2023-04-04 1238 goto err_power_off_phy;
c499ff71ff2a28 Felipe Balbi 2016-05-16 1239 }
c499ff71ff2a28 Felipe Balbi 2016-05-16 1240
06281d460fc5d8 John Youn 2016-08-22 1241 /*
06281d460fc5d8 John Youn 2016-08-22 1242 * ENDXFER polling is available on version 3.10a and later of
06281d460fc5d8 John Youn 2016-08-22 1243 * the DWC_usb3 controller. It is NOT available in the
06281d460fc5d8 John Youn 2016-08-22 1244 * DWC_usb31 controller.
06281d460fc5d8 John Youn 2016-08-22 1245 */
9af21dd6faeba5 Thinh Nguyen 2020-04-11 1246 if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
06281d460fc5d8 John Youn 2016-08-22 1247 reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
06281d460fc5d8 John Youn 2016-08-22 1248 reg |= DWC3_GUCTL2_RST_ACTBITLATER;
06281d460fc5d8 John Youn 2016-08-22 1249 dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
06281d460fc5d8 John Youn 2016-08-22 1250 }
06281d460fc5d8 John Youn 2016-08-22 1251
63d7f9810a3810 Piyush Mehta 2022-09-20 1252 /*
63d7f9810a3810 Piyush Mehta 2022-09-20 1253 * When configured in HOST mode, after issuing U3/L2 exit controller
63d7f9810a3810 Piyush Mehta 2022-09-20 1254 * fails to send proper CRC checksum in CRC5 feild. Because of this
63d7f9810a3810 Piyush Mehta 2022-09-20 1255 * behaviour Transaction Error is generated, resulting in reset and
63d7f9810a3810 Piyush Mehta 2022-09-20 1256 * re-enumeration of usb device attached. All the termsel, xcvrsel,
63d7f9810a3810 Piyush Mehta 2022-09-20 1257 * opmode becomes 0 during end of resume. Enabling bit 10 of GUCTL1
63d7f9810a3810 Piyush Mehta 2022-09-20 1258 * will correct this problem. This option is to support certain
63d7f9810a3810 Piyush Mehta 2022-09-20 1259 * legacy ULPI PHYs.
63d7f9810a3810 Piyush Mehta 2022-09-20 1260 */
63d7f9810a3810 Piyush Mehta 2022-09-20 1261 if (dwc->resume_hs_terminations) {
63d7f9810a3810 Piyush Mehta 2022-09-20 1262 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
63d7f9810a3810 Piyush Mehta 2022-09-20 1263 reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
63d7f9810a3810 Piyush Mehta 2022-09-20 1264 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
63d7f9810a3810 Piyush Mehta 2022-09-20 1265 }
63d7f9810a3810 Piyush Mehta 2022-09-20 1266
9af21dd6faeba5 Thinh Nguyen 2020-04-11 1267 if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
65db7a0c981637 William Wu 2017-04-19 1268 reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
65db7a0c981637 William Wu 2017-04-19 1269
0bb39ca1ad8758 John Youn 2016-10-12 1270 /*
65db7a0c981637 William Wu 2017-04-19 1271 * Enable hardware control of sending remote wakeup
65db7a0c981637 William Wu 2017-04-19 1272 * in HS when the device is in the L1 state.
0bb39ca1ad8758 John Youn 2016-10-12 1273 */
9af21dd6faeba5 Thinh Nguyen 2020-04-11 1274 if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
0bb39ca1ad8758 John Youn 2016-10-12 1275 reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
65db7a0c981637 William Wu 2017-04-19 1276
843714bb37d9a3 Jack Pham 2021-08-12 1277 /*
843714bb37d9a3 Jack Pham 2021-08-12 1278 * Decouple USB 2.0 L1 & L2 events which will allow for
843714bb37d9a3 Jack Pham 2021-08-12 1279 * gadget driver to only receive U3/L2 suspend & wakeup
843714bb37d9a3 Jack Pham 2021-08-12 1280 * events and prevent the more frequent L1 LPM transitions
843714bb37d9a3 Jack Pham 2021-08-12 1281 * from interrupting the driver.
843714bb37d9a3 Jack Pham 2021-08-12 1282 */
843714bb37d9a3 Jack Pham 2021-08-12 1283 if (!DWC3_VER_IS_PRIOR(DWC3, 300A))
843714bb37d9a3 Jack Pham 2021-08-12 1284 reg |= DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT;
843714bb37d9a3 Jack Pham 2021-08-12 1285
65db7a0c981637 William Wu 2017-04-19 1286 if (dwc->dis_tx_ipgap_linecheck_quirk)
65db7a0c981637 William Wu 2017-04-19 1287 reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
65db7a0c981637 William Wu 2017-04-19 1288
7ba6b09fda5e0c Neil Armstrong 2020-02-21 1289 if (dwc->parkmode_disable_ss_quirk)
7ba6b09fda5e0c Neil Armstrong 2020-02-21 1290 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
7ba6b09fda5e0c Neil Armstrong 2020-02-21 1291
d21a797a3eeb2b Stanley Chang 2023-04-19 1292 if (dwc->parkmode_disable_hs_quirk)
d21a797a3eeb2b Stanley Chang 2023-04-19 1293 reg |= DWC3_GUCTL1_PARKMODE_DISABLE_HS;
d21a797a3eeb2b Stanley Chang 2023-04-19 1294
62b20e6e0dde8d Bin Yang 2022-02-28 1295 if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
62b20e6e0dde8d Bin Yang 2022-02-28 1296 (dwc->maximum_speed == USB_SPEED_HIGH ||
62b20e6e0dde8d Bin Yang 2022-02-28 1297 dwc->maximum_speed == USB_SPEED_FULL))
62b20e6e0dde8d Bin Yang 2022-02-28 1298 reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
62b20e6e0dde8d Bin Yang 2022-02-28 1299
0bb39ca1ad8758 John Youn 2016-10-12 1300 dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
0bb39ca1ad8758 John Youn 2016-10-12 1301 }
0bb39ca1ad8758 John Youn 2016-10-12 1302
0a0e16cf4375dc Stanley Chang 2023-12-03 1303 dwc3_config_threshold(dwc);
938a5ad1d3055c Thinh Nguyen 2018-03-16 1304
72246da40f3719 Felipe Balbi 2011-08-19 1305 return 0;
72246da40f3719 Felipe Balbi 2011-08-19 1306
1d72fab476567a Johan Hovold 2023-04-04 1307 err_power_off_phy:
1d72fab476567a Johan Hovold 2023-04-04 1308 dwc3_phy_power_off(dwc);
1d72fab476567a Johan Hovold 2023-04-04 1309 err_exit_phy:
1d72fab476567a Johan Hovold 2023-04-04 1310 dwc3_phy_exit(dwc);
d2f197822d5807 Johan Hovold 2023-04-04 1311 err_exit_ulpi:
98112041bcca16 Roger Quadros 2018-02-12 1312 dwc3_ulpi_exit(dwc);
98112041bcca16 Roger Quadros 2018-02-12 1313
72246da40f3719 Felipe Balbi 2011-08-19 1314 return ret;
72246da40f3719 Felipe Balbi 2011-08-19 1315 }
72246da40f3719 Felipe Balbi 2011-08-19 1316
:::::: The code at line 1174 was first introduced by commit
:::::: 9ba3aca8fe82318805709036bd50bee64570088b usb: dwc3: Disable phy suspend after power-on reset
:::::: TO: Thinh Nguyen <Thinh.Nguyen(a)synopsys.com>
:::::: CC: Felipe Balbi <felipe.balbi(a)linux.intel.com>
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Eduard Zingerman (2):
bpf: refactor bpf_helper_changes_pkt_data to use helper number
bpf: consider that tail calls invalidate packet pointers
include/linux/filter.h | 2 +-
kernel/bpf/core.c | 2 +-
kernel/bpf/verifier.c | 2 +-
net/core/filter.c | 65 +++++++++++++++++++-----------------------
4 files changed, 33 insertions(+), 38 deletions(-)
--
2.33.0

[PATCH OLK-5.10] net_sched: hfsc: Fix a potential UAF in hfsc_dequeue() too
by Wang Liang 11 Aug '25
From: Cong Wang <xiyou.wangcong(a)gmail.com>
stable inclusion
from stable-v5.10.237
commit 2f46d14919c39528c6e540ebc43f90055993eedc
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IC6BXK
CVE: CVE-2025-37823
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 6ccbda44e2cc3d26fd22af54c650d6d5d801addf ]
Similarly to the previous patch, we need to safeguard hfsc_dequeue()
too. But for this one, we don't have a reliable reproducer.
Fixes: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 ("Linux-2.6.12-rc2")
Reported-by: Gerrard Tai <gerrard.tai(a)starlabs.sg>
Signed-off-by: Cong Wang <xiyou.wangcong(a)gmail.com>
Reviewed-by: Jamal Hadi Salim <jhs(a)mojatatu.com>
Link: https://patch.msgid.link/20250417184732.943057-3-xiyou.wangcong@gmail.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Liang <wangliang74(a)huawei.com>
---
net/sched/sch_hfsc.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index adcf87d417ae..f0075e42df3f 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1638,10 +1638,16 @@ hfsc_dequeue(struct Qdisc *sch)
if (cl->qdisc->q.qlen != 0) {
/* update ed */
next_len = qdisc_peek_len(cl->qdisc);
- if (realtime)
- update_ed(cl, next_len);
- else
- update_d(cl, next_len);
+ /* Check queue length again since some qdisc implementations
+ * (e.g., netem/codel) might empty the queue during the peek
+ * operation.
+ */
+ if (cl->qdisc->q.qlen != 0) {
+ if (realtime)
+ update_ed(cl, next_len);
+ else
+ update_d(cl, next_len);
+ }
} else {
/* the class becomes passive */
eltree_remove(cl);
--
2.33.0

[PATCH OLK-5.10] sctp: detect and prevent references to a freed transport in sendmsg
by Wang Liang 11 Aug '25
From: Ricardo Cañuelo Navarro <rcn(a)igalia.com>
stable inclusion
from stable-v5.10.237
commit 3257386be6a7eb8a8bfc9cbfb746df4eb4fc70e8
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IC5BIR
CVE: CVE-2025-23142
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit f1a69a940de58b16e8249dff26f74c8cc59b32be upstream.
sctp_sendmsg() re-uses associations and transports when possible by
doing a lookup based on the socket endpoint and the message destination
address, and then sctp_sendmsg_to_asoc() sets the selected transport in
all the message chunks to be sent.
There's a possible race condition if another thread triggers the removal
of that selected transport, for instance, by explicitly unbinding an
address with setsockopt(SCTP_SOCKOPT_BINDX_REM), after the chunks have
been set up and before the message is sent. This can happen if the send
buffer is full, during the period when the sender thread temporarily
releases the socket lock in sctp_wait_for_sndbuf().
This causes the access to the transport data in
sctp_outq_select_transport(), when the association outqueue is flushed,
to result in a use-after-free read.
This change avoids this scenario by having sctp_transport_free() signal
the freeing of the transport, tagging it as "dead". In order to do this,
the patch restores the "dead" bit in struct sctp_transport, which was
removed in
commit 47faa1e4c50e ("sctp: remove the dead field of sctp_transport").
Then, in the scenario where the sender thread has released the socket
lock in sctp_wait_for_sndbuf(), the bit is checked again after
re-acquiring the socket lock to detect the deletion. This is done while
holding a reference to the transport to prevent it from being freed in
the process.
If the transport was deleted while the socket lock was relinquished,
sctp_sendmsg_to_asoc() will return -EAGAIN to let userspace retry the
send.
The bug was found by a private syzbot instance (see the error report [1]
and the C reproducer that triggers it [2]).
Link: https://people.igalia.com/rcn/kernel_logs/20250402__KASAN_slab-use-after-fr… [1]
Link: https://people.igalia.com/rcn/kernel_logs/20250402__KASAN_slab-use-after-fr… [2]
Cc: stable(a)vger.kernel.org
Fixes: df132eff4638 ("sctp: clear the transport of some out_chunk_list chunks in sctp_assoc_rm_peer")
Suggested-by: Xin Long <lucien.xin(a)gmail.com>
Signed-off-by: Ricardo Cañuelo Navarro <rcn(a)igalia.com>
Acked-by: Xin Long <lucien.xin(a)gmail.com>
Link: https://patch.msgid.link/20250404-kasan_slab-use-after-free_read_in_sctp_ou…
Signed-off-by: Paolo Abeni <pabeni(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Liang <wangliang74(a)huawei.com>
---
include/net/sctp/structs.h | 3 ++-
net/sctp/socket.c | 22 ++++++++++++++--------
net/sctp/transport.c | 2 ++
3 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 108eb62cdc2c..8d058b50f138 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -771,6 +771,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
+ __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -780,7 +781,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
- __u32 rto_pending:1,
+ rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ffa7515903b..279d5e9421d3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -70,8 +70,9 @@
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ long *timeo_p, size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len);
if (err)
goto err;
if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
@@ -8938,8 +8939,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
-static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
- size_t msg_len)
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ long *timeo_p, size_t msg_len)
{
struct sock *sk = asoc->base.sk;
long current_timeo = *timeo_p;
@@ -8949,7 +8951,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
- /* Increment the association's refcnt. */
+ /* Increment the transport and association's refcnt. */
+ if (transport)
+ sctp_transport_hold(transport);
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
@@ -8958,7 +8962,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
TASK_INTERRUPTIBLE);
if (asoc->base.dead)
goto do_dead;
- if (!*timeo_p)
+ if ((!*timeo_p) || (transport && transport->dead))
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
goto do_error;
@@ -8985,7 +8989,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
out:
finish_wait(&asoc->wait, &wait);
- /* Release the association's refcnt. */
+ /* Release the transport and association's refcnt. */
+ if (transport)
+ sctp_transport_put(transport);
sctp_association_put(asoc);
return err;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 60fcf31cdcfb..9c721d70df9c 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -116,6 +116,8 @@ struct sctp_transport *sctp_transport_new(struct net *net,
*/
void sctp_transport_free(struct sctp_transport *transport)
{
+ transport->dead = 1;
+
/* Try to delete the heartbeat timer. */
if (del_timer(&transport->hb_timer))
sctp_transport_put(transport);
--
2.33.0
From: Mark Brown <broonie(a)kernel.org>
commit 33060a64901e61f09ea6faffe367551df18a54c3 upstream.
We use a local variable hwcap to refer to the element of the hwcaps array
which we are currently checking. When checking for the relevant hwcap bit
being set in testing we were dereferencing hwcaps rather than hwcap in
fetching the AT_HWCAP to use, which is perfectly valid C but means we were
always checking the bit was set in the hwcap for whichever feature is first
in the array. Remove the stray s.
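The stray 's' is easy to miss because both spellings compile cleanly: an array
name decays to a pointer to its first element, so hwcaps->at_hwcap is simply
hwcaps[0].at_hwcap. A minimal user-space sketch of the effect, with an invented
struct whose at_hwcap values mirror AT_HWCAP (16) and AT_HWCAP2 (26); it is not
taken from the selftest itself:

#include <stdio.h>

struct hwcap_entry {
	unsigned long at_hwcap;		/* which auxval to read, AT_HWCAP or AT_HWCAP2 */
	unsigned long hwcap_bit;
};

int main(void)
{
	struct hwcap_entry hwcaps[] = {
		{ .at_hwcap = 16, .hwcap_bit = 1UL << 0 },
		{ .at_hwcap = 26, .hwcap_bit = 1UL << 5 },
	};
	struct hwcap_entry *hwcap = &hwcaps[1];	/* the entry currently being checked */

	/* Buggy form: always reads hwcaps[0].at_hwcap, prints 16. */
	printf("hwcaps->at_hwcap = %lu\n", hwcaps->at_hwcap);
	/* Fixed form: reads the current entry, prints 26. */
	printf("hwcap->at_hwcap  = %lu\n", hwcap->at_hwcap);
	return 0;
}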
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Link: https://lore.kernel.org/r/20220907113400.12982-1-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas(a)arm.com>
Signed-off-by: Qi Xi <xiqi2(a)huawei.com>
---
tools/testing/selftests/arm64/abi/hwcap.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
index 65a7b01d7cdc..190be4356069 100644
--- a/tools/testing/selftests/arm64/abi/hwcap.c
+++ b/tools/testing/selftests/arm64/abi/hwcap.c
@@ -162,7 +162,7 @@ int main(void)
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
hwcap = &hwcaps[i];
- have_hwcap = getauxval(hwcaps->at_hwcap) & hwcap->hwcap_bit;
+ have_hwcap = getauxval(hwcap->at_hwcap) & hwcap->hwcap_bit;
have_cpuinfo = cpuinfo_present(hwcap->cpuinfo);
if (have_hwcap)
--
2.33.0
Michael Kelley (1):
hv_netvsc: Preserve contiguous PFN grouping in the page buffer array
Paolo Abeni (1):
net: allow small head cache usage with large MAX_SKB_FRAGS values
Wang Liang (1):
net: use __GENKSYMS__ to revert the kabi change
drivers/net/hyperv/hyperv_net.h | 12 ++++++
drivers/net/hyperv/netvsc_drv.c | 63 ++++++++-----------------------
drivers/net/hyperv/rndis_filter.c | 24 +++---------
include/net/gro.h | 3 ++
net/core/gro.c | 3 --
net/core/skbuff.c | 12 ++++--
6 files changed, 44 insertions(+), 73 deletions(-)
--
2.33.0

[PATCH OLK-6.6 V1] sched: Support NUMA parallel scheduling for multiple processes
by Cheng Yu 11 Aug '25
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICBBNL
--------------------------------
For architectures with multiple NUMA node levels and large distances
between nodes, a better approach is to support processes running in
parallel on each NUMA node.
The usage is restricted to the following scenarios:
1. No CPU binding for user-space processes;
2. It is applicable to distributed applications, such as business
architectures with one master and multiple slaves running in
parallel;
3. The existing "qos dynamic affinity" and "qos smart grid" features
must not be used simultaneously.
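The node selection itself is a plain global round robin over the NUMA nodes,
implemented in set_task_paral_node() below. A minimal user-space sketch of the
distribution it produces (the node count and process count are invented for
illustration, and the per-node cpumask checks of the real code are omitted):

#include <stdio.h>

#define NR_NODES 4			/* hypothetical number of NUMA nodes */

static unsigned int paral_nid_last;	/* an atomic_t in the real patch */

static unsigned int pick_prefer_node(void)
{
	return paral_nid_last++ % NR_NODES;
}

int main(void)
{
	/* Eight successive process creations spread across the four nodes. */
	for (int task = 0; task < 8; task++)
		printf("process %d -> preferred node %u\n", task, pick_prefer_node());
	return 0;
}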
Signed-off-by: Cheng Yu <serein.chengyu(a)huawei.com>
---
arch/arm64/Kconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/prefer_numa.h | 9 +++++
arch/arm64/kernel/Makefile | 1 +
arch/arm64/kernel/prefer_numa.c | 51 ++++++++++++++++++++++++++
fs/proc/array.c | 3 --
include/linux/sched.h | 6 +++
init/Kconfig | 22 +++++++++++
kernel/cgroup/cpuset.c | 6 ++-
kernel/fork.c | 11 ++----
kernel/sched/core.c | 3 --
kernel/sched/debug.c | 45 ++++++++++++++++++++++-
kernel/sched/fair.c | 39 ++++++++++++++++----
kernel/sched/features.h | 4 ++
14 files changed, 178 insertions(+), 24 deletions(-)
create mode 100644 arch/arm64/include/asm/prefer_numa.h
create mode 100644 arch/arm64/kernel/prefer_numa.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5422d1502fd6..b1f550c8c82a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -105,6 +105,7 @@ config ARM64
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_SCHED_PARAL
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 3cfff0701479..3d352fb1ae57 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -209,6 +209,7 @@ CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_SCHED_STEAL=y
+CONFIG_SCHED_PARAL=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
diff --git a/arch/arm64/include/asm/prefer_numa.h b/arch/arm64/include/asm/prefer_numa.h
new file mode 100644
index 000000000000..7e579cd9355b
--- /dev/null
+++ b/arch/arm64/include/asm/prefer_numa.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_PREFER_NUMA_H
+#define __ASM_PREFER_NUMA_H
+
+#include <linux/sched.h>
+
+void set_task_paral_node(struct task_struct *p);
+
+#endif /* __ASM_PREFER_NUMA_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 3d404a2cc961..b936be9d8baa 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_IPI_AS_NMI) += ipi_nmi.o
obj-$(CONFIG_HISI_VIRTCCA_GUEST) += virtcca_cvm_guest.o virtcca_cvm_tsi.o
obj-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm_host.o
CFLAGS_patch-scs.o += -mbranch-protection=none
+obj-$(CONFIG_SCHED_PARAL) += prefer_numa.o
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
diff --git a/arch/arm64/kernel/prefer_numa.c b/arch/arm64/kernel/prefer_numa.c
new file mode 100644
index 000000000000..8dcd6c746df8
--- /dev/null
+++ b/arch/arm64/kernel/prefer_numa.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * choose a preferred NUMA node
+ *
+ * Copyright (C) 2025 Huawei Limited.
+ */
+#include <linux/nodemask.h>
+#include <linux/topology.h>
+#include <asm/prefer_numa.h>
+
+static atomic_t paral_nid_last = ATOMIC_INIT(-1);
+
+int is_prefer_numa(void)
+{
+ if (num_possible_nodes() <= 1)
+ return 0;
+
+ return 1;
+}
+
+static inline unsigned int update_sched_paral_nid(void)
+{
+ return (unsigned int)atomic_inc_return(&paral_nid_last);
+}
+
+void set_task_paral_node(struct task_struct *p)
+{
+ int nid;
+ int i = 0;
+ const cpumask_t *cpus_mask;
+
+ if (is_global_init(current))
+ return;
+
+ if (p->flags & PF_KTHREAD || p->tgid != p->pid)
+ return;
+
+ while (i < nr_node_ids) {
+ nid = update_sched_paral_nid() % nr_node_ids;
+ cpus_mask = cpumask_of_node(nid);
+
+ if (cpumask_empty(cpus_mask) ||
+ !cpumask_subset(cpus_mask, p->cpus_ptr)) {
+ i++;
+ continue;
+ }
+
+ cpumask_copy(p->prefer_cpus, cpus_mask);
+ break;
+ }
+}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index a933a878df3c..6a4b0a850dce 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -439,9 +439,6 @@ __weak void arch_proc_pid_thread_features(struct seq_file *m,
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
static void task_cpus_preferred(struct seq_file *m, struct task_struct *task)
{
- if (!dynamic_affinity_enabled())
- return;
-
seq_printf(m, "Cpus_preferred:\t%*pb\n",
cpumask_pr_args(task->prefer_cpus));
seq_printf(m, "Cpus_preferred_list:\t%*pbl\n",
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3979c34e9b83..ee10780715f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2627,6 +2627,12 @@ static inline bool dynamic_affinity_enabled(void)
{
return static_branch_unlikely(&__dynamic_affinity_switch);
}
+
+#ifdef CONFIG_SCHED_PARAL
+bool sched_paral_used(void);
+#else
+static inline bool sched_paral_used(void) { return false; }
+#endif
#endif
#ifdef CONFIG_QOS_SCHED_SMART_GRID
diff --git a/init/Kconfig b/init/Kconfig
index c8bd58347a87..925e8517a7e8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1484,6 +1484,28 @@ config SCHED_STEAL
If unsure, say N here.
+#
+# For architectures that want to enable the support for SCHED_PARAL
+#
+config ARCH_SUPPORTS_SCHED_PARAL
+ bool
+
+config SCHED_PARAL
+ bool "Parallelly schedule processes on different NUMA nodes"
+ depends on ARCH_SUPPORTS_SCHED_PARAL
+ depends on QOS_SCHED_DYNAMIC_AFFINITY
+ default n
+ help
+ By enabling this feature, processes can be scheduled in parallel
+ on various NUMA nodes to better utilize the cache in each NUMA node.
+ The usage is restricted to the following scenarios:
+ 1. No CPU binding is performed for user-space processes;
+ 2. It is applicable to distributed applications, such as business
+ architectures with one master and multiple slaves running in
+ parallel;
+ 3. The existing "qos dynamic affinity" and "qos smart grid"
+ features must not be used simultaneously.
+
config CHECKPOINT_RESTORE
bool "Checkpoint/restore support"
depends on PROC_FS
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 417827f2c043..01a9b18d80ce 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3488,7 +3488,8 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
cpumask_copy(prefer_cpus_attach, cs->prefer_cpus);
- set_prefer_cpus_ptr(task, prefer_cpus_attach);
+ if (!sched_paral_used() || !cpumask_empty(prefer_cpus_attach))
+ set_prefer_cpus_ptr(task, prefer_cpus_attach);
#endif
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
@@ -4348,7 +4349,8 @@ static void cpuset_fork(struct task_struct *task)
set_cpus_allowed_ptr(task, current->cpus_ptr);
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- set_prefer_cpus_ptr(task, current->prefer_cpus);
+ if (!sched_paral_used() || !cpumask_empty(cs->prefer_cpus))
+ set_prefer_cpus_ptr(task, current->prefer_cpus);
#endif
task->mems_allowed = current->mems_allowed;
return;
diff --git a/kernel/fork.c b/kernel/fork.c
index 96c6a9e446ac..8b2ff47de685 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -631,8 +631,7 @@ void free_task(struct task_struct *tsk)
free_kthread_struct(tsk);
bpf_task_storage_free(tsk);
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- if (dynamic_affinity_enabled())
- sched_prefer_cpus_free(tsk);
+ sched_prefer_cpus_free(tsk);
#endif
#ifdef CONFIG_QOS_SCHED_SMART_GRID
if (smart_grid_enabled())
@@ -2451,11 +2450,9 @@ __latent_entropy struct task_struct *copy_process(
#endif
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- if (dynamic_affinity_enabled()) {
- retval = sched_prefer_cpus_fork(p, current->prefer_cpus);
- if (retval)
- goto bad_fork_free;
- }
+ retval = sched_prefer_cpus_fork(p, current->prefer_cpus);
+ if (retval)
+ goto bad_fork_free;
#endif
lockdep_assert_irqs_enabled();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1b497efc763b..fab904f44c87 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -12142,9 +12142,6 @@ static int __set_prefer_cpus_ptr(struct task_struct *p,
struct rq *rq;
int ret = 0;
- if (!dynamic_affinity_enabled())
- return -EPERM;
-
if (unlikely(!p->prefer_cpus))
return -EINVAL;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 7a9e6896c699..10a19e51e9b8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -7,6 +7,10 @@
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*/
+#ifdef CONFIG_SCHED_PARAL
+#include <asm/prefer_numa.h>
+#endif
+
/*
* This allows printing both to /proc/sched_debug and
* to the console
@@ -95,6 +99,41 @@ static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
+int __weak is_prefer_numa(void)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SCHED_PARAL
+static void sched_feat_disable_paral(char *cmp)
+{
+ struct task_struct *tsk, *t;
+
+ if (strncmp(cmp, "PARAL", 5) == 0) {
+ read_lock(&tasklist_lock);
+ for_each_process(tsk) {
+ if (tsk->flags & PF_KTHREAD || is_global_init(tsk))
+ continue;
+
+ for_each_thread(tsk, t)
+ cpumask_clear(t->prefer_cpus);
+ }
+ read_unlock(&tasklist_lock);
+ }
+}
+
+static bool sched_feat_enable_paral(char *cmp)
+{
+ if (strncmp(cmp, "PARAL", 5) != 0)
+ return true;
+
+ return is_prefer_numa();
+}
+#else
+static void sched_feat_disable_paral(char *cmp) {};
+static bool sched_feat_enable_paral(char *cmp) { return true; };
+#endif /* CONFIG_SCHED_PARAL */
+
static int sched_feat_set(char *cmp)
{
int i;
@@ -111,8 +150,12 @@ static int sched_feat_set(char *cmp)
if (neg) {
sysctl_sched_features &= ~(1UL << i);
+ sched_feat_disable_paral(cmp);
sched_feat_disable(i);
} else {
+ if (!sched_feat_enable_paral(cmp))
+ return -EPERM;
+
sysctl_sched_features |= (1UL << i);
sched_feat_enable(i);
}
@@ -1045,7 +1088,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P_SCHEDSTAT(nr_wakeups_passive);
P_SCHEDSTAT(nr_wakeups_idle);
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
- if (dynamic_affinity_enabled()) {
+ if (dynamic_affinity_enabled() || sched_paral_used()) {
P_SCHEDSTAT(nr_wakeups_preferred_cpus);
P_SCHEDSTAT(nr_wakeups_force_preferred_cpus);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 71661d6c5b54..8a32d0ac4a8b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -75,6 +75,10 @@
#endif
#include <linux/sched/grid_qos.h>
+#ifdef CONFIG_SCHED_PARAL
+#include <asm/prefer_numa.h>
+#endif
+
/*
* The initial- and re-scaling of tunables is configurable
*
@@ -9057,6 +9061,12 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
}
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
+#ifdef CONFIG_SCHED_PARAL
+bool sched_paral_used(void)
+{
+ return sched_feat(PARAL);
+}
+#endif
DEFINE_STATIC_KEY_FALSE(__dynamic_affinity_switch);
@@ -9084,16 +9094,15 @@ __setup("dynamic_affinity=", dynamic_affinity_switch_setup);
static inline bool prefer_cpus_valid(struct task_struct *p)
{
- struct cpumask *prefer_cpus;
+ struct cpumask *prefer_cpus = task_prefer_cpus(p);
- if (!dynamic_affinity_enabled())
- return false;
-
- prefer_cpus = task_prefer_cpus(p);
+ if (dynamic_affinity_enabled() || sched_paral_used()) {
+ return !cpumask_empty(prefer_cpus) &&
+ !cpumask_equal(prefer_cpus, p->cpus_ptr) &&
+ cpumask_subset(prefer_cpus, p->cpus_ptr);
+ }
- return !cpumask_empty(prefer_cpus) &&
- !cpumask_equal(prefer_cpus, p->cpus_ptr) &&
- cpumask_subset(prefer_cpus, p->cpus_ptr);
+ return false;
}
static inline unsigned long taskgroup_cpu_util(struct task_group *tg,
@@ -9193,6 +9202,14 @@ static void set_task_select_cpus(struct task_struct *p, int *idlest_cpu,
}
rcu_read_unlock();
+ /* In extreme cases, it may cause uneven system load. */
+ if (sched_paral_used() && sysctl_sched_util_low_pct == 100 && nr_cpus_valid > 0) {
+ p->select_cpus = p->prefer_cpus;
+ if (sd_flag & SD_BALANCE_WAKE)
+ schedstat_inc(p->stats.nr_wakeups_preferred_cpus);
+ return;
+ }
+
/*
* Follow cases should select cpus_ptr, checking by condition of
* tg_capacity > nr_cpus_valid:
@@ -14679,6 +14696,12 @@ static void task_fork_fair(struct task_struct *p)
if (curr)
update_curr(cfs_rq);
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
+
+#ifdef CONFIG_SCHED_PARAL
+ if (sched_paral_used())
+ set_task_paral_node(p);
+#endif
+
rq_unlock(rq, &rf);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ea7ba74810e3..67939d04542f 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -61,6 +61,10 @@ SCHED_FEAT(SIS_UTIL, true)
SCHED_FEAT(STEAL, false)
#endif
+#ifdef CONFIG_SCHED_PARAL
+SCHED_FEAT(PARAL, false)
+#endif
+
/*
* Issue a WARN when we do multiple update_rq_clock() calls
* in a single rq->lock section. Default disabled because the
--
2.25.1
Offering: HULK
hulk inclusion
category: other
bugzilla: https://gitee.com/openeuler/kernel/issues/ICDF44
--------------------------------
This patch is intended only for debugging. It focuses on the null
pointer issue caused by the vruntime_eligible() function consistently
returning false due to overflow in s64 casts and in operations related
to cfs_rq->avg_vruntime within EEVDF.
When CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER is enabled, it will record
the calculation factors required by each sched_entity's vruntime in
place_entity() and related contexts.
When CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON is enabled, in
addition to recording, it will trigger BUG_ON for some suspicious
values. Note that this BUG_ON may have false triggers and it is only
intended for EEVDF troubleshooting.
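For reference, the suspected wrap can be reproduced in user space:
avg_vruntime_add() (see the fair.c hunk below) accumulates key * weight into
cfs_rq->avg_vruntime, and once a single product exceeds S64_MAX the signed
value goes negative, after which an "avg >= key * load" style eligibility
check keeps failing, matching the behaviour described above. The numbers here
are invented purely to show the wrap and are not from a real trace:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* key = vruntime - min_vruntime: hypothetically huge (about 39 hours in ns). */
	int64_t key = (int64_t)1 << 47;
	/* 88761 is the scaled-down load weight of a nice -20 task. */
	uint64_t weight = 88761;

	/* Multiply in unsigned arithmetic and reinterpret as s64, mimicking the
	 * two's-complement wrap-around the s64 sum would suffer.
	 */
	int64_t product = (int64_t)((uint64_t)key * weight);

	printf("key * weight as s64 = %" PRId64 "\n", product);
	if (product < 0)
		printf("wrapped negative -> eligibility comparisons against it keep failing\n");
	return 0;
}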
Signed-off-by: Zicheng Qu <quzicheng(a)huawei.com>
---
include/linux/sched.h | 34 +++++++++++++++++++++++++++
init/init_task.c | 9 +++++++
kernel/Kconfig.preempt | 15 ++++++++++++
kernel/fork.c | 15 ++++++++++++
kernel/sched/fair.c | 53 ++++++++++++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 34 +++++++++++++++++++++++++++
6 files changed, 160 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b6bc8d72309a..9d089094f1e6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -572,6 +572,36 @@ struct sched_statistics {
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+struct sched_entity_resvd {
+ /* pointer back to the main sched_entity */
+ struct sched_entity *se;
+
+ /*
+ * CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER is only designed
+ * to verify EEVDF null pointer issues.
+ */
+ /* attrs for cfs_rq */
+ s64 cfs_rq_avg_vruntime;
+ u64 cfs_rq_avg_load;
+ u64 cfs_rq_min_vruntime;
+ unsigned long cfs_rq_load_weight;
+ u64 cfs_rq_load_inv_weight;
+
+ /* attrs for cfs_rq->curr */
+ struct sched_entity *curr_address;
+ unsigned int curr_on_rq;
+ u64 curr_vruntime;
+ u64 curr_min_vruntime;
+ unsigned long curr_load_weight;
+ u32 curr_load_inv_weight;
+
+ /* calculators for place_entity() */
+ u64 function_place_entity_vruntime;
+ s64 function_place_entity_lag;
+};
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
+
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
@@ -611,8 +641,12 @@ struct sched_entity {
*/
struct sched_avg avg;
#endif
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ KABI_REPLACE(KABI_RESERVE(1); KABI_RESERVE(2), KABI_AUX_PTR(sched_entity))
+#else
KABI_RESERVE(1)
KABI_RESERVE(2)
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
KABI_RESERVE(3)
KABI_RESERVE(4)
};
diff --git a/init/init_task.c b/init/init_task.c
index 1adc17149558..7e4bec74f241 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -61,6 +61,12 @@ static struct task_struct_resvd init_task_struct_resvd = {
.task = &init_task,
};
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+static struct sched_entity_resvd init_sched_entity_resvd = {
+ .se = &init_task.se,
+};
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
+
/*
* Set up the first task table, touch at your own risk!. Base=0,
* limit=0x1fffff (=2MB)
@@ -94,6 +100,9 @@ struct task_struct init_task
},
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ ._resvd = &init_sched_entity_resvd,
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
},
.rt = {
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index dc2a630f200a..090f588a88ee 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -134,3 +134,18 @@ config SCHED_CORE
be no measurable impact on performance.
+config TEMP_EEVDF_NULL_POINTER_CHECKER
+ bool "temp eevdf null pointer"
+ default n
+ help
+ This option records the attributes of the cfs_rq and cfs_rq->curr
+ when a sched_entity is placed in place_entity().
+
+config TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ bool "temp eevdf null pointer bugon"
+ depends on TEMP_EEVDF_NULL_POINTER_CHECKER
+ default n
+ help
+ This option records the attributes of the cfs_rq and cfs_rq->curr
+ when a sched_entity is placed in place_entity(). In addition, it
+ triggers BUG_ON when necessary.
\ No newline at end of file
diff --git a/kernel/fork.c b/kernel/fork.c
index 7f7c297d5f48..a056de39f327 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -184,6 +184,9 @@ static inline struct task_struct *alloc_task_struct_node(int node)
static inline void free_task_struct(struct task_struct *tsk)
{
kfree(tsk->_resvd);
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ kfree(tsk->se._resvd);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
kmem_cache_free(task_struct_cachep, tsk);
}
#endif
@@ -1158,6 +1161,15 @@ static bool dup_resvd_task_struct(struct task_struct *dst,
if (!dst->_resvd)
return false;
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ dst->se._resvd = kzalloc_node(sizeof(struct sched_entity_resvd),
+ GFP_KERNEL, node);
+ if (!dst->se._resvd)
+ return false;
+
+ dst->se._resvd->se = &dst->se;
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
+
dst->_resvd->task = dst;
return true;
}
@@ -1178,6 +1190,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
* a double-free for task_struct_resvd extension object.
*/
WRITE_ONCE(tsk->_resvd, NULL);
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ WRITE_ONCE(tsk->se._resvd, NULL);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
err = arch_dup_task_struct(tsk, orig);
if (err || !dup_resvd_task_struct(tsk, orig, node))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f619dd53cc49..8cf3311a672c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -330,6 +330,10 @@ static int __init sched_fair_sysctl_init(void)
late_initcall(sched_fair_sysctl_init);
#endif
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+static inline void avg_vruntime_validate(struct cfs_rq *cfs_rq);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
+
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
@@ -799,6 +803,11 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
unsigned long weight = scale_load_down(se->load.weight);
s64 key = entity_key(cfs_rq, se);
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ /* not yet added to tree */
+ avg_vruntime_validate(cfs_rq);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
+
cfs_rq->avg_vruntime += key * weight;
cfs_rq->avg_load += weight;
}
@@ -811,6 +820,11 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->avg_vruntime -= key * weight;
cfs_rq->avg_load -= weight;
+
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ /* already removed from tree */
+ avg_vruntime_validate(cfs_rq);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
}
static inline
@@ -912,6 +926,10 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
load += weight;
}
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ WARN_ON_ONCE(!(avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load)
+ && (avg - (s64)(vruntime - cfs_rq->min_vruntime) * load >= 0));
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
}
@@ -3818,6 +3836,32 @@ static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+
+#define TEMP_EEVDF_MAX_LAG (1ULL << 50)
+#define TEMP_EEVDF_NULL_ABS(x) ((x) < 0 ? -(x) : (x))
+
+static inline void avg_vruntime_validate(struct cfs_rq *cfs_rq)
+{
+ unsigned long load = 0;
+ s64 vruntime = 0;
+ struct rb_node *node = rb_first_cached(&cfs_rq->tasks_timeline);
+
+ for (; node; node = rb_next(node)) {
+ struct sched_entity *se = __node_2_se(node);
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+ /* vruntime += key * weight; */
+ WARN_ON_ONCE(__builtin_mul_overflow(key, weight, &key));
+ WARN_ON_ONCE(__builtin_add_overflow(vruntime, key, &vruntime));
+ load += weight;
+ }
+
+ WARN_ON_ONCE(cfs_rq->avg_load != load);
+ WARN_ON_ONCE(cfs_rq->avg_vruntime != vruntime);
+}
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
+
static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
unsigned long weight)
{
@@ -3905,6 +3949,9 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
vlag = entity_lag(avruntime, se);
vlag = div_s64(vlag * old_weight, weight);
se->vruntime = avruntime - vlag;
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ BUG_ON(TEMP_EEVDF_NULL_ABS(vlag) > TEMP_EEVDF_MAX_LAG);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
}
/*
@@ -5417,6 +5464,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
}
se->vruntime = vruntime - lag;
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+ temp_save_info_for_eevdf_nullpointer(cfs_rq, se, vruntime, lag);
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON
+ BUG_ON(TEMP_EEVDF_NULL_ABS(lag) > TEMP_EEVDF_MAX_LAG);
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER_BUGON */
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
/*
* When joining the competition; the exisiting tasks will be,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f6a3f93d1f75..d3560256cd1b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3788,4 +3788,38 @@ static inline int destroy_soft_domain(struct task_group *tg)
#endif
+#ifdef CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER
+static inline void temp_save_info_for_eevdf_nullpointer(
+ struct cfs_rq *cfs_rq,
+ struct sched_entity *se,
+ u64 function_place_entity_vruntime,
+ s64 function_place_entity_lag)
+{
+ // attrs for cfs_rq
+ se->_resvd->cfs_rq_avg_vruntime = cfs_rq->avg_vruntime;
+ se->_resvd->cfs_rq_avg_load = cfs_rq->avg_load;
+ se->_resvd->cfs_rq_min_vruntime = cfs_rq->min_vruntime;
+ se->_resvd->cfs_rq_load_weight = cfs_rq->load.weight;
+ se->_resvd->cfs_rq_load_inv_weight = cfs_rq->load.inv_weight;
+
+ // attrs for cfs_rq->curr
+ struct sched_entity *curr = cfs_rq->curr;
+
+ if (curr) {
+ se->_resvd->curr_address = curr;
+ se->_resvd->curr_on_rq = curr->on_rq;
+ se->_resvd->curr_vruntime = curr->vruntime;
+ se->_resvd->curr_min_vruntime = curr->min_vruntime;
+ se->_resvd->curr_load_weight = curr->load.weight;
+ se->_resvd->curr_load_inv_weight = curr->load.inv_weight;
+ } else {
+ se->_resvd->curr_address = NULL;
+ }
+
+ // calculators for place_entity()
+ se->_resvd->function_place_entity_vruntime = function_place_entity_vruntime;
+ se->_resvd->function_place_entity_lag = function_place_entity_lag;
+}
+#endif /* CONFIG_TEMP_EEVDF_NULL_POINTER_CHECKER */
+
#endif /* _KERNEL_SCHED_SCHED_H */
--
2.34.1

11 Aug '25
From: Huangjunhua <huangjunhua14(a)huawei.com>
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICB3EN
CVE: NA
-----------------------------------------
To meet the competitive evolution requirements ("5+1+1") for the
new-generation Kunpeng ARM platform, Tianchi architecture, and
BMC management system. The products, BMC, and HiSilicon collaboratively
planned the next-generation BMC evolution chip Hi1712.
Building upon Hi1711, the Hi1712 chip enhances interfaces, computing power,
and security. The Huawei iBMA driver requires adaptation to support Hi1712
for in-band and out-of-band communication.
Signed-off-by: Huangjunhua <huangjunhua14(a)huawei.com>
---
MAINTAINERS | 6 +
.../ethernet/huawei/bma/cdev_drv/bma_cdev.c | 2 +-
.../bma/cdev_veth_drv/virtual_cdev_eth_net.c | 18 +-
.../bma/cdev_veth_drv/virtual_cdev_eth_net.h | 1 -
.../net/ethernet/huawei/bma/edma_drv/Makefile | 2 +-
.../huawei/bma/edma_drv/bma_devintf.c | 118 ++++-
.../ethernet/huawei/bma/edma_drv/bma_pci.c | 277 +++++++++--
.../ethernet/huawei/bma/edma_drv/bma_pci.h | 33 +-
.../ethernet/huawei/bma/edma_drv/edma_drv.h | 340 +++++++++++++
.../ethernet/huawei/bma/edma_drv/edma_host.c | 160 +++++-
.../ethernet/huawei/bma/edma_drv/edma_host.h | 14 +-
.../ethernet/huawei/bma/edma_drv/edma_queue.c | 470 ++++++++++++++++++
.../ethernet/huawei/bma/edma_drv/edma_queue.h | 29 ++
.../ethernet/huawei/bma/edma_drv/edma_reg.h | 127 +++++
.../huawei/bma/include/bma_ker_intf.h | 46 ++
.../huawei/bma/kbox_drv/kbox_include.h | 2 +-
.../ethernet/huawei/bma/veth_drv/veth_hb.c | 25 +-
.../ethernet/huawei/bma/veth_drv/veth_hb.h | 12 +-
18 files changed, 1582 insertions(+), 100 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 61baf2cfc4e1..446f2f49fd14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9707,6 +9707,12 @@ F: drivers/net/ethernet/huawei/hinic3/cqm/
F: drivers/net/ethernet/huawei/hinic3/hw/
F: drivers/net/ethernet/huawei/hinic3/include/
+HUAWEI ETHERNET DRIVER
+M: Huangjunhua <huangjunhua14(a)huawei.com>
+L: netdev(a)vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/huawei/bma/
+
HUAWEI BIFUR DRIVER
M: Xiaoping zheng <zhengxiaoping5(a)huawei.com>
L: netdev(a)vger.kernel.org
diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
index 275c2cdfe5db..59181c829a68 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
@@ -28,7 +28,7 @@
#ifdef DRV_VERSION
#define CDEV_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define CDEV_VERSION "0.3.10"
+#define CDEV_VERSION "0.4.0"
#endif
#define CDEV_DEFAULT_NUM 4
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
index e6dbec7073e4..adb6dd6972f5 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
@@ -151,6 +151,12 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
u8 *shmq_head = NULL;
u8 *shmq_head_p = NULL;
struct edma_rxtx_q_s *tx_queue = NULL;
+ int ret = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return -EFAULT;
tx_queue = (struct edma_rxtx_q_s *)
kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -173,7 +179,7 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
tx_queue->pdmalbase_v = (struct edma_dmal_s *)
(shmq_head + SHMDMAL_OFFSET);
- tx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+ tx_queue->pdmalbase_p = (u8 *)(veth_address +
(MAX_SHAREQUEUE_SIZE * 0) + SHMDMAL_OFFSET);
memset(tx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
@@ -219,6 +225,12 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
u8 *shmq_head = NULL;
u8 *shmq_head_p = NULL;
struct edma_rxtx_q_s *rx_queue = NULL;
+ int ret = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return -EFAULT;
rx_queue = (struct edma_rxtx_q_s *)
kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -241,7 +253,7 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
/* DMA address list (only used in host). */
rx_queue->pdmalbase_v = (struct edma_dmal_s *)
(shmq_head + SHMDMAL_OFFSET);
- rx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+ rx_queue->pdmalbase_p = (u8 *)(veth_address +
MAX_SHAREQUEUE_SIZE + SHMDMAL_OFFSET);
memset(rx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
@@ -1304,6 +1316,8 @@ int __start_dmalist_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 cnt)
dma_transfer.type = DMA_LIST;
dma_transfer.transfer.list.dma_addr =
(dma_addr_t)prxtx_queue->pdmalbase_p;
+ dma_transfer.pdmalbase_v = (struct bspveth_dmal *)prxtx_queue->pdmalbase_v;
+ dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
ret = bma_intf_start_dma(g_eth_edmaprivate.edma_priv, &dma_transfer);
LOG(DLOG_DEBUG, "after -> %u/%u/%u/%u, ret: %d",
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
index cb7c28cb5ddd..bc4b2147272b 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
@@ -56,7 +56,6 @@
#define BSP_ERR_AGAIN (BSP_ETH_ERR_BASE + 18)
#define BSP_ERR_NOT_TO_HANDLE (BSP_ETH_ERR_BASE + 19)
-#define VETH_SHAREPOOL_BASE_INBMC (0x84820000)
#define VETH_SHAREPOOL_SIZE (0xdf000)
#define VETH_SHAREPOOL_OFFSET (0x10000)
#define MAX_SHAREQUEUE_SIZE (0x20000)
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
index 46cc51275a71..048bcb9e2bbe 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_BMA) += host_edma_drv.o
-host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o
+host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o edma_queue.o
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
index 3b5eb39d6da6..45815fdc18eb 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
@@ -31,6 +31,18 @@ static struct bma_dev_s *g_bma_dev;
static ATOMIC_NOTIFIER_HEAD(bma_int_notify_list);
+static enum pci_type_e g_pci_type = PCI_TYPE_UNKNOWN;
+
+enum pci_type_e get_pci_type(void)
+{
+ return g_pci_type;
+}
+
+void set_pci_type(enum pci_type_e type)
+{
+ g_pci_type = type;
+}
+
static int bma_priv_insert_priv_list(struct bma_priv_data_s *priv, u32 type,
u32 sub_type)
{
@@ -342,6 +354,82 @@ int bma_intf_unregister_type(void **handle)
}
EXPORT_SYMBOL(bma_intf_unregister_type);
+int bma_intf_get_host_number(unsigned int *host_number)
+{
+ unsigned int devfn = 0;
+
+ if (!host_number)
+ return -EFAULT;
+
+ if (!g_bma_dev) {
+ BMA_LOG(DLOG_ERROR, "g_bma_dev is NULL\n");
+ return -ENXIO;
+ }
+
+ devfn = g_bma_dev->bma_pci_dev->pdev->devfn;
+ BMA_LOG(DLOG_DEBUG, "devfn is %u\n", devfn);
+ if (devfn == PF7 || devfn == PF10) {
+ *host_number = HOST_NUMBER_0;
+ } else if (devfn == PF4) {
+ *host_number = HOST_NUMBER_1;
+ } else {
+ BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+ *host_number = HOST_NUMBER_0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bma_intf_get_host_number);
+
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr)
+{
+ u32 host_number = 0;
+ u32 devfn = 0;
+ u32 i = 0;
+ enum pci_type_e pci_type = get_pci_type();
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ static struct bma_map_addr_s addr_info[] = {
+ {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+ {PCI_TYPE_UNKNOWN, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+ {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+ {PCI_TYPE_171x, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1712_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1712_HOST0_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_EDMA_ADDR, EDMA_1712_HOST1_ADDR},
+ {PCI_TYPE_1712, HOST_NUMBER_1, TYPE_VETH_ADDR, VETH_1712_HOST1_ADDR},
+ };
+
+ if (!bma_pci_dev) {
+ BMA_LOG(DLOG_ERROR, "bma_pci_dev is null\n");
+ return -EFAULT;
+ }
+
+ devfn = bma_pci_dev->pdev->devfn;
+ if (devfn == PF7 || devfn == PF10) {
+ host_number = HOST_NUMBER_0;
+ } else if (devfn == PF4) {
+ host_number = HOST_NUMBER_1;
+ } else {
+ BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+ host_number = HOST_NUMBER_0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(addr_info); i++) {
+ if (pci_type == addr_info[i].pci_type &&
+ host_number == addr_info[i].host_number && type == addr_info[i].addr_type) {
+ *addr = addr_info[i].addr;
+ return 0;
+ }
+ }
+
+ BMA_LOG(DLOG_DEBUG,
+ "Cannot find proper map address! pci_type: %u, host_number: %u, addr_type: %u\n",
+ pci_type, host_number, type);
+ return -EFAULT;
+}
+EXPORT_SYMBOL(bma_intf_get_map_address);
+
int bma_intf_check_edma_supported(void)
{
return !(!g_bma_dev);
@@ -350,13 +438,30 @@ EXPORT_SYMBOL(bma_intf_check_edma_supported);
int bma_intf_check_dma_status(enum dma_direction_e dir)
{
- return edma_host_check_dma_status(dir);
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return -EFAULT;
+ }
+
+ return get_bma_pci_dev_handler_s()[pci_type].check_dma(dir);
}
EXPORT_SYMBOL(bma_intf_check_dma_status);
void bma_intf_reset_dma(enum dma_direction_e dir)
{
- edma_host_reset_dma(&g_bma_dev->edma_host, dir);
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (!g_bma_dev)
+ return;
+
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return;
+ }
+
+ get_bma_pci_dev_handler_s()[pci_type].reset_dma(&g_bma_dev->edma_host, dir);
}
EXPORT_SYMBOL(bma_intf_reset_dma);
@@ -375,10 +480,16 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
{
int ret = 0;
struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle;
+ enum pci_type_e pci_type = get_pci_type();
if (!handle || !dma_transfer)
return -EFAULT;
+ if (pci_type == PCI_TYPE_UNKNOWN) {
+ BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+ return -EFAULT;
+ }
+
ret = edma_host_dma_start(&g_bma_dev->edma_host, priv);
if (ret) {
BMA_LOG(DLOG_ERROR,
@@ -386,7 +497,8 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
return ret;
}
- ret = edma_host_dma_transfer(&g_bma_dev->edma_host, priv, dma_transfer);
+ ret = get_bma_pci_dev_handler_s()[pci_type].transfer_edma_host(&g_bma_dev->edma_host, priv,
+ dma_transfer);
if (ret)
BMA_LOG(DLOG_ERROR,
"edma_host_dma_transfer failed! ret = %d\n", ret);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
index 577acaedb0e2..0e43289e0d1a 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
@@ -27,13 +27,20 @@
#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5
#define PCI_DEVICE_ID_KBOX_0_PME 0x1710
+#define PCI_DEVICE_ID_EDMA_0 0x1712
#define PCI_PME_USEABLE_SPACE (4 * 1024 * 1024)
+
+#define HOSTRTC_OFFSET 0x10000
+#define EDMA_OFFSET 0x20000
+#define VETH_OFFSET 0x30000
+
#define PME_DEV_CHECK(device, vendor) ((device) == PCI_DEVICE_ID_KBOX_0_PME && \
(vendor) == PCI_VENDOR_ID_HUAWEI_PME)
#define PCI_BAR0_PME_1710 0x85800000
#define PCI_BAR0 0
#define PCI_BAR1 1
+#define PCI_BAR2 2
#define PCI_USING_DAC_DEFAULT 0
#define GET_HIGH_ADDR(address) ((sizeof(unsigned long) == 8) ? \
@@ -51,15 +58,50 @@ int debug = DLOG_ERROR;
MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)");
static struct bma_pci_dev_s *g_bma_pci_dev;
+struct bma_pci_dev_s *get_bma_pci_dev(void)
+{
+ return g_bma_pci_dev;
+}
+
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev)
+{
+ g_bma_pci_dev = bma_pci_dev;
+}
static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state);
static int bma_pci_resume(struct pci_dev *pdev);
static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void bma_pci_remove(struct pci_dev *pdev);
+static struct bma_pci_dev_handler_s g_bma_pci_dev_handler_s[] = {
+ {0},
+ // for 1710/1711
+ {
+ .ioremap_bar_mem = ioremap_pme_bar_mem_v1,
+ .iounmap_bar_mem = iounmap_bar_mem_v1,
+ .check_dma = edma_host_check_dma_status_v1,
+ .transfer_edma_host = edma_host_dma_transfer_v1,
+ .reset_dma = edma_host_reset_dma_v1,
+ },
+ // for 1712
+ {
+ .ioremap_bar_mem = ioremap_pme_bar_mem_v2,
+ .iounmap_bar_mem = iounmap_bar_mem_v2,
+ .check_dma = edma_host_check_dma_status_v2,
+ .transfer_edma_host = edma_host_dma_transfer_v2,
+ .reset_dma = edma_host_reset_dma_v2,
+ }
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void)
+{
+ return g_bma_pci_dev_handler_s;
+}
+
static const struct pci_device_id bma_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_FPGA, PCI_DEVICE_ID_KBOX_0)},
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_KBOX_0_PME)},
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_EDMA_0) },
{}
};
MODULE_DEVICE_TABLE(pci, bma_pci_tbl);
@@ -73,7 +115,7 @@ int edma_param_get_statics(char *buf, const struct kernel_param *kp)
}
module_param_call(statistics, NULL, edma_param_get_statics, &debug, 0444);
-MODULE_PARM_DESC(statistics, "Statistics info of edma driver,readonly");
+MODULE_PARM_DESC(statistics, "Statistics info of edma driver, readonly");
int edma_param_set_debug(const char *buf, const struct kernel_param *kp)
{
@@ -99,34 +141,40 @@ module_param_call(debug, &edma_param_set_debug, &param_get_int, &debug, 0644);
void __iomem *kbox_get_base_addr(void)
{
- if (!g_bma_pci_dev || (!(g_bma_pci_dev->kbox_base_addr))) {
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev || (!(bma_pci_dev->kbox_base_addr))) {
BMA_LOG(DLOG_ERROR, "kbox_base_addr NULL point\n");
return NULL;
}
- return g_bma_pci_dev->kbox_base_addr;
+ return bma_pci_dev->kbox_base_addr;
}
EXPORT_SYMBOL_GPL(kbox_get_base_addr);
unsigned long kbox_get_io_len(void)
{
- if (!g_bma_pci_dev) {
- BMA_LOG(DLOG_ERROR, "kbox_io_len is error,can not get it\n");
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev) {
+ BMA_LOG(DLOG_ERROR, "kbox_io_len is error, can not get it\n");
return 0;
}
- return g_bma_pci_dev->kbox_base_len;
+ return bma_pci_dev->kbox_base_len;
}
EXPORT_SYMBOL_GPL(kbox_get_io_len);
unsigned long kbox_get_base_phy_addr(void)
{
- if (!g_bma_pci_dev || !g_bma_pci_dev->kbox_base_phy_addr) {
+ struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+ if (!bma_pci_dev || bma_pci_dev->kbox_base_phy_addr == 0) {
BMA_LOG(DLOG_ERROR, "kbox_base_phy_addr NULL point\n");
return 0;
}
- return g_bma_pci_dev->kbox_base_phy_addr;
+ return bma_pci_dev->kbox_base_phy_addr;
}
EXPORT_SYMBOL_GPL(kbox_get_base_phy_addr);
@@ -160,7 +208,7 @@ s32 __atu_config_H(struct pci_dev *pdev, unsigned int region,
return 0;
}
-static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev)
{
if (bma_pci_dev->kbox_base_addr) {
iounmap(bma_pci_dev->kbox_base_addr);
@@ -171,15 +219,84 @@ static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
iounmap(bma_pci_dev->bma_base_addr);
bma_pci_dev->bma_base_addr = NULL;
bma_pci_dev->edma_swap_addr = NULL;
+ bma_pci_dev->veth_swap_addr = NULL;
bma_pci_dev->hostrtc_viraddr = NULL;
}
}
-static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
- struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev)
+{
+ if (bma_pci_dev->kbox_base_addr) {
+ iounmap(bma_pci_dev->kbox_base_addr);
+ bma_pci_dev->kbox_base_addr = NULL;
+ }
+
+ if (bma_pci_dev->bma_base_addr) {
+ iounmap(bma_pci_dev->bma_base_addr);
+ bma_pci_dev->bma_base_addr = NULL;
+ }
+
+ if (bma_pci_dev->hostrtc_viraddr) {
+ iounmap(bma_pci_dev->hostrtc_viraddr);
+ bma_pci_dev->hostrtc_viraddr = NULL;
+ bma_pci_dev->edma_swap_addr = NULL;
+ bma_pci_dev->veth_swap_addr = NULL;
+ }
+}
+
+static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+{
+ enum pci_type_e pci_type = get_pci_type();
+
+ if (pci_type == PCI_TYPE_UNKNOWN)
+ return;
+
+ g_bma_pci_dev_handler_s[pci_type].iounmap_bar_mem(bma_pci_dev);
+}
+
+static int config_atu(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+ int ret = 0;
+ phys_addr_t edma_address = 0;
+ phys_addr_t veth_address = 0;
+
+ ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+ if (ret != 0)
+ return ret;
+
+ ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (ret != 0)
+ return ret;
+
+ __atu_config_H(pdev, 0,
+ GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
+ (bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
+ 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
+
+ __atu_config_H(pdev, 1,
+ GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
+ (bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
+ 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
+
+ __atu_config_H(pdev, 2,
+ GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
+ (bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
+ 0, edma_address, EDMA_SWAP_DATA_SIZE);
+
+ __atu_config_H(pdev, 3,
+ GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
+ (bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
+ 0, veth_address, VETH_SWAP_DATA_SIZE);
+
+ return ret;
+}
+
+// for 1710 1711
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
{
unsigned long bar1_resource_flag = 0;
u32 data = 0;
+ int ret;
bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
BMA_LOG(DLOG_DEBUG, "1710\n");
@@ -217,25 +334,11 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
bma_pci_dev->edma_swap_phy_addr,
bma_pci_dev->veth_swap_phy_addr);
- __atu_config_H(pdev, 0,
- GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
- (bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
- 0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
-
- __atu_config_H(pdev, 1,
- GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
- (bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
- 0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
-
- __atu_config_H(pdev, 2,
- GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
- (bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
- 0, EDMA_SWAP_DATA_BASE, EDMA_SWAP_DATA_SIZE);
-
- __atu_config_H(pdev, 3,
- GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
- (bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
- 0, VETH_SWAP_DATA_BASE, VETH_SWAP_DATA_SIZE);
+ ret = config_atu(pdev, bma_pci_dev);
+ if (ret != 0) {
+ BMA_LOG(DLOG_DEBUG, "config atu failed.\n");
+ return ret;
+ }
if (bar1_resource_flag & IORESOURCE_CACHEABLE) {
bma_pci_dev->bma_base_addr =
@@ -250,7 +353,6 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
if (!bma_pci_dev->bma_base_addr) {
BMA_LOG(DLOG_ERROR,
"Cannot map device registers, aborting\n");
-
return -ENODEV;
}
@@ -270,11 +372,80 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
return 0;
}
+// for 1712
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+ unsigned long bar2_resource_flag = 0;
+
+ bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
+ BMA_LOG(DLOG_DEBUG, "1712\n");
+
+ bma_pci_dev->bma_base_phy_addr = (unsigned long)pci_resource_start(pdev, PCI_BAR2);
+ bar2_resource_flag = (unsigned long)pci_resource_flags(pdev, PCI_BAR2);
+ if (!(bar2_resource_flag & IORESOURCE_MEM)) {
+ BMA_LOG(DLOG_ERROR, "Cannot find proper PCI device base address, aborting\n");
+ return -ENODEV;
+ }
+
+ bma_pci_dev->bma_base_len = (unsigned long)pci_resource_len(pdev, PCI_BAR2);
+ bma_pci_dev->edma_swap_len = EDMA_SWAP_DATA_SIZE;
+ bma_pci_dev->veth_swap_len = VETH_SWAP_DATA_SIZE;
+
+ BMA_LOG(DLOG_DEBUG,
+ "bar2: bma_base_len = 0x%lx, edma_swap_len = %ld, veth_swap_len = %ld(0x%lx)\n",
+ bma_pci_dev->bma_base_len, bma_pci_dev->edma_swap_len, bma_pci_dev->veth_swap_len,
+ bma_pci_dev->veth_swap_len);
+
+ bma_pci_dev->hostrtc_phyaddr = bma_pci_dev->bma_base_phy_addr + HOSTRTC_OFFSET;
+ /* edma */
+ bma_pci_dev->edma_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + EDMA_OFFSET;
+ /* veth */
+ bma_pci_dev->veth_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + VETH_OFFSET;
+
+ BMA_LOG(DLOG_DEBUG,
+ "bar2: bma_base_phy_addr = 0x%lx, bma_base_len = %zu , hostrtc_phyaddr = 0x%lx, edma_swap_phy_addr = 0x%lx, veth_swap_phy_addr = 0x%lx\n",
+ bma_pci_dev->bma_base_phy_addr, bma_pci_dev->bma_base_len,
+ bma_pci_dev->hostrtc_phyaddr, bma_pci_dev->edma_swap_phy_addr,
+ bma_pci_dev->veth_swap_phy_addr);
+
+ bma_pci_dev->bma_base_addr = ioremap(bma_pci_dev->bma_base_phy_addr,
+ bma_pci_dev->bma_base_len);
+ if (!bma_pci_dev->bma_base_addr) {
+ BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+ return -ENODEV;
+ }
+
+ if (bar2_resource_flag & IORESOURCE_CACHEABLE) {
+ BMA_LOG(DLOG_DEBUG, "ioremap with cache, %d\n", IORESOURCE_CACHEABLE);
+ bma_pci_dev->hostrtc_viraddr = ioremap(bma_pci_dev->hostrtc_phyaddr,
+ bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+ } else {
+ BMA_LOG(DLOG_DEBUG, "ioremap without cache\n");
+ bma_pci_dev->hostrtc_viraddr = IOREMAP(bma_pci_dev->hostrtc_phyaddr,
+ bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+ }
+
+ if (!bma_pci_dev->hostrtc_viraddr) {
+ BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+ iounmap(bma_pci_dev->bma_base_addr);
+ bma_pci_dev->bma_base_addr = NULL;
+ return -ENODEV;
+ }
+
+ bma_pci_dev->edma_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+ - HOSTRTC_OFFSET + EDMA_OFFSET;
+ bma_pci_dev->veth_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+ - HOSTRTC_OFFSET + VETH_OFFSET;
+
+ return 0;
+}
+
static int ioremap_bar_mem(struct pci_dev *pdev,
struct bma_pci_dev_s *bma_pci_dev)
{
int err = 0;
unsigned long bar0_resource_flag = 0;
+ enum pci_type_e pci_type = get_pci_type();
bar0_resource_flag = pci_resource_flags(pdev, PCI_BAR0);
@@ -294,8 +465,8 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
bma_pci_dev->kbox_base_phy_addr, bma_pci_dev->kbox_base_len,
bma_pci_dev->kbox_base_len);
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
- err = ioremap_pme_bar1_mem(pdev, bma_pci_dev);
+ if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && pci_type != PCI_TYPE_UNKNOWN) {
+ err = g_bma_pci_dev_handler_s[pci_type].ioremap_bar_mem(pdev, bma_pci_dev);
if (err != 0)
return err;
}
@@ -314,11 +485,7 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
if (!bma_pci_dev->kbox_base_addr) {
BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
-
- iounmap(bma_pci_dev->bma_base_addr);
- bma_pci_dev->bma_base_addr = NULL;
- bma_pci_dev->edma_swap_addr = NULL;
- bma_pci_dev->hostrtc_viraddr = NULL;
+ iounmap_bar_mem(bma_pci_dev);
return -ENOMEM;
}
@@ -355,13 +522,14 @@ int pci_device_init(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
{
int err = 0;
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+ if ((pdev->device == PCI_DEVICE_ID_KBOX_0_PME || pdev->device == PCI_DEVICE_ID_EDMA_0) &&
+ pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME) {
err = bma_devinft_init(bma_pci_dev);
if (err) {
BMA_LOG(DLOG_ERROR, "bma_devinft_init failed\n");
bma_devinft_cleanup(bma_pci_dev);
iounmap_bar_mem(bma_pci_dev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
pci_release_regions(pdev);
kfree(bma_pci_dev);
#ifdef CONFIG_PCI_MSI
@@ -400,27 +568,25 @@ int pci_device_config(struct pci_dev *pdev)
goto err_out_free_dev;
}
+ set_bma_pci_dev(bma_pci_dev);
+
err = ioremap_bar_mem(pdev, bma_pci_dev);
if (err) {
BMA_LOG(DLOG_ERROR, "ioremap_edma_io_mem failed\n");
goto err_out_release_regions;
}
- g_bma_pci_dev = bma_pci_dev;
-
if (SET_DMA_MASK(&pdev->dev)) {
BMA_LOG(DLOG_ERROR,
- "No usable DMA ,configuration, aborting,goto failed2!!!\n");
+ "No usable DMA, configuration, aborting, goto failed2!!!\n");
goto err_out_unmap_bar;
}
- g_bma_pci_dev = bma_pci_dev;
-
return pci_device_init(pdev, bma_pci_dev);
err_out_unmap_bar:
iounmap_bar_mem(bma_pci_dev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
err_out_release_regions:
pci_release_regions(pdev);
err_out_free_dev:
@@ -442,16 +608,27 @@ static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
UNUSED(ent);
- if (g_bma_pci_dev)
+ if (get_bma_pci_dev())
return -EPERM;
err = pci_enable_device(pdev);
if (err) {
- BMA_LOG(DLOG_ERROR, "Cannot enable PCI device,aborting\n");
+ BMA_LOG(DLOG_ERROR, "Cannot enable PCI device, aborting\n");
return err;
}
- if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_KBOX_0_PME:
+ set_pci_type(PCI_TYPE_171x);
+ break;
+ case PCI_DEVICE_ID_EDMA_0:
+ set_pci_type(PCI_TYPE_1712);
+ break;
+ default:
+ set_pci_type(PCI_TYPE_UNKNOWN);
+ break;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && get_pci_type() != PCI_TYPE_UNKNOWN) {
err = pme_pci_enable_msi(pdev);
if (err)
return err;
@@ -468,7 +645,7 @@ static void bma_pci_remove(struct pci_dev *pdev)
struct bma_pci_dev_s *bma_pci_dev =
(struct bma_pci_dev_s *)pci_get_drvdata(pdev);
- g_bma_pci_dev = NULL;
+ set_bma_pci_dev(NULL);
(void)pci_set_drvdata(pdev, NULL);
if (bma_pci_dev) {
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
index a66724e2cb74..b43882997c01 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
@@ -18,6 +18,8 @@
#include "bma_devintf.h"
#include "bma_include.h"
+#include "../include/bma_ker_intf.h"
+#include "edma_host.h"
#include <linux/netdevice.h>
#define EDMA_SWAP_BASE_OFFSET 0x10000
@@ -25,10 +27,8 @@
#define HOSTRTC_REG_BASE 0x2f000000
#define HOSTRTC_REG_SIZE EDMA_SWAP_BASE_OFFSET
-#define EDMA_SWAP_DATA_BASE 0x84810000
#define EDMA_SWAP_DATA_SIZE 65536
-#define VETH_SWAP_DATA_BASE 0x84820000
#define VETH_SWAP_DATA_SIZE 0xdf000
#define ATU_VIEWPORT 0x900
@@ -71,7 +71,7 @@ struct bma_pci_dev_s {
#ifdef DRV_VERSION
#define BMA_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define BMA_VERSION "0.3.10"
+#define BMA_VERSION "0.4.0"
#endif
#ifdef CONFIG_ARM64
@@ -95,4 +95,31 @@ extern int debug;
int edmainfo_show(char *buff);
+struct bma_pci_dev_s *get_bma_pci_dev(void);
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev);
+
+struct bma_pci_dev_handler_s {
+ int (*ioremap_bar_mem)(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+ void (*iounmap_bar_mem)(struct bma_pci_dev_s *bma_pci_dev);
+ int (*check_dma)(enum dma_direction_e dir);
+ int (*transfer_edma_host)(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+ void (*reset_dma)(struct edma_host_s *edma_host, enum dma_direction_e dir);
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void);
+
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev);
+int edma_host_check_dma_status_v1(enum dma_direction_e dir);
+int edma_host_check_dma_status_v2(enum dma_direction_e dir);
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer);
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir);
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir);
+
#endif
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
new file mode 100644
index 000000000000..b0a09c022ba8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_DRV_H
+#define EDMA_DRV_H
+
+#define DMA_STATISTICS_LEN 16
+#define DMA_CH_TAG_SIZE 64
+
+#define HISILICON_VENDOR_ID 0x19e5
+#define DMA_PCIE_DEVICE_ID 0xa122
+
+#define MAX_DMA_CHS 4 /* The current version supports a maximum of 2x2 channels. */
+#define DMA_CHS_EACH_PORT 2
+
+#define MAX_SQ_DEPTH 0xFFFF
+#define MAX_CQ_DEPTH 0xFFFF
+
+#define DMA_DONE_MASK 0x1
+#define DMA_DONE_UNMASK 0x0
+#define DMA_ERR_MASK 0x7FFFE
+#define DMA_ERR_UNMASK 0x0
+
+#define BD_SO 0
+#define BD_RO 1
+
+#define SIZE_4M 0x400000
+#define SIZE_16K 0x4000
+#define SIZE_64K 0x10000
+#define SIZE_OF_U64 0x8
+#define SPD_SIZE_MAX 32
+
+/* Use integer arithmetic for approximate computation instead of floating-point. */
+#define US_PER_SECOND_DIV_1KB (1000000 / 1024)
+
+#define DMA_PHY_STORE_OFFSET (SIZE_64K - SIZE_OF_U64)
+#define DMA_RMT_PHY_STORE_OFFSET (DMA_PHY_STORE_OFFSET - SIZE_OF_U64)
+#define BIT_0_TO_31_MASK 0xFFFFFFFF
+
+#define DMA_TMOUT (2 * HZ) /* 2 seconds */
+
+enum {
+ EP0 = 0,
+ EP1 = 1
+};
+
+enum {
+ DRC_LOCAL = 0,
+ DRC_REMOTE = 1
+};
+
+enum {
+ DIR_B2H = 0,
+ DIR_H2B = 1,
+};
+
+enum {
+ DMA_INIT = 0x0,
+ DMA_RESET = 0x1,
+ DMA_PAUSE = 0x2,
+ DMA_NOTIFY = 0x3,
+ LINKDOWN = 0x4,
+ LINKUP = 0x5,
+ FLR = 0x6
+};
+
+enum {
+ PF0 = 0,
+ PF1 = 1,
+ PF2 = 2,
+ PF4 = 4,
+ PF7 = 7,
+ PF10 = 10
+};
+
+enum {
+ RESERVED = 0x0, /* reserved */
+ SMALL_PACKET = 0x1, /* SmallPacket Descriptor */
+ DMA_READ = 0x2, /* Read Descriptor */
+ DMA_WRITE = 0x3, /* Write Descriptor */
+ DMA_LOOP = 0x4, /* Loop Descriptor */
+ DMA_MIX = 0x10, /* not available, User-defined for test */
+ DMA_WD_BARRIER = 0x11, /* not available, User-defined for test */
+ DMA_RD_BARRIER = 0x12, /* not available, User-defined for test */
+ DMA_LP_BARRIER = 0x13 /* not available, User-defined for test */
+};
+
+enum {
+ IDLE_STATE = 0x0, /* dma channel in idle status */
+ RUN_STATE = 0x1, /* dma channel in run status */
+ CPL_STATE = 0x2, /* dma channel in cpld status */
+ PAUSE_STATE = 0x3, /* dma channel in pause status */
+ HALT_STATE = 0x4, /* dma channel in halt status */
+ ABORT_STATE = 0x5, /* dma channel in abort status */
+ WAIT_STATE = 0x6 /* dma channel in wait status */
+};
+
+/* CQE status */
+enum {
+ DMA_DONE = 0x0, /* sqe done succ */
+ OPCODE_ERR = 0x1, /* sqe opcode invalid */
+ LEN_ERR = 0x2, /* sqe length invalid, only ocurs in smallpackt */
+ DROP_EN = 0x4, /* sqe drop happen */
+ WR_RMT_ERR = 0x8, /* write data to host fail */
+ RD_RMT_ERR = 0x10, /* read data from host fail */
+ RD_AXI_ERR = 0x20, /* read data/sqe from local fail */
+ WR_AXI_ERR = 0x40, /* write data/cqe to local fail */
+ POISON_CPL_ERR = 0x80, /* poison data */
+ SUB_SQ_ERR = 0x100, /* read sqe with CPL TLP */
+ DMA_CH_RESET = 0x200, /* dma channel should reset */
+ LINK_DOWN_ERR = 0x400, /* linkdown happen */
+ RECOVERY = 0x800 /* error status to be reset */
+};
+
+enum {
+ SDI_DMA_ADDR_SIZE_16K = 0,
+ SDI_DMA_ADDR_SIZE_32K = 1,
+ SDI_DMA_ADDR_SIZE_64K = 2,
+ SDI_DMA_ADDR_SIZE_128K = 3
+};
+
+union U_DMA_QUEUE_SQ_DEPTH {
+ struct {
+ unsigned int dma_queue_sq_depth : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_DEPTH {
+ struct {
+ unsigned int dma_queue_cq_depth : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_HEAD_PTR {
+ struct {
+ unsigned int dma_queue_cq_head_ptr : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CQ_TAIL_PTR {
+ struct {
+ unsigned int dma_queue_cq_tail_ptr : 16; /* [15..0] */
+ unsigned int dma_queue_sqhd : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_SQ_TAIL_PTR {
+ struct {
+ unsigned int dma_queue_sq_tail_ptr : 16; /* [15..0] */
+ unsigned int reserved_0 : 16; /* [31..16] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CTRL0 {
+ struct {
+ unsigned int dma_queue_en : 1; /* [0] */
+ unsigned int dma_queue_icg_en : 1; /* [1] */
+ unsigned int reserved : 1; /* [2] */
+ unsigned int dma_rst_without_cq_ack_enable : 1; /* [3] */
+ unsigned int dma_queue_pause : 1; /* [4] */
+ unsigned int reserved_1 : 3; /* [7..5] */
+ unsigned int dma_queue_arb_weight : 8; /* [15..8] */
+ unsigned int reserved_2 : 3; /* [18...16] */
+ unsigned int dma_queue_cq_mrg_en : 1; /* [19] */
+ unsigned int dma_queue_cq_mrg_time : 2; /* [21..20] */
+ unsigned int dma_queue_local_err_done_int_en : 1; /* [22] */
+ unsigned int dma_queue_remote_err_done_int_en : 1; /* [23] */
+ unsigned int reserved_3 : 1; /* [24] */
+ unsigned int dma_queue_cq_full_disable : 1; /* [25] */
+ unsigned int dma_queue_cq_drct_sel : 1; /* [26] */
+ unsigned int dma_queue_sq_drct_sel : 1; /* [27] */
+ unsigned int dma_queue_sq_pa_lkp_err_abort_en : 1; /* [28] */
+ unsigned int dma_queue_sq_proc_err_abort_en : 1; /* [29] */
+ unsigned int dma_queue_sq_drop_err_abort_en : 1; /* [30] */
+ unsigned int dma_queue_sq_cfg_err_abort_en : 1; /* [31] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_CTRL1 {
+ struct {
+ unsigned int dma_queue_reset : 1; /* [0] */
+ unsigned int dma_queue_abort_exit : 1; /* [1] */
+ unsigned int dma_va_enable : 1; /* [2] */
+ unsigned int reserved_0 : 1; /* [3] */
+ unsigned int dma_queue_port_num : 4; /* [7..4] */
+ unsigned int dma_queue_remote_msi_x_mask : 1; /* [8] */
+ unsigned int dma_va_enable_sq : 1; /* [9] */
+ unsigned int dma_va_enable_cq : 1; /* [10] */
+ unsigned int dma_queue_local_pfx_er : 1; /* [11] */
+ unsigned int dma_queue_local_pfx_pmr : 1; /* [12] */
+ unsigned int reserved_1 : 3; /* [15...13] */
+ unsigned int dma_queue_qos_en : 1; /* [16] */
+ unsigned int dma_queue_qos : 4; /* [20...17] */
+ unsigned int dma_queue_mpam_id : 11; /* [31..21] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_FSM_STS {
+ struct {
+ unsigned int dma_queue_sts : 4; /* [3..0] */
+ unsigned int dma_queue_not_work : 1; /* [4] */
+ unsigned int dma_queue_wait_spd_data_sts : 1; /* [5] */
+ unsigned int reserved_0 : 1; /* [6] */
+ unsigned int reserved_1 : 1; /* [7] */
+ unsigned int dma_queue_sub_fsm_sts : 3; /* [10..8] */
+ unsigned int reserved_2 : 21; /* [31..11] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_INT_STS {
+ struct {
+ unsigned int dma_queue_done_int_sts : 1; /* [0] */
+ unsigned int dma_queue_err00_int_sts : 1; /* [1] */
+ unsigned int dma_queue_err01_int_sts : 1; /* [2] */
+ unsigned int dma_queue_err02_int_sts : 1; /* [3] */
+ unsigned int dma_queue_err03_int_sts : 1; /* [4] */
+ unsigned int reserved : 1; /* [5] */
+ unsigned int dma_queue_err05_int_sts : 1; /* [6] */
+ unsigned int dma_queue_err06_int_sts : 1; /* [7] */
+ unsigned int dma_queue_err07_int_sts : 1; /* [8] */
+ unsigned int dma_queue_err08_int_sts : 1; /* [9] */
+ unsigned int dma_queue_err09_int_sts : 1; /* [10] */
+ unsigned int dma_queue_err10_int_sts : 1; /* [11] */
+ unsigned int dma_queue_err11_int_sts : 1; /* [12] */
+ unsigned int dma_queue_err12_int_sts : 1; /* [13] */
+ unsigned int dma_queue_err13_int_sts : 1; /* [14] */
+ unsigned int dma_queue_err14_int_sts : 1; /* [15] */
+ unsigned int dma_queue_err15_int_sts : 1; /* [16] */
+ unsigned int dma_queue_err16_int_sts : 1; /* [17] */
+ unsigned int dma_queue_err17_int_sts : 1; /* [18] */
+ unsigned int reserved_0 : 13; /* [31..19] */
+ } bits;
+
+ unsigned int u32;
+};
+
+union U_DMA_QUEUE_INT_MSK {
+ struct {
+ unsigned int dma_queue_done_int_msk : 1; /* [0] */
+ unsigned int dma_queue_err00_int_msk : 1; /* [1] */
+ unsigned int dma_queue_err01_int_msk : 1; /* [2] */
+ unsigned int dma_queue_err02_int_msk : 1; /* [3] */
+ unsigned int dma_queue_err03_int_msk : 1; /* [4] */
+ unsigned int reserved : 1; /* [5] */
+ unsigned int dma_queue_err05_int_msk : 1; /* [6] */
+ unsigned int dma_queue_err06_int_msk : 1; /* [7] */
+ unsigned int dma_queue_err07_int_msk : 1; /* [8] */
+ unsigned int dma_queue_err08_int_msk : 1; /* [9] */
+ unsigned int dma_queue_err09_int_msk : 1; /* [10] */
+ unsigned int dma_queue_err10_int_msk : 1; /* [11] */
+ unsigned int dma_queue_err11_int_msk : 1; /* [12] */
+ unsigned int dma_queue_err12_int_msk : 1; /* [13] */
+ unsigned int dma_queue_err13_int_msk : 1; /* [14] */
+ unsigned int dma_queue_err14_int_msk : 1; /* [15] */
+ unsigned int dma_queue_err15_int_msk : 1; /* [16] */
+ unsigned int dma_queue_err16_int_msk : 1; /* [17] */
+ unsigned int dma_queue_err17_int_msk : 1; /* [18] */
+ unsigned int reserved_0 : 13; /* [31..19] */
+ } bits;
+
+ unsigned int u32;
+};
+
+struct dma_ch_sq_s {
+ u32 opcode : 4; /* [0~3] opcode */
+ u32 drop : 1; /* [4] drop */
+ u32 nw : 1; /* [5] nw */
+ u32 wd_barrier : 1; /* [6] write done barrier */
+ u32 rd_barrier : 1; /* [7] read done barrier */
+ u32 ldie : 1; /* [8] LDIE */
+ u32 rdie : 1; /* [9] rDIE */
+ u32 loop_barrier : 1; /* [10] */
+ u32 spd_barrier : 1; /* [11] */
+ u32 attr : 3; /* [12~14] attr */
+ u32 cq_disable : 1; /* [15] reserved */
+ u32 addrt : 2; /* [16~17] at */
+ u32 p3p4 : 2; /* [18~19] P3 P4 */
+ u32 pf : 3; /* [20~22] pf */
+ u32 vfen : 1; /* [23] vfen */
+ u32 vf : 8; /* [24~31] vf */
+ u32 pasid : 20; /* [0~19] pasid */
+ u32 er : 1; /* [20] er */
+ u32 pmr : 1; /* [21] pmr */
+ u32 prfen : 1; /* [22] prfen */
+ u32 reserved5 : 1; /* [23] reserved */
+ u32 msi : 8; /* [24~31] MSI/MSI-X vector */
+ u32 flow_id : 8; /* [0~7] Flow ID */
+ u32 reserved6 : 8; /* [8~15] reserved */
+ u32 TH : 1; /* [16] TH */
+ u32 PH : 2; /* [17~18] PH */
+ u32 reserved7 : 13; /* [19~31] reserved: some multiplex fields */
+ u32 length;
+ u32 src_addr_l;
+ u32 src_addr_h;
+ u32 dst_addr_l;
+ u32 dst_addr_h;
+};
+
+struct dma_ch_cq_s {
+ u32 reserved1;
+ u32 reserved2;
+ u32 sqhd : 16;
+ u32 reserved3 : 16;
+ u32 reserved4 : 16; /* [0~15] reserved */
+ u32 vld : 1; /* [16] vld */
+ u32 status : 15; /* [17~31] status */
+};
+
+#endif /* EDMA_DRV_H */
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
index be2f732ed9ed..1bfb123e43c0 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
@@ -20,11 +20,18 @@
#include <linux/seq_file.h>
#include "bma_pci.h"
+#include "edma_queue.h"
#include "edma_host.h"
static struct edma_user_inft_s *g_user_func[TYPE_MAX] = { 0 };
static struct bma_dev_s *g_bma_dev;
+
+struct bma_dev_s *get_bma_dev(void)
+{
+ return g_bma_dev;
+}
+
static int edma_host_dma_interrupt(struct edma_host_s *edma_host);
int edmainfo_show(char *buf)
@@ -231,7 +238,8 @@ void clear_int_dmab2h(struct edma_host_s *edma_host)
(void)pci_write_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, data);
}
-int edma_host_check_dma_status(enum dma_direction_e dir)
+// for 1710 1711
+int edma_host_check_dma_status_v1(enum dma_direction_e dir)
{
int ret = 0;
@@ -259,6 +267,18 @@ int edma_host_check_dma_status(enum dma_direction_e dir)
return ret;
}
+// for 1712
+int edma_host_check_dma_status_v2(enum dma_direction_e dir)
+{
+ UNUSED(dir);
+ if (check_dma_queue_state(CPL_STATE, TRUE) == 0 ||
+ check_dma_queue_state(IDLE_STATE, TRUE) == 0) {
+ return 1; /* ok */
+ }
+
+ return 0; /* busy */
+}
+
#ifdef USE_DMA
static int start_transfer_h2b(struct edma_host_s *edma_host, unsigned int len,
@@ -633,9 +653,9 @@ void host_dma_transfer_withlist(struct edma_host_s *edma_host,
}
}
-int edma_host_dma_transfer(struct edma_host_s *edma_host,
- struct bma_priv_data_s *priv,
- struct bma_dma_transfer_s *dma_transfer)
+// for 1710 1711
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer)
{
int ret = 0;
unsigned long flags = 0;
@@ -673,7 +693,44 @@ int edma_host_dma_transfer(struct edma_host_s *edma_host,
return ret;
}
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
+// for 1712
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *dma_transfer)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+ struct bma_dev_s *bma_dev = NULL;
+
+ BMA_LOG(DLOG_DEBUG, "edma_host_dma_transfer 1712");
+
+ if (!edma_host || !priv || !dma_transfer)
+ return -EFAULT;
+
+ bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host);
+
+ spin_lock_irqsave(&bma_dev->priv_list_lock, flags);
+
+ if (priv->user.dma_transfer == 0) {
+ spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+ BMA_LOG(DLOG_ERROR, "dma_transfer = %hhd\n", priv->user.dma_transfer);
+ return -EFAULT;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "transfer_edma_host 1712");
+
+ spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+
+ edma_host->statistics.dma_count++;
+
+ spin_lock_irqsave(&edma_host->reg_lock, flags);
+ ret = transfer_dma_queue(dma_transfer);
+ spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+
+ return ret;
+}
+
+// for 1710/1711
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir)
{
u32 data = 0;
u32 reg_addr = 0;
@@ -717,6 +774,13 @@ void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
reg_addr, count, data);
}
+// for 1712
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir)
+{
+ UNUSED(dir);
+ reset_edma_host(edma_host);
+}
+
int edma_host_dma_stop(struct edma_host_s *edma_host,
struct bma_priv_data_s *priv)
{
@@ -750,8 +814,8 @@ static int edma_host_send_msg(struct edma_host_s *edma_host)
if (send_mbx_hdr->mbxlen > 0) {
if (send_mbx_hdr->mbxlen > HOST_MAX_SEND_MBX_LEN) {
/*share memory is disable */
+ BMA_LOG(DLOG_DEBUG, "mbxlen is too long: %d\n", send_mbx_hdr->mbxlen);
send_mbx_hdr->mbxlen = 0;
- BMA_LOG(DLOG_DEBUG, "mbxlen is too long\n");
return -EFAULT;
}
@@ -1296,6 +1360,69 @@ int edma_host_user_unregister(u32 type)
return 0;
}
+static void init_edma_sq_cq(struct edma_host_s *edma_host)
+{
+ u64 sq_phy_addr = 0;
+ u64 cq_phy_addr = 0;
+ phys_addr_t edma_address = 0;
+ int ret = 0;
+
+ if (get_pci_type() != PCI_TYPE_1712)
+ return;
+
+ ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+ if (ret != 0)
+ return;
+
+ edma_host->edma_sq_addr = (void *)((unsigned char *)edma_host->edma_recv_addr
+ + HOST_MAX_RCV_MBX_LEN);
+ edma_host->edma_cq_addr = (void *)((unsigned char *)edma_host->edma_sq_addr
+ + sizeof(struct dma_ch_sq_s) * SQ_DEPTH);
+ sq_phy_addr = edma_address + HOST_DMA_FLAG_LEN + HOST_MAX_SEND_MBX_LEN
+ + HOST_MAX_RCV_MBX_LEN;
+ cq_phy_addr = sq_phy_addr + sizeof(struct dma_ch_sq_s) * SQ_DEPTH;
+
+ BMA_LOG(DLOG_DEBUG,
+ "sq_phy_addr = 0x%llx, SQ size = %zu, cq_phy_addr = 0x%llx, CQ size = %zu",
+ sq_phy_addr, sizeof(struct dma_ch_sq_s) * SQ_DEPTH,
+ cq_phy_addr, sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+ BMA_LOG(DLOG_DEBUG, "sq_addr = %pK, cq_addr = %pK", edma_host->edma_sq_addr,
+ edma_host->edma_cq_addr);
+
+ (void)memset(edma_host->edma_sq_addr, 0,
+ sizeof(struct dma_ch_sq_s) * SQ_DEPTH + sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+
+ set_dma_queue_sq_base_l(sq_phy_addr & PCIE_ADDR_L_32_MASK);
+ set_dma_queue_sq_base_h((u32)(sq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+ set_dma_queue_cq_base_l(cq_phy_addr & PCIE_ADDR_L_32_MASK);
+ set_dma_queue_cq_base_h((u32)(cq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+
+ reset_edma_host(edma_host);
+}
+
+static void edma_setup_timer(struct edma_host_s *edma_host)
+{
+#ifdef HAVE_TIMER_SETUP
+ timer_setup(&edma_host->timer, edma_host_timeout, 0);
+#else
+ setup_timer(&edma_host->timer, edma_host_timeout,
+ (unsigned long)edma_host);
+#endif
+ (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
+
+#ifdef USE_DMA
+ #ifdef HAVE_TIMER_SETUP
+ timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
+
+ #else
+ setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
+ (unsigned long)edma_host);
+ #endif
+ (void)mod_timer(&edma_host->dma_timer,
+ jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
+#endif
+}
+
int edma_host_init(struct edma_host_s *edma_host)
{
int ret = 0;
@@ -1352,24 +1479,7 @@ int edma_host_init(struct edma_host_s *edma_host)
edma_host->b2h_state = B2HSTATE_IDLE;
#ifdef EDMA_TIMER
- #ifdef HAVE_TIMER_SETUP
- timer_setup(&edma_host->timer, edma_host_timeout, 0);
- #else
- setup_timer(&edma_host->timer, edma_host_timeout,
- (unsigned long)edma_host);
- #endif
- (void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
-#ifdef USE_DMA
- #ifdef HAVE_TIMER_SETUP
- timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
-
- #else
- setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
- (unsigned long)edma_host);
- #endif
- (void)mod_timer(&edma_host->dma_timer,
- jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
-#endif
+ edma_setup_timer(edma_host);
#else
init_completion(&edma_host->msg_ready);
@@ -1383,6 +1493,8 @@ int edma_host_init(struct edma_host_s *edma_host)
}
#endif
+ init_edma_sq_cq(edma_host);
+
#ifdef HAVE_TIMER_SETUP
timer_setup(&edma_host->heartbeat_timer,
edma_host_heartbeat_timer, 0);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
index cbbd86fd6602..93c81bc92286 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
@@ -18,6 +18,8 @@
#include "bma_include.h"
#include "../include/bma_ker_intf.h"
+#include "edma_reg.h"
+#include "edma_drv.h"
#define EDMA_TIMER
@@ -176,6 +178,13 @@
#define U64ADDR_H(addr) ((((u64)addr) >> 32) & 0xffffffff)
#define U64ADDR_L(addr) ((addr) & 0xffffffff)
+#define MAX_RESET_DMA_TIMES 10
+#define DELAY_BETWEEN_RESET_DMA 100
+#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5
+#define PCI_DEVICE_ID_EDMA_0 0x1712
+#define SQ_DEPTH 128
+#define CQ_DEPTH 128
+
struct bma_register_dev_type_s {
u32 type;
u32 sub_type;
@@ -263,6 +272,8 @@ struct edma_host_s {
void __iomem *edma_flag;
void __iomem *edma_send_addr;
void __iomem *edma_recv_addr;
+ void __iomem *edma_sq_addr;
+ void __iomem *edma_cq_addr;
#ifdef USE_DMA
struct timer_list dma_timer;
#endif
@@ -309,6 +320,8 @@ struct edma_user_inft_s {
int (*add_msg)(void *msg, size_t msg_len);
};
+struct bma_dev_s *get_bma_dev(void);
+
int is_edma_b2h_int(struct edma_host_s *edma_host);
void edma_int_to_bmc(struct edma_host_s *edma_host);
int edma_host_mmap(struct edma_host_s *edma_hos, struct file *filp,
@@ -336,7 +349,6 @@ int edma_host_user_unregister(u32 type);
int edma_host_init(struct edma_host_s *edma_host);
void edma_host_cleanup(struct edma_host_s *edma_host);
int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype);
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir);
void clear_int_dmah2b(struct edma_host_s *edma_host);
void clear_int_dmab2h(struct edma_host_s *edma_host);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
new file mode 100644
index 000000000000..678262f7412c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "bma_pci.h"
+#include "edma_host.h"
+#include "edma_queue.h"
+
+static u32 pcie_dma_read(u32 offset)
+{
+ u32 reg_val;
+
+ reg_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ BMA_LOG(DLOG_DEBUG, "readl, offset 0x%x val 0x%x\n", offset, reg_val);
+ return reg_val;
+}
+
+static void pcie_dma_write(u32 offset, u32 reg_val)
+{
+ u32 read_val;
+
+ (void)writel(reg_val, get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ read_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+ if (read_val != reg_val) {
+ BMA_LOG(DLOG_DEBUG,
+ "writel fail, read_value: 0x%x, set_value: 0x%x, offset: 0x%x\n",
+ read_val, reg_val, offset);
+ return;
+ }
+ BMA_LOG(DLOG_DEBUG, "writel, offset 0x%x val 0x%x\n", offset, reg_val);
+}
+
+static void set_dma_queue_int_msk(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_MSK_0_REG, val);
+}
+
+static void set_dma_queue_err_int_msk(u32 val)
+{
+ union U_DMA_QUEUE_INT_MSK reg_val;
+
+ // The least significant bit (bit 0) of this register is reserved and must be cleared,
+ // while the remaining bits should retain their original values.
+ reg_val.u32 = val & 0xFFFFFFFE;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_int_sts(u32 val)
+{
+ union U_DMA_QUEUE_INT_STS reg_val;
+
+ reg_val.u32 = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_INT_STS_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_int_sts(u32 *val)
+{
+ union U_DMA_QUEUE_INT_STS reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_INT_STS_0_REG);
+ *val = reg_val.u32;
+}
+
+static void get_dma_queue_fsm_sts(u32 *val)
+{
+ union U_DMA_QUEUE_FSM_STS reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_FSM_STS_0_REG);
+ *val = reg_val.bits.dma_queue_sts;
+}
+
+static void pause_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_pause = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void enable_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_en = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void reset_dma_queue(u32 val)
+{
+ union U_DMA_QUEUE_CTRL1 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL1_0_REG);
+ reg_val.bits.dma_queue_reset = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL1_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_sq_tail(u32 val)
+{
+ union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+ reg_val.bits.dma_queue_sq_tail_ptr = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_head(u32 val)
+{
+ union U_DMA_QUEUE_CQ_HEAD_PTR reg_val;
+
+ reg_val.bits.dma_queue_cq_head_ptr = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG, reg_val.u32);
+}
+
+void set_dma_queue_sq_base_l(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_sq_base_h(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_H_0_REG, val);
+}
+
+void set_dma_queue_cq_base_l(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_cq_base_h(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_H_0_REG, val);
+}
+
+static void set_dma_queue_sq_depth(u32 val)
+{
+ union U_DMA_QUEUE_SQ_DEPTH reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG);
+ reg_val.bits.dma_queue_sq_depth = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_depth(u32 val)
+{
+ union U_DMA_QUEUE_CQ_DEPTH reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG);
+ reg_val.bits.dma_queue_cq_depth = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_arb_weight(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_arb_weight = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_drct_sel(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_cq_drct_sel = val;
+ reg_val.bits.dma_queue_sq_drct_sel = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_sq_tail(u32 *val)
+{
+ union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG);
+ *val = reg_val.bits.dma_queue_sq_tail_ptr;
+}
+
+static void get_dma_queue_cq_tail(u32 *val)
+{
+ union U_DMA_QUEUE_CQ_TAIL_PTR reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG);
+ *val = reg_val.bits.dma_queue_cq_tail_ptr;
+}
+
+static void get_dma_queue_sq_head(u32 *val)
+{
+ u32 reg_val;
+
+ reg_val = pcie_dma_read(PCIE_DMA_QUEUE_SQ_STS_0_REG);
+ /* dma_queue_sq_head_ptr bit[15:0] */
+ *val = reg_val & 0xFFFF;
+}
+
+static void set_dma_queue_err_abort(u32 val)
+{
+ union U_DMA_QUEUE_CTRL0 reg_val;
+
+ reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+ reg_val.bits.dma_queue_sq_pa_lkp_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_proc_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_drop_err_abort_en = val;
+ reg_val.bits.dma_queue_sq_cfg_err_abort_en = val;
+ (void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_flr_disable(u32 val)
+{
+ (void)pcie_dma_write(PCIE_DMA_FLR_DISABLE_REG, val);
+}
+
+static void clear_dma_queue_int_chk(u32 mask)
+{
+ u32 int_sts;
+
+ (void)get_dma_queue_int_sts(&int_sts);
+ if (int_sts & mask)
+ (void)set_dma_queue_int_sts(mask);
+}
+
+s32 check_dma_queue_state(u32 state, u32 flag)
+{
+ u32 dma_state = 0;
+ unsigned long timeout;
+
+ BMA_LOG(DLOG_DEBUG, "state:%u, flag:%u\n", state, flag);
+
+ timeout = jiffies + TIMER_INTERVAL_CHECK;
+
+ while (1) {
+ get_dma_queue_fsm_sts(&dma_state);
+ BMA_LOG(DLOG_DEBUG, "DMA stats[%u]\n", dma_state);
+ // Exit the wait loop when flag is 0 and the state no longer equals the
+ // target value, or when flag is 1 and the state equals the target value.
+ if ((!flag && dma_state != state) || (flag && dma_state == state))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ BMA_LOG(DLOG_DEBUG, "Wait stats[%u] fail\n", state);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static s32 reset_dma(void)
+{
+ u32 dma_state = 0;
+
+ /* get dma channel fsm */
+ check_dma_queue_state(WAIT_STATE, FALSE);
+ get_dma_queue_fsm_sts(&dma_state);
+ BMA_LOG(DLOG_DEBUG, "dma_state:%u\n", dma_state);
+ switch (dma_state) {
+ /* idle state, dma channel needs no reset */
+ case IDLE_STATE:
+ return 0;
+ case RUN_STATE:
+ pause_dma_queue(ENABLE);
+ fallthrough;
+ case ABORT_STATE:
+ case CPL_STATE:
+ enable_dma_queue(DISABLE);
+ if (check_dma_queue_state(RUN_STATE, FALSE))
+ return -ETIMEDOUT;
+ fallthrough;
+ case PAUSE_STATE:
+ case HALT_STATE:
+ set_dma_queue_sq_tail(0);
+ set_dma_queue_cq_head(0);
+ reset_dma_queue(ENABLE);
+ pause_dma_queue(DISABLE);
+ if (check_dma_queue_state(IDLE_STATE, TRUE))
+ return -ETIMEDOUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void init_dma(void)
+{
+ /* set dma channel sq tail */
+ set_dma_queue_sq_tail(0);
+ /* set dma channel cq head */
+ set_dma_queue_cq_head(0);
+ /* set dma queue drct sel */
+ set_dma_queue_drct_sel(DRC_LOCAL);
+ /* set dma channel sq depth */
+ set_dma_queue_sq_depth(SQ_DEPTH - 1);
+ /* set dma channel cq depth */
+ set_dma_queue_cq_depth(CQ_DEPTH - 1);
+ /* DMA does not handle FLR; only the CPU handles FLR */
+ set_dma_queue_flr_disable(0x1);
+ /* set dma queue arb weight */
+ set_dma_queue_arb_weight(0x1F);
+ /* clear dma queue int status */
+ set_dma_queue_int_sts(0x1FFF);
+ /* set dma queue int mask */
+ set_dma_queue_err_int_msk(0x0);
+ set_dma_queue_int_msk(0x0);
+ /* set dma queue abort err en */
+ set_dma_queue_err_abort(ENABLE);
+ /* enable dma channel en */
+ enable_dma_queue(ENABLE);
+}
+
+s32 wait_done_dma_queue(unsigned long timeout)
+{
+ struct dma_ch_cq_s *p_cur_last_cq;
+ struct dma_ch_cq_s *p_dma_cq;
+ unsigned long end;
+ u32 sq_tail;
+ u32 sq_valid;
+ u32 cq_tail;
+ u32 cq_valid;
+
+ p_dma_cq = (struct dma_ch_cq_s *)((&get_bma_dev()->edma_host)->edma_cq_addr);
+ end = jiffies + timeout;
+
+ while (time_before(jiffies, end)) {
+ (void)get_dma_queue_sq_tail(&sq_tail);
+ (void)get_dma_queue_cq_tail(&cq_tail);
+
+ cq_valid = (cq_tail + CQ_DEPTH - 1) % (CQ_DEPTH);
+ p_cur_last_cq = &p_dma_cq[cq_valid];
+ sq_valid = (sq_tail + SQ_DEPTH - 1) % (SQ_DEPTH);
+ BMA_LOG(DLOG_DEBUG,
+ "sq_tail %d, cq_tail %d, cq_valid %d, sq_valid %d, p_cur_last_cq->sqhd %d\n",
+ sq_tail, cq_tail, cq_valid, sq_valid, p_cur_last_cq->sqhd);
+ if (p_cur_last_cq->sqhd == sq_valid) {
+ set_dma_queue_cq_head(cq_valid);
+ return 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static s32 submit_dma_queue_sq(u32 dir, struct bspveth_dmal pdmalbase_v, u32 pf)
+{
+ u32 sq_tail;
+ u32 sq_head;
+ u32 sq_available;
+ struct dma_ch_sq_s sq_submit;
+ struct dma_ch_sq_s *p_dma_sq;
+
+ p_dma_sq = (struct dma_ch_sq_s *)((&get_bma_dev()->edma_host)->edma_sq_addr);
+ (void)get_dma_queue_sq_tail(&sq_tail);
+ (void)get_dma_queue_sq_head(&sq_head);
+ sq_available = SQ_DEPTH - 1 - (((sq_tail - sq_head) + SQ_DEPTH) % SQ_DEPTH);
+ if (sq_available < 1) {
+ BMA_LOG(DLOG_DEBUG, "cannot process %u descriptors, try again later\n", 1);
+ return -1;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail get %d, sq_head %d, sq_availble %d\n",
+ sq_tail, sq_head, sq_availble);
+
+ (void)memset(&sq_submit, 0, sizeof(sq_submit));
+ if (dir == DIR_H2B)
+ sq_submit.opcode = DMA_READ;
+ else
+ sq_submit.opcode = DMA_WRITE;
+
+ BMA_LOG(DLOG_DEBUG, "PF: %u\n", pf);
+ sq_submit.ldie = ENABLE;
+ sq_submit.rdie = ENABLE;
+ sq_submit.attr &= (~0x2); /* SO(Strong Ordering) */
+ sq_submit.pf = pf & 0x7; /* 0x7 */
+ sq_submit.p3p4 = (pf >> 3) & 0x3; /* 0x3 */
+ sq_submit.length = pdmalbase_v.len;
+ sq_submit.src_addr_l = pdmalbase_v.slow;
+ sq_submit.src_addr_h = pdmalbase_v.shi;
+ sq_submit.dst_addr_l = pdmalbase_v.dlow;
+ sq_submit.dst_addr_h = pdmalbase_v.dhi;
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, dir %d, op %d, length %d\n", dir,
+ sq_submit.opcode, sq_submit.length);
+
+ memcpy(p_dma_sq + sq_tail, &sq_submit, sizeof(sq_submit));
+ sq_tail = (sq_tail + 1) % SQ_DEPTH;
+
+ BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail change %d,\n", sq_tail);
+ wmb(); /* memory barriers. */
+
+ (void)set_dma_queue_sq_tail(sq_tail);
+
+ return 0;
+}
+
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer)
+{
+ struct bspveth_dmal *pdmalbase_v;
+ u32 dmal_cnt;
+ s32 ret;
+ int i;
+
+ if (!dma_transfer) {
+ BMA_LOG(DLOG_DEBUG, "dma_transfer is NULL.\n");
+ return -EFAULT;
+ }
+
+ BMA_LOG(DLOG_DEBUG, "transfer dma queue.\n");
+
+ /* clear local done int */
+ clear_dma_queue_int_chk(DMA_DONE_MASK);
+
+ pdmalbase_v = dma_transfer->pdmalbase_v;
+ dmal_cnt = dma_transfer->dmal_cnt;
+ for (i = 0; i < dmal_cnt; i++)
+ submit_dma_queue_sq(dma_transfer->dir, pdmalbase_v[i],
+ get_bma_dev()->bma_pci_dev->pdev->devfn);
+
+ (void)set_dma_queue_int_msk(DMA_DONE_UNMASK);
+ (void)set_dma_queue_err_int_msk(DMA_ERR_UNMASK);
+ (void)enable_dma_queue(ENABLE);
+
+ ret = wait_done_dma_queue(DMA_TMOUT);
+ if (ret)
+ BMA_LOG(DLOG_DEBUG, "EP DMA: dma wait timeout");
+
+ return ret;
+}
+
+void reset_edma_host(struct edma_host_s *edma_host)
+{
+ unsigned long flags = 0;
+ int count = 0;
+
+ if (!edma_host)
+ return;
+
+ spin_lock_irqsave(&edma_host->reg_lock, flags);
+
+ while (count++ < MAX_RESET_DMA_TIMES) {
+ if (reset_dma() == 0) {
+ BMA_LOG(DLOG_DEBUG, "reset dma successfully\n");
+ init_dma();
+ break;
+ }
+
+ mdelay(DELAY_BETWEEN_RESET_DMA);
+ }
+
+ spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+ BMA_LOG(DLOG_DEBUG, "reset dma count=%d\n", count);
+}
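
[Editor's note] The SQ/CQ handling in submit_dma_queue_sq() and wait_done_dma_queue() above relies on standard ring arithmetic: one SQ slot is always kept free so a full ring can be told apart from an empty one, and a completion is recognised when the CQ entry's sqhd field points at the most recently submitted SQ index. A minimal user-space sketch of that arithmetic follows (illustrative only, not driver code; SQ_DEPTH mirrors the driver's value):

/* Stand-alone sketch of the SQ/CQ ring arithmetic (user space, for
 * illustration only). One slot is kept free so a full ring can be
 * distinguished from an empty one.
 */
#include <stdio.h>

#define SQ_DEPTH 128

/* Free SQ slots for a given tail (producer) and head (consumer). */
static unsigned int sq_free_slots(unsigned int sq_tail, unsigned int sq_head)
{
	return SQ_DEPTH - 1 - (((sq_tail - sq_head) + SQ_DEPTH) % SQ_DEPTH);
}

/* Index of the most recently submitted SQ entry; a transfer is complete
 * when the CQ entry's sqhd equals this value.
 */
static unsigned int last_submitted(unsigned int sq_tail)
{
	return (sq_tail + SQ_DEPTH - 1) % SQ_DEPTH;
}

int main(void)
{
	unsigned int head = 0, tail = 0;

	printf("empty ring: free=%u\n", sq_free_slots(tail, head));   /* 127 */
	tail = (tail + 1) % SQ_DEPTH;                                  /* one submit */
	printf("after submit: free=%u last=%u\n",
	       sq_free_slots(tail, head), last_submitted(tail));      /* 126, 0 */
	return 0;
}
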
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
new file mode 100644
index 000000000000..0cf449c0ae00
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_QUEUE_H
+#define EDMA_QUEUE_H
+#include "edma_host.h"
+
+s32 check_dma_queue_state(u32 state, u32 flag);
+void set_dma_queue_sq_base_l(u32 val);
+void set_dma_queue_sq_base_h(u32 val);
+void set_dma_queue_cq_base_l(u32 val);
+void set_dma_queue_cq_base_h(u32 val);
+void reset_edma_host(struct edma_host_s *edma_host);
+int transfer_edma_host(struct edma_host_s *host, struct bma_priv_data_s *priv,
+ struct bma_dma_transfer_s *transfer);
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer);
+#endif
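
[Editor's note] set_dma_queue_sq_base_l/h() and set_dma_queue_cq_base_l/h() exported here each take one half of a 64-bit queue base address, split the same way as U64ADDR_L()/U64ADDR_H() in edma_host.h. A small stand-alone sketch of that split (the register writers are replaced by printf() stubs and the DMA address is hypothetical):

/* Sketch of the 64-bit base address split; not driver code. */
#include <stdio.h>

typedef unsigned long long u64;
typedef unsigned int u32;

/* same split as U64ADDR_L()/U64ADDR_H() in edma_host.h */
#define ADDR_L(a) ((u32)((a) & 0xffffffff))
#define ADDR_H(a) ((u32)(((a) >> 32) & 0xffffffff))

/* stand-ins for set_dma_queue_sq_base_l()/set_dma_queue_sq_base_h() */
static void sq_base_l(u32 v) { printf("SQ_BASE_L = 0x%08x\n", v); }
static void sq_base_h(u32 v) { printf("SQ_BASE_H = 0x%08x\n", v); }

int main(void)
{
	u64 sq_dma = 0x0000008512340000ULL;  /* hypothetical DMA-mapped SQ base */

	sq_base_l(ADDR_L(sq_dma));           /* bits [31:0]  */
	sq_base_h(ADDR_H(sq_dma));           /* bits [63:32] */
	return 0;
}
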
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
new file mode 100644
index 000000000000..c4e056a92bc8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_REG_H
+#define EDMA_REG_H
+
+#define PORT_EP 0
+#define PORT_RP 1
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define TRUE 1
+#define FALSE 0
+
+/* core0:x2/x1 core1:x1 */
+#define PCIE_CORE_NUM 2
+#define PCIE_REG_OFFSET 0x100000U
+#define PCIE_REG_SIZE 0x100000
+
+#define GEN1 0x1
+#define GEN2 0x2
+#define GEN3 0x3
+#define GEN4 0x4
+
+#define PCIE_ADDR_H_SHIFT_32 32
+#define PCIE_ADDR_L_32_MASK 0xFFFFFFFF
+
+#define AP_DMA_BIT BIT(5)
+#define AP_MASK_ALL 0x3FF
+#define AP_DMA_CHAN_REG_SIZE 0x100
+
+/********************************************************************************************/
+/* PCIE reg base */
+/********************************************************************************************/
+#define PCIE_BASE_ADDR 0x1E100000U
+#define AP_DMA_REG 0x10000U
+#define AP_IOB_TX_REG_BASE 0x0U
+#define AP_IOB_RX_REG_BASE 0x4000U
+#define AP_GLOBAL_REG_BASE 0x8000U
+
+/********************************************************************************************/
+/* PCIE AP DMA REG */
+/********************************************************************************************/
+#define PCIE_DMA_EP_INT_MSK_REG 0x24 /* DMA_EP_INT_MSK */
+#define PCIE_DMA_EP_INT_REG 0x28 /* DMA_EP_INT */
+#define PCIE_DMA_EP_INT_STS_REG 0x2C /* DMA_EP_INT_STS */
+#define PCIE_DMA_FLR_DISABLE_REG 0xA00 /* DMA_FLR_DISABLE */
+#define PCIE_DMA_QUEUE_SQ_BASE_L_0_REG 0x2000 /* DMA Queue SQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_SQ_BASE_H_0_REG 0x2004 /* DMA Queue SQ Base Address High Register */
+#define PCIE_DMA_QUEUE_SQ_DEPTH_0_REG 0x2008 /* DMA Queue SQ Depth */
+#define PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG 0x200C /* DMA Queue SQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_L_0_REG 0x2010 /* DMA Queue CQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_H_0_REG 0x2014 /* DMA Queue CQ Base Address High Register */
+#define PCIE_DMA_QUEUE_CQ_DEPTH_0_REG 0x2018 /* DMA Queue CQ Depth */
+#define PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG 0x201C /* DMA Queue CQ Head Pointer Register */
+#define PCIE_DMA_QUEUE_CTRL0_0_REG 0x2020 /* DMA Queue control Register 0 */
+#define PCIE_DMA_QUEUE_CTRL1_0_REG 0x2024 /* DMA Queue control Register 1 */
+#define PCIE_DMA_QUEUE_FSM_STS_0_REG 0x2030 /* DMA Queue FSM Status Register */
+#define PCIE_DMA_QUEUE_SQ_STS_0_REG 0x2034 /* DMA Queue SQ and CQ status Register */
+#define PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG 0x203C /* DMA Queue CQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_INT_STS_0_REG 0x2040 /* DMA Queue Interrupt Status */
+#define PCIE_DMA_QUEUE_INT_MSK_0_REG 0x2044 /* DMA Queue Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_ERR_INT_STS_0_REG 0x2048 /* DMA Queue Err Interrupt Status */
+#define PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG 0x204C /* DMA Queue Err Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_INT_RO_0_REG 0x206C /* DMA Queue Interrupt RO Register */
+
+/********************************************************************************************/
+/* PCIE AP_GLOBAL_REG */
+/********************************************************************************************/
+#define PCIE_CE_ENA 0x0008
+#define PCIE_UNF_ENA 0x0010
+#define PCIE_UF_ENA 0x0018
+
+#define PCIE_MSI_MASK 0x00F4
+#define PORT_INTX_ASSERT_MASK 0x01B0
+#define PORT_INTX_DEASSERT_MASK 0x01B4
+
+#define PCIE_AP_NI_ENA 0x0100
+#define PCIE_AP_CE_ENA 0x0104
+#define PCIE_AP_UNF_ENA 0x0108
+#define PCIE_AP_UF_ENA 0x010c
+#define PCIE_AP_NI_MASK 0x0110
+#define PCIE_AP_CE_MASK 0x0114
+#define PCIE_AP_UNF_MASK 0x0118
+#define PCIE_AP_UF_MASK 0x011C
+#define PCIE_AP_NI_STATUS 0x0120
+#define PCIE_AP_CE_STATUS 0x0124
+#define PCIE_AP_UNF_STATUS 0x0128
+#define PCIE_AP_UF_STATUS 0x012C
+#define PCIE_CORE_NI_ENA 0x0160
+#define PCIE_CORE_CE_ENA 0x0164
+#define PCIE_CORE_UNF_ENA 0x0168
+#define PCIE_CORE_UF_ENA 0x016c
+
+#define AP_PORT_EN_REG 0x0800
+#define AP_APB_SYN_RST 0x0810
+#define AP_AXI_SYN_RST 0x0814
+#define AP_IDLE 0x0C08
+
+/********************************************************************************************/
+/* PCIE AP_IOB_RX_COM_REG Reg */
+/********************************************************************************************/
+#define IOB_RX_AML_SNOOP 0x1AAC
+#define IOB_RX_MSI_INT_CTRL 0x1040
+
+#define IOB_RX_MSI_INT_ADDR_HIGH 0x1044
+#define IOB_RX_MSI_INT_ADDR_LOW 0x1048
+
+#define IOB_RX_PAB_SMMU_BYPASS_CTRL 0x2004
+
+#define IOB_RX_DMA_REG_REMAP_0 0x0E30
+#define IOB_RX_DMA_REG_REMAP_1 0x0E34
+
+#endif /* EDMA_REG_H */
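
[Editor's note] The queue registers listed above are accessed through pcie_dma_read()/pcie_dma_write() in edma_queue.c, usually as a read-modify-write of one field in a 32-bit register. The real bitfield unions (U_DMA_QUEUE_CTRL0 and friends) live in edma_drv.h; the union below is a made-up layout used only to illustrate the pattern:

/* Illustrative read-modify-write of one bitfield in a 32-bit register.
 * A plain variable stands in for the mapped BAR register.
 */
#include <stdint.h>
#include <stdio.h>

union example_queue_ctrl {
	uint32_t u32;
	struct {
		uint32_t queue_en    : 1;
		uint32_t queue_pause : 1;
		uint32_t reserved    : 30;
	} bits;
};

static uint32_t fake_reg;                         /* stand-in for the readl()/writel() target */

static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void example_enable_queue(uint32_t enable)
{
	union example_queue_ctrl reg;

	reg.u32 = reg_read();             /* read the whole 32-bit register    */
	reg.bits.queue_en = enable & 1;   /* modify only the field of interest */
	reg_write(reg.u32);               /* write back the full value         */
}

int main(void)
{
	fake_reg = 0x2;                   /* pause bit already set             */
	example_enable_queue(1);
	printf("reg = 0x%x\n", fake_reg); /* 0x3: enable set, pause preserved  */
	return 0;
}
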
diff --git a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
index d1df99b0c9fd..8d284d5f6e62 100644
--- a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
+++ b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
@@ -47,6 +47,17 @@ enum intr_mod {
INTR_ENABLE = 1,
};
+enum addr_type {
+ TYPE_EDMA_ADDR = 0,
+ TYPE_VETH_ADDR = 1,
+};
+
+enum pci_type_e {
+ PCI_TYPE_UNKNOWN,
+ PCI_TYPE_171x,
+ PCI_TYPE_1712
+};
+
struct bma_dma_addr_s {
dma_addr_t dma_addr;
u32 dma_data_len;
@@ -66,10 +77,28 @@ union transfer_u {
struct dmalist_transfer_s list;
};
+struct bspveth_dmal {
+ u32 chl;
+ u32 len;
+ u32 slow;
+ u32 shi;
+ u32 dlow;
+ u32 dhi;
+};
+
struct bma_dma_transfer_s {
enum dma_type_e type;
enum dma_direction_e dir;
union transfer_u transfer;
+ struct bspveth_dmal *pdmalbase_v;
+ u32 dmal_cnt;
+};
+
+struct bma_map_addr_s {
+ enum pci_type_e pci_type;
+ u32 host_number;
+ enum addr_type addr_type;
+ u32 addr;
};
int bma_intf_register_int_notifier(struct notifier_block *nb);
@@ -91,4 +120,21 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len);
unsigned int bma_cdev_check_recv(void *handle);
void *bma_cdev_get_wait_queue(void *handle);
int bma_intf_check_edma_supported(void);
+
+enum pci_type_e get_pci_type(void);
+void set_pci_type(enum pci_type_e type);
+
+int bma_intf_get_host_number(unsigned int *host_number);
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr);
+
+#define HOST_NUMBER_0 0
+#define HOST_NUMBER_1 1
+
+#define EDMA_1711_HOST0_ADDR 0x84810000
+#define VETH_1711_HOST0_ADDR 0x84820000
+#define EDMA_1712_HOST0_ADDR 0x85400000
+#define VETH_1712_HOST0_ADDR 0x85410000
+#define EDMA_1712_HOST1_ADDR 0x87400000
+#define VETH_1712_HOST1_ADDR 0x87410000
+
#endif
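
[Editor's note] bma_intf_get_host_number() and bma_intf_get_map_address() resolve the shared-memory base from the detected chip type (171x vs 1712), the host number and the address type. Their implementation is in the edma driver and not visible in this hunk; the lookup below is only an illustrative sketch built from the constants and struct bma_map_addr_s added here:

/* Illustrative lookup only; the in-tree bma_intf_get_map_address() may differ. */
#include <errno.h>
#include <stddef.h>

typedef unsigned long long phys_addr_t;
typedef unsigned int u32;

enum pci_type_e { PCI_TYPE_UNKNOWN, PCI_TYPE_171x, PCI_TYPE_1712 };
enum addr_type { TYPE_EDMA_ADDR = 0, TYPE_VETH_ADDR = 1 };

struct bma_map_addr_s {
	enum pci_type_e pci_type;
	u32 host_number;
	enum addr_type addr_type;
	u32 addr;
};

static const struct bma_map_addr_s addr_map[] = {
	{ PCI_TYPE_171x, 0, TYPE_EDMA_ADDR, 0x84810000 }, /* EDMA_1711_HOST0_ADDR */
	{ PCI_TYPE_171x, 0, TYPE_VETH_ADDR, 0x84820000 }, /* VETH_1711_HOST0_ADDR */
	{ PCI_TYPE_1712, 0, TYPE_EDMA_ADDR, 0x85400000 }, /* EDMA_1712_HOST0_ADDR */
	{ PCI_TYPE_1712, 0, TYPE_VETH_ADDR, 0x85410000 }, /* VETH_1712_HOST0_ADDR */
	{ PCI_TYPE_1712, 1, TYPE_EDMA_ADDR, 0x87400000 }, /* EDMA_1712_HOST1_ADDR */
	{ PCI_TYPE_1712, 1, TYPE_VETH_ADDR, 0x87410000 }, /* VETH_1712_HOST1_ADDR */
};

static int example_get_map_address(enum pci_type_e pci, u32 host,
				   enum addr_type type, phys_addr_t *addr)
{
	size_t i;

	for (i = 0; i < sizeof(addr_map) / sizeof(addr_map[0]); i++) {
		if (addr_map[i].pci_type == pci &&
		    addr_map[i].host_number == host &&
		    addr_map[i].addr_type == type) {
			*addr = addr_map[i].addr;
			return 0;
		}
	}
	return -EINVAL;
}

int main(void)
{
	phys_addr_t a = 0;

	/* expect 0, a == 0x87410000 */
	return example_get_map_address(PCI_TYPE_1712, 1, TYPE_VETH_ADDR, &a);
}
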
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
index 0d82ee6f7c83..745d83b431f8 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
@@ -23,7 +23,7 @@
#ifdef DRV_VERSION
#define KBOX_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define KBOX_VERSION "0.3.10"
+#define KBOX_VERSION "0.4.0"
#endif
#define UNUSED(x) (x = x)
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
index 9d918edae703..774229ae8dd1 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
@@ -495,6 +495,11 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
int err = 0;
u8 *shmq_head_p = NULL;
struct bspveth_shmq_hd *shmq_head = NULL;
+ phys_addr_t veth_address = 0;
+
+ err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (err != 0)
+ goto failed;
if (!pvethdev)
return BSP_ERR_NULL_POINTER;
@@ -526,7 +531,7 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
+ SHMDMAL_OFFSET);
pvethdev->ptx_queue[qid]->pdmalbase_p =
- (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC +
+ (u8 *)(u64)(veth_address +
MAX_SHAREQUEUE_SIZE * qid +
SHMDMAL_OFFSET);
@@ -851,6 +856,11 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
int qid, i, err = 0;
struct bspveth_shmq_hd *shmq_head = NULL;
u8 *shmq_head_p = NULL;
+ phys_addr_t veth_address = 0;
+
+ err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+ if (err != 0)
+ goto failed;
if (!pvethdev)
return BSP_ERR_NULL_POINTER;
@@ -885,7 +895,7 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
+ SHMDMAL_OFFSET);
pvethdev->prx_queue[qid]->pdmalbase_p =
- (u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC
+ (u8 *)(u64)(veth_address
+ MAX_SHAREQUEUE_SIZE * (qid + 1)
+ SHMDMAL_OFFSET);
memset(pvethdev->prx_queue[qid]->pdmalbase_v, 0,
@@ -1236,6 +1246,8 @@ void veth_netdev_func_init(struct net_device *dev)
{
struct tag_pcie_comm_priv *priv =
(struct tag_pcie_comm_priv *)netdev_priv(dev);
+ u32 host_number = 0;
+ int ret = 0;
/*9C:7D:A3:28:6F:F9*/
unsigned char veth_mac[ETH_ALEN] = {0x9C, 0x7D, 0xA3, 0x28, 0x6F, 0xF9};
@@ -1243,6 +1255,12 @@ void veth_netdev_func_init(struct net_device *dev)
ether_setup(dev);
+ ret = bma_intf_get_host_number(&host_number);
+ if (ret < 0) {
+ VETH_LOG(DLOG_ERROR, "bma_intf_get_host_number failed!\n");
+ return;
+ }
+
dev->netdev_ops = &veth_ops;
dev->watchdog_timeo = BSPVETH_NET_TIMEOUT;
@@ -1257,6 +1275,7 @@ void veth_netdev_func_init(struct net_device *dev)
memset(priv, 0, sizeof(struct tag_pcie_comm_priv));
strncpy(priv->net_type, MODULE_NAME, NET_TYPE_LEN);
+ veth_mac[ETH_ALEN - 1] = (host_number == 0 ? 0xF9 : 0xFB);
eth_hw_addr_set(dev, veth_mac);
VETH_LOG(DLOG_DEBUG, "set veth MAC addr OK\n");
@@ -2226,6 +2245,8 @@ s32 __start_dmalist_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, u32 type)
dma_transfer.type = DMA_LIST;
dma_transfer.transfer.list.dma_addr =
(dma_addr_t)prxtx_queue->pdmalbase_p;
+ dma_transfer.pdmalbase_v = prxtx_queue->pdmalbase_v;
+ dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
ret = bma_intf_start_dma(g_bspveth_dev.bma_priv, &dma_transfer);
if (ret < 0)
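
[Editor's note] With this change the veth driver passes the raw descriptor list (pdmalbase_v, dmal_cnt) alongside the legacy list address, so the Hi1712 queue path can rebuild SQ entries from it. Going by the field mapping in submit_dma_queue_sq() (slow/shi = source low/high, dlow/dhi = destination low/high, len = bytes), one descriptor could be filled roughly as follows; this is a sketch only, with hypothetical addresses, not the in-tree code:

/* Sketch of filling one descriptor for a host-to-BMC copy. The struct
 * mirrors bspveth_dmal as moved into bma_ker_intf.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

struct bspveth_dmal {
	u32 chl;
	u32 len;
	u32 slow;
	u32 shi;
	u32 dlow;
	u32 dhi;
};

static void fill_dmal(struct bspveth_dmal *d, uint64_t src, uint64_t dst, u32 len)
{
	memset(d, 0, sizeof(*d));
	d->len  = len;
	d->slow = (u32)(src & 0xffffffff);   /* source address, low 32 bits       */
	d->shi  = (u32)(src >> 32);          /* source address, high 32 bits      */
	d->dlow = (u32)(dst & 0xffffffff);   /* destination address, low 32 bits  */
	d->dhi  = (u32)(dst >> 32);          /* destination address, high 32 bits */
}

int main(void)
{
	struct bspveth_dmal d;

	fill_dmal(&d, 0x123400000ULL, 0x85410000ULL, 2048); /* hypothetical addresses */
	printf("src 0x%x:0x%x dst 0x%x:0x%x len %u\n",
	       d.shi, d.slow, d.dhi, d.dlow, d.len);
	return 0;
}
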
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
index 242d3ec128d3..f8b7e2f8d604 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
@@ -31,7 +31,7 @@ extern "C" {
#ifdef DRV_VERSION
#define VETH_VERSION MICRO_TO_STR(DRV_VERSION)
#else
-#define VETH_VERSION "0.3.10"
+#define VETH_VERSION "0.4.0"
#endif
#define MODULE_NAME "veth"
@@ -67,7 +67,6 @@ extern "C" {
#define SYSCTL_REG_SIZE (0x1000)
#define PCIE1_REG_BASE (0x29000000)
#define PCIE1_REG_SIZE (0x1000)
-#define VETH_SHAREPOOL_BASE_INBMC (0x84820000)
#define VETH_SHAREPOOL_SIZE (0xdf000)
#define VETH_SHAREPOOL_OFFSET (0x10000)
#define MAX_SHAREQUEUE_SIZE (0x20000)
@@ -261,15 +260,6 @@ struct bspveth_dma_bd {
u32 off;
};
-struct bspveth_dmal {
- u32 chl;
- u32 len;
- u32 slow;
- u32 shi;
- u32 dlow;
- u32 dhi;
-};
-
struct bspveth_rxtx_q {
#ifndef VETH_BMC
struct bspveth_dma_bd *pbdbase_v;
--
2.33.0