From: Huangjunhua <huangjunhua14@huawei.com>
Date: 11 Aug 2025

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICB3EN
CVE: NA
-----------------------------------------
To meet the competitive evolution requirements ("5+1+1") of the
new-generation Kunpeng ARM platform, the Tianchi architecture, and the
BMC management system, the product teams, BMC, and HiSilicon jointly
planned the next-generation BMC evolution chip, the Hi1712.
Building on the Hi1711, the Hi1712 enhances interfaces, computing
power, and security. The Huawei iBMA driver needs adaptation to
support the Hi1712 for in-band and out-of-band communication.
Signed-off-by: Huangjunhua <huangjunhua14@huawei.com>
---
 MAINTAINERS                                   |   6 +
 .../ethernet/huawei/bma/cdev_drv/bma_cdev.c   |   2 +-
 .../bma/cdev_veth_drv/virtual_cdev_eth_net.c  |  18 +-
 .../bma/cdev_veth_drv/virtual_cdev_eth_net.h  |   1 -
 .../net/ethernet/huawei/bma/edma_drv/Makefile |   2 +-
 .../huawei/bma/edma_drv/bma_devintf.c         | 118 ++++-
 .../ethernet/huawei/bma/edma_drv/bma_pci.c    | 277 +++++++++--
 .../ethernet/huawei/bma/edma_drv/bma_pci.h    |  33 +-
 .../ethernet/huawei/bma/edma_drv/edma_drv.h   | 340 +++++++++++++
 .../ethernet/huawei/bma/edma_drv/edma_host.c  | 160 +++++-
 .../ethernet/huawei/bma/edma_drv/edma_host.h  |  14 +-
 .../ethernet/huawei/bma/edma_drv/edma_queue.c | 470 ++++++++++++++++++
 .../ethernet/huawei/bma/edma_drv/edma_queue.h |  29 ++
 .../ethernet/huawei/bma/edma_drv/edma_reg.h   | 127 +++++
 .../huawei/bma/include/bma_ker_intf.h         |  46 ++
 .../huawei/bma/kbox_drv/kbox_include.h        |   2 +-
 .../ethernet/huawei/bma/veth_drv/veth_hb.c    |  25 +-
 .../ethernet/huawei/bma/veth_drv/veth_hb.h    |  12 +-
 18 files changed, 1582 insertions(+), 100 deletions(-)
 create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
 create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
 create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
 create mode 100644 drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
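
Note for reviewers: the core of this change is that consumers no longer
hard-code the 1711 shared-pool physical addresses; they ask the edma
driver for the address that matches the detected chip and host. A
minimal consumer sketch follows (the function name is hypothetical; it
assumes the new declarations land in bma_ker_intf.h, as the diffstat
suggests):

#include <linux/printk.h>
#include "../include/bma_ker_intf.h"

/* Hypothetical example: resolve the VETH shared-pool base at runtime
 * instead of hard-coding VETH_SHAREPOOL_BASE_INBMC.
 */
static int example_resolve_veth_base(void)
{
	unsigned int host = 0;
	phys_addr_t veth_base = 0;
	int ret;

	ret = bma_intf_get_host_number(&host);
	if (ret)
		return ret;

	ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_base);
	if (ret)
		return ret;	/* unknown chip/host/type combination */

	pr_debug("host %u, veth base %pap\n", host, &veth_base);
	return 0;
}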
diff --git a/MAINTAINERS b/MAINTAINERS
index 61baf2cfc4e1..446f2f49fd14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9707,6 +9707,12 @@ F:	drivers/net/ethernet/huawei/hinic3/cqm/
 F:	drivers/net/ethernet/huawei/hinic3/hw/
 F:	drivers/net/ethernet/huawei/hinic3/include/
 
+HUAWEI ETHERNET DRIVER
+M:	Huangjunhua <huangjunhua14@huawei.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/huawei/bma/
+
 HUAWEI BIFUR DRIVER
 M:	Xiaoping zheng <zhengxiaoping5@huawei.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
index 275c2cdfe5db..59181c829a68 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
@@ -28,7 +28,7 @@
 #ifdef DRV_VERSION
 #define CDEV_VERSION		MICRO_TO_STR(DRV_VERSION)
 #else
-#define CDEV_VERSION		"0.3.10"
+#define CDEV_VERSION		"0.4.0"
 #endif
 
 #define CDEV_DEFAULT_NUM	4
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
index e6dbec7073e4..adb6dd6972f5 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.c
@@ -151,6 +151,12 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
 	u8 *shmq_head = NULL;
 	u8 *shmq_head_p = NULL;
 	struct edma_rxtx_q_s *tx_queue = NULL;
+	int ret = 0;
+	phys_addr_t veth_address = 0;
+
+	ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+	if (ret != 0)
+		return -EFAULT;
 
 	tx_queue = (struct edma_rxtx_q_s *)
 		   kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -173,7 +179,7 @@ int edma_veth_setup_all_tx_resources(struct edma_eth_dev_s *edma_eth)
 
 	tx_queue->pdmalbase_v = (struct edma_dmal_s *)
 				(shmq_head + SHMDMAL_OFFSET);
-	tx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+	tx_queue->pdmalbase_p = (u8 *)(veth_address +
 				(MAX_SHAREQUEUE_SIZE * 0) + SHMDMAL_OFFSET);
 
 	memset(tx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
@@ -219,6 +225,12 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
 	u8 *shmq_head = NULL;
 	u8 *shmq_head_p = NULL;
 	struct edma_rxtx_q_s *rx_queue = NULL;
+	int ret = 0;
+	phys_addr_t veth_address = 0;
+
+	ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+	if (ret != 0)
+		return -EFAULT;
 
 	rx_queue = (struct edma_rxtx_q_s *)
 		   kmalloc(sizeof(struct edma_rxtx_q_s), GFP_KERNEL);
@@ -241,7 +253,7 @@ int edma_veth_setup_all_rx_resources(struct edma_eth_dev_s *edma_eth)
 	/* DMA address list (only used in host). */
 	rx_queue->pdmalbase_v = (struct edma_dmal_s *)
 				(shmq_head + SHMDMAL_OFFSET);
-	rx_queue->pdmalbase_p = (u8 *)(VETH_SHAREPOOL_BASE_INBMC +
+	rx_queue->pdmalbase_p = (u8 *)(veth_address +
 				MAX_SHAREQUEUE_SIZE + SHMDMAL_OFFSET);
 	memset(rx_queue->pdmalbase_v, 0, MAX_SHMDMAL_SIZE);
 
@@ -1304,6 +1316,8 @@ int __start_dmalist_H_2(struct edma_rxtx_q_s *prxtx_queue, u32 type, u32 cnt)
 	dma_transfer.type = DMA_LIST;
 	dma_transfer.transfer.list.dma_addr =
 	    (dma_addr_t)prxtx_queue->pdmalbase_p;
+	dma_transfer.pdmalbase_v = (struct bspveth_dmal *)prxtx_queue->pdmalbase_v;
+	dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
 
 	ret = bma_intf_start_dma(g_eth_edmaprivate.edma_priv, &dma_transfer);
 	LOG(DLOG_DEBUG, "after -> %u/%u/%u/%u, ret: %d",
diff --git a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
index cb7c28cb5ddd..bc4b2147272b 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
+++ b/drivers/net/ethernet/huawei/bma/cdev_veth_drv/virtual_cdev_eth_net.h
@@ -56,7 +56,6 @@
 #define BSP_ERR_AGAIN                  (BSP_ETH_ERR_BASE + 18)
 #define BSP_ERR_NOT_TO_HANDLE          (BSP_ETH_ERR_BASE + 19)
 
-#define VETH_SHAREPOOL_BASE_INBMC  (0x84820000)
 #define VETH_SHAREPOOL_SIZE        (0xdf000)
 #define VETH_SHAREPOOL_OFFSET      (0x10000)
 #define MAX_SHAREQUEUE_SIZE        (0x20000)
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
index 46cc51275a71..048bcb9e2bbe 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BMA) += host_edma_drv.o
-host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o
+host_edma_drv-y := bma_pci.o bma_devintf.o edma_host.o edma_queue.o
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
index 3b5eb39d6da6..45815fdc18eb 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
@@ -31,6 +31,18 @@ static struct bma_dev_s *g_bma_dev;
 
 static ATOMIC_NOTIFIER_HEAD(bma_int_notify_list);
 
+static enum pci_type_e g_pci_type = PCI_TYPE_UNKNOWN;
+
+enum pci_type_e get_pci_type(void)
+{
+	return g_pci_type;
+}
+
+void set_pci_type(enum pci_type_e type)
+{
+	g_pci_type = type;
+}
+
 static int bma_priv_insert_priv_list(struct bma_priv_data_s *priv, u32 type,
 				     u32 sub_type)
 {
@@ -342,6 +354,82 @@ int bma_intf_unregister_type(void **handle)
 }
 EXPORT_SYMBOL(bma_intf_unregister_type);
 
+int bma_intf_get_host_number(unsigned int *host_number)
+{
+	unsigned int devfn = 0;
+
+	if (!host_number)
+		return -EFAULT;
+
+	if (!g_bma_dev) {
+		BMA_LOG(DLOG_ERROR, "g_bma_dev is NULL\n");
+		return -ENXIO;
+	}
+
+	devfn = g_bma_dev->bma_pci_dev->pdev->devfn;
+	BMA_LOG(DLOG_DEBUG, "devfn is %u\n", devfn);
+	if (devfn == PF7 || devfn == PF10) {
+		*host_number = HOST_NUMBER_0;
+	} else if (devfn == PF4) {
+		*host_number = HOST_NUMBER_1;
+	} else {
+		BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+		*host_number = HOST_NUMBER_0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bma_intf_get_host_number);
+
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr)
+{
+	u32 host_number = 0;
+	u32 devfn = 0;
+	u32 i = 0;
+	enum pci_type_e pci_type = get_pci_type();
+	struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+	static struct bma_map_addr_s addr_info[] = {
+		{PCI_TYPE_UNKNOWN,  HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+		{PCI_TYPE_UNKNOWN,  HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+		{PCI_TYPE_171x,	 HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR},
+		{PCI_TYPE_171x,	 HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR},
+		{PCI_TYPE_1712,	 HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1712_HOST0_ADDR},
+		{PCI_TYPE_1712,	 HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1712_HOST0_ADDR},
+		{PCI_TYPE_1712,	 HOST_NUMBER_1, TYPE_EDMA_ADDR, EDMA_1712_HOST1_ADDR},
+		{PCI_TYPE_1712,	 HOST_NUMBER_1, TYPE_VETH_ADDR, VETH_1712_HOST1_ADDR},
+	};
+
+	if (!bma_pci_dev) {
+		BMA_LOG(DLOG_ERROR, "bma_pci_dev is null\n");
+		return -EFAULT;
+	}
+
+	devfn = bma_pci_dev->pdev->devfn;
+	if (devfn == PF7 || devfn == PF10) {
+		host_number = HOST_NUMBER_0;
+	} else if (devfn == PF4) {
+		host_number = HOST_NUMBER_1;
+	} else {
+		BMA_LOG(DLOG_DEBUG, "Treat as host0 because of unknown PF %u\n", devfn);
+		host_number = HOST_NUMBER_0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(addr_info); i++) {
+		if (pci_type == addr_info[i].pci_type &&
+		    host_number == addr_info[i].host_number && type == addr_info[i].addr_type) {
+			*addr = addr_info[i].addr;
+			return 0;
+		}
+	}
+
+	BMA_LOG(DLOG_DEBUG,
+		"Cannot find proper map address! pci_type: %u, host_number: %u, addr_type: %u\n",
+		pci_type, host_number, type);
+	return -EFAULT;
+}
+EXPORT_SYMBOL(bma_intf_get_map_address);
+
 int bma_intf_check_edma_supported(void)
 {
 	return !(!g_bma_dev);
@@ -350,13 +438,30 @@ EXPORT_SYMBOL(bma_intf_check_edma_supported);
 
 int bma_intf_check_dma_status(enum dma_direction_e dir)
 {
-	return edma_host_check_dma_status(dir);
+	enum pci_type_e pci_type = get_pci_type();
+
+	if (pci_type == PCI_TYPE_UNKNOWN) {
+		BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+		return -EFAULT;
+	}
+
+	return get_bma_pci_dev_handler_s()[pci_type].check_dma(dir);
 }
 EXPORT_SYMBOL(bma_intf_check_dma_status);
 
 void bma_intf_reset_dma(enum dma_direction_e dir)
 {
-	edma_host_reset_dma(&g_bma_dev->edma_host, dir);
+	enum pci_type_e pci_type = get_pci_type();
+
+	if (!g_bma_dev)
+		return;
+
+	if (pci_type == PCI_TYPE_UNKNOWN) {
+		BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+		return;
+	}
+
+	get_bma_pci_dev_handler_s()[pci_type].reset_dma(&g_bma_dev->edma_host, dir);
 }
 EXPORT_SYMBOL(bma_intf_reset_dma);
 
@@ -375,10 +480,16 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
 {
 	int ret = 0;
 	struct bma_priv_data_s *priv = (struct bma_priv_data_s *)handle;
+	enum pci_type_e pci_type = get_pci_type();
 
 	if (!handle || !dma_transfer)
 		return -EFAULT;
 
+	if (pci_type == PCI_TYPE_UNKNOWN) {
+		BMA_LOG(DLOG_ERROR, "pci type is UNKNOWN.\n");
+		return -EFAULT;
+	}
+
 	ret = edma_host_dma_start(&g_bma_dev->edma_host, priv);
 	if (ret) {
 		BMA_LOG(DLOG_ERROR,
@@ -386,7 +497,8 @@ int bma_intf_start_dma(void *handle, struct bma_dma_transfer_s *dma_transfer)
 		return ret;
 	}
 
-	ret = edma_host_dma_transfer(&g_bma_dev->edma_host, priv, dma_transfer);
+	ret = get_bma_pci_dev_handler_s()[pci_type].transfer_edma_host(&g_bma_dev->edma_host, priv,
+								       dma_transfer);
 	if (ret)
 		BMA_LOG(DLOG_ERROR,
 			"edma_host_dma_transfer failed! ret = %d\n", ret);
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
index 577acaedb0e2..0e43289e0d1a 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.c
@@ -27,13 +27,20 @@
 
 #define PCI_VENDOR_ID_HUAWEI_PME	0x19e5
 #define PCI_DEVICE_ID_KBOX_0_PME	0x1710
+#define PCI_DEVICE_ID_EDMA_0		0x1712
 #define PCI_PME_USEABLE_SPACE		(4 * 1024 * 1024)
+
+#define HOSTRTC_OFFSET	0x10000
+#define EDMA_OFFSET	0x20000
+#define VETH_OFFSET	0x30000
+
 #define PME_DEV_CHECK(device, vendor) ((device) == PCI_DEVICE_ID_KBOX_0_PME && \
 				       (vendor) == PCI_VENDOR_ID_HUAWEI_PME)
 
 #define PCI_BAR0_PME_1710		0x85800000
 #define PCI_BAR0			0
 #define PCI_BAR1			1
+#define PCI_BAR2			2
 #define PCI_USING_DAC_DEFAULT 0
 
 #define GET_HIGH_ADDR(address)	((sizeof(unsigned long) == 8) ? \
@@ -51,15 +58,50 @@ int debug = DLOG_ERROR;
 MODULE_PARM_DESC(debug, "Debug switch (0=close debug, 1=open debug)");
 
 static struct bma_pci_dev_s *g_bma_pci_dev;
+struct bma_pci_dev_s *get_bma_pci_dev(void)
+{
+	return g_bma_pci_dev;
+}
+
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev)
+{
+	g_bma_pci_dev = bma_pci_dev;
+}
 
 static int bma_pci_suspend(struct pci_dev *pdev, pm_message_t state);
 static int bma_pci_resume(struct pci_dev *pdev);
 static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void bma_pci_remove(struct pci_dev *pdev);
 
+static struct bma_pci_dev_handler_s g_bma_pci_dev_handler_s[] = {
+	{0},
+	// for 1710/1711
+	{
+		.ioremap_bar_mem = ioremap_pme_bar_mem_v1,
+		.iounmap_bar_mem = iounmap_bar_mem_v1,
+		.check_dma = edma_host_check_dma_status_v1,
+		.transfer_edma_host = edma_host_dma_transfer_v1,
+		.reset_dma = edma_host_reset_dma_v1,
+	},
+	// for 1712
+	{
+		.ioremap_bar_mem = ioremap_pme_bar_mem_v2,
+		.iounmap_bar_mem = iounmap_bar_mem_v2,
+		.check_dma = edma_host_check_dma_status_v2,
+		.transfer_edma_host = edma_host_dma_transfer_v2,
+		.reset_dma = edma_host_reset_dma_v2,
+	}
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void)
+{
+	return g_bma_pci_dev_handler_s;
+}
+
 static const struct pci_device_id bma_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_FPGA, PCI_DEVICE_ID_KBOX_0)},
 	{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_KBOX_0_PME)},
+	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_PME, PCI_DEVICE_ID_EDMA_0) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, bma_pci_tbl);
@@ -73,7 +115,7 @@ int edma_param_get_statics(char *buf, const struct kernel_param *kp)
 }
 
 module_param_call(statistics, NULL, edma_param_get_statics, &debug, 0444);
-MODULE_PARM_DESC(statistics, "Statistics info of edma driver,readonly");
+MODULE_PARM_DESC(statistics, "Statistics info of edma driver, readonly");
 
 int edma_param_set_debug(const char *buf, const struct kernel_param *kp)
 {
@@ -99,34 +141,40 @@ module_param_call(debug, &edma_param_set_debug, &param_get_int, &debug, 0644);
 
 void __iomem *kbox_get_base_addr(void)
 {
-	if (!g_bma_pci_dev || (!(g_bma_pci_dev->kbox_base_addr))) {
+	struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+	if (!bma_pci_dev || (!(bma_pci_dev->kbox_base_addr))) {
 		BMA_LOG(DLOG_ERROR, "kbox_base_addr NULL point\n");
 		return NULL;
 	}
 
-	return g_bma_pci_dev->kbox_base_addr;
+	return bma_pci_dev->kbox_base_addr;
 }
 EXPORT_SYMBOL_GPL(kbox_get_base_addr);
 
 unsigned long kbox_get_io_len(void)
 {
-	if (!g_bma_pci_dev) {
-		BMA_LOG(DLOG_ERROR, "kbox_io_len is error,can not get it\n");
+	struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+	if (!bma_pci_dev) {
+		BMA_LOG(DLOG_ERROR, "kbox_io_len is error, can not get it\n");
 		return 0;
 	}
 
-	return g_bma_pci_dev->kbox_base_len;
+	return bma_pci_dev->kbox_base_len;
 }
 EXPORT_SYMBOL_GPL(kbox_get_io_len);
 
 unsigned long kbox_get_base_phy_addr(void)
 {
-	if (!g_bma_pci_dev || !g_bma_pci_dev->kbox_base_phy_addr) {
+	struct bma_pci_dev_s *bma_pci_dev = get_bma_pci_dev();
+
+	if (!bma_pci_dev || bma_pci_dev->kbox_base_phy_addr == 0) {
 		BMA_LOG(DLOG_ERROR, "kbox_base_phy_addr NULL point\n");
 		return 0;
 	}
 
-	return g_bma_pci_dev->kbox_base_phy_addr;
+	return bma_pci_dev->kbox_base_phy_addr;
 }
 EXPORT_SYMBOL_GPL(kbox_get_base_phy_addr);
 
@@ -160,7 +208,7 @@ s32 __atu_config_H(struct pci_dev *pdev, unsigned int region,
 	return 0;
 }
 
-static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev)
 {
 	if (bma_pci_dev->kbox_base_addr) {
 		iounmap(bma_pci_dev->kbox_base_addr);
@@ -171,15 +219,84 @@ static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
 		iounmap(bma_pci_dev->bma_base_addr);
 		bma_pci_dev->bma_base_addr = NULL;
 		bma_pci_dev->edma_swap_addr = NULL;
+		bma_pci_dev->veth_swap_addr = NULL;
 		bma_pci_dev->hostrtc_viraddr = NULL;
 	}
 }
 
-static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
-				struct bma_pci_dev_s *bma_pci_dev)
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev)
+{
+	if (bma_pci_dev->kbox_base_addr) {
+		iounmap(bma_pci_dev->kbox_base_addr);
+		bma_pci_dev->kbox_base_addr = NULL;
+	}
+
+	if (bma_pci_dev->bma_base_addr) {
+		iounmap(bma_pci_dev->bma_base_addr);
+		bma_pci_dev->bma_base_addr = NULL;
+	}
+
+	if (bma_pci_dev->hostrtc_viraddr) {
+		iounmap(bma_pci_dev->hostrtc_viraddr);
+		bma_pci_dev->hostrtc_viraddr = NULL;
+		bma_pci_dev->edma_swap_addr = NULL;
+		bma_pci_dev->veth_swap_addr = NULL;
+	}
+}
+
+static void iounmap_bar_mem(struct bma_pci_dev_s *bma_pci_dev)
+{
+	enum pci_type_e pci_type = get_pci_type();
+
+	if (pci_type == PCI_TYPE_UNKNOWN)
+		return;
+
+	g_bma_pci_dev_handler_s[pci_type].iounmap_bar_mem(bma_pci_dev);
+}
+
+static int config_atu(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+	int ret = 0;
+	phys_addr_t edma_address = 0;
+	phys_addr_t veth_address = 0;
+
+	ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+	if (ret != 0)
+		return ret;
+
+	ret = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+	if (ret != 0)
+		return ret;
+
+	__atu_config_H(pdev, 0,
+		       GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
+			(bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
+		0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
+
+	__atu_config_H(pdev, 1,
+		       GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
+			(bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
+			0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
+
+	__atu_config_H(pdev, 2,
+		       GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
+			(bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
+			0, edma_address, EDMA_SWAP_DATA_SIZE);
+
+	__atu_config_H(pdev, 3,
+		       GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
+			(bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
+			0, veth_address, VETH_SWAP_DATA_SIZE);
+
+	return ret;
+}
+
+// for 1710 1711
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
 {
 	unsigned long bar1_resource_flag = 0;
 	u32 data = 0;
+	int ret;
 
 	bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
 	BMA_LOG(DLOG_DEBUG, "1710\n");
@@ -217,25 +334,11 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
 		bma_pci_dev->edma_swap_phy_addr,
 		bma_pci_dev->veth_swap_phy_addr);
 
-	__atu_config_H(pdev, 0,
-		       GET_HIGH_ADDR(bma_pci_dev->kbox_base_phy_addr),
-			(bma_pci_dev->kbox_base_phy_addr & 0xffffffff),
-		0, PCI_BAR0_PME_1710, PCI_PME_USEABLE_SPACE);
-
-	__atu_config_H(pdev, 1,
-		       GET_HIGH_ADDR(bma_pci_dev->hostrtc_phyaddr),
-			(bma_pci_dev->hostrtc_phyaddr & 0xffffffff),
-			0, HOSTRTC_REG_BASE, HOSTRTC_REG_SIZE);
-
-	__atu_config_H(pdev, 2,
-		       GET_HIGH_ADDR(bma_pci_dev->edma_swap_phy_addr),
-			(bma_pci_dev->edma_swap_phy_addr & 0xffffffff),
-			0, EDMA_SWAP_DATA_BASE, EDMA_SWAP_DATA_SIZE);
-
-	__atu_config_H(pdev, 3,
-		       GET_HIGH_ADDR(bma_pci_dev->veth_swap_phy_addr),
-			(bma_pci_dev->veth_swap_phy_addr & 0xffffffff),
-			0, VETH_SWAP_DATA_BASE, VETH_SWAP_DATA_SIZE);
+	ret = config_atu(pdev, bma_pci_dev);
+	if (ret != 0) {
+		BMA_LOG(DLOG_DEBUG, "config atu failed.\n");
+		return ret;
+	}
 
 	if (bar1_resource_flag & IORESOURCE_CACHEABLE) {
 		bma_pci_dev->bma_base_addr =
@@ -250,7 +353,6 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
 	if (!bma_pci_dev->bma_base_addr) {
 		BMA_LOG(DLOG_ERROR,
 			"Cannot map device registers, aborting\n");
-
 		return -ENODEV;
 	}
 
@@ -270,11 +372,80 @@ static int ioremap_pme_bar1_mem(struct pci_dev *pdev,
 	return 0;
 }
 
+// for 1712
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
+{
+	unsigned long bar2_resource_flag = 0;
+
+	bma_pci_dev->kbox_base_len = PCI_PME_USEABLE_SPACE;
+	BMA_LOG(DLOG_DEBUG, "1712\n");
+
+	bma_pci_dev->bma_base_phy_addr = (unsigned long)pci_resource_start(pdev, PCI_BAR2);
+	bar2_resource_flag = (unsigned long)pci_resource_flags(pdev, PCI_BAR2);
+	if (!(bar2_resource_flag & IORESOURCE_MEM)) {
+		BMA_LOG(DLOG_ERROR, "Cannot find proper PCI device base address, aborting\n");
+		return -ENODEV;
+	}
+
+	bma_pci_dev->bma_base_len = (unsigned long)pci_resource_len(pdev, PCI_BAR2);
+	bma_pci_dev->edma_swap_len = EDMA_SWAP_DATA_SIZE;
+	bma_pci_dev->veth_swap_len = VETH_SWAP_DATA_SIZE;
+
+	BMA_LOG(DLOG_DEBUG,
+		"bar2: bma_base_len = 0x%lx, edma_swap_len = %ld, veth_swap_len = %ld(0x%lx)\n",
+		bma_pci_dev->bma_base_len, bma_pci_dev->edma_swap_len, bma_pci_dev->veth_swap_len,
+		bma_pci_dev->veth_swap_len);
+
+	bma_pci_dev->hostrtc_phyaddr = bma_pci_dev->bma_base_phy_addr + HOSTRTC_OFFSET;
+	/* edma */
+	bma_pci_dev->edma_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + EDMA_OFFSET;
+	/* veth */
+	bma_pci_dev->veth_swap_phy_addr = bma_pci_dev->bma_base_phy_addr + VETH_OFFSET;
+
+	BMA_LOG(DLOG_DEBUG,
+		"bar2: bma_base_phy_addr = 0x%lx, bma_base_len = %zu , hostrtc_phyaddr = 0x%lx, edma_swap_phy_addr = 0x%lx, veth_swap_phy_addr = 0x%lx\n",
+		bma_pci_dev->bma_base_phy_addr, bma_pci_dev->bma_base_len,
+		bma_pci_dev->hostrtc_phyaddr, bma_pci_dev->edma_swap_phy_addr,
+		bma_pci_dev->veth_swap_phy_addr);
+
+	bma_pci_dev->bma_base_addr = ioremap(bma_pci_dev->bma_base_phy_addr,
+					     bma_pci_dev->bma_base_len);
+	if (!bma_pci_dev->bma_base_addr) {
+		BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+		return -ENODEV;
+	}
+
+	if (bar2_resource_flag & IORESOURCE_CACHEABLE) {
+		BMA_LOG(DLOG_DEBUG, "ioremap with cache, %d\n", IORESOURCE_CACHEABLE);
+		bma_pci_dev->hostrtc_viraddr = ioremap(bma_pci_dev->hostrtc_phyaddr,
+						       bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+	} else {
+		BMA_LOG(DLOG_DEBUG, "ioremap without cache\n");
+		bma_pci_dev->hostrtc_viraddr = IOREMAP(bma_pci_dev->hostrtc_phyaddr,
+						       bma_pci_dev->bma_base_len - HOSTRTC_OFFSET);
+	}
+
+	if (!bma_pci_dev->hostrtc_viraddr) {
+		BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
+		iounmap(bma_pci_dev->bma_base_addr);
+		bma_pci_dev->bma_base_addr = NULL;
+		return -ENODEV;
+	}
+
+	bma_pci_dev->edma_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+				      - HOSTRTC_OFFSET + EDMA_OFFSET;
+	bma_pci_dev->veth_swap_addr = (unsigned char *)bma_pci_dev->hostrtc_viraddr
+				      - HOSTRTC_OFFSET + VETH_OFFSET;
+
+	return 0;
+}
+
 static int ioremap_bar_mem(struct pci_dev *pdev,
 			   struct bma_pci_dev_s *bma_pci_dev)
 {
 	int err = 0;
 	unsigned long bar0_resource_flag = 0;
+	enum pci_type_e pci_type = get_pci_type();
 
 	bar0_resource_flag = pci_resource_flags(pdev, PCI_BAR0);
 
@@ -294,8 +465,8 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
 		bma_pci_dev->kbox_base_phy_addr, bma_pci_dev->kbox_base_len,
 		bma_pci_dev->kbox_base_len);
 
-	if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
-		err = ioremap_pme_bar1_mem(pdev, bma_pci_dev);
+	if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && pci_type != PCI_TYPE_UNKNOWN) {
+		err = g_bma_pci_dev_handler_s[pci_type].ioremap_bar_mem(pdev, bma_pci_dev);
 		if (err != 0)
 			return err;
 	}
@@ -314,11 +485,7 @@ static int ioremap_bar_mem(struct pci_dev *pdev,
 
 	if (!bma_pci_dev->kbox_base_addr) {
 		BMA_LOG(DLOG_ERROR, "Cannot map device registers, aborting\n");
-
-		iounmap(bma_pci_dev->bma_base_addr);
-		bma_pci_dev->bma_base_addr = NULL;
-		bma_pci_dev->edma_swap_addr = NULL;
-		bma_pci_dev->hostrtc_viraddr = NULL;
+		iounmap_bar_mem(bma_pci_dev);
 		return -ENOMEM;
 	}
 
@@ -355,13 +522,14 @@ int pci_device_init(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev)
 {
 	int err = 0;
 
-	if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+	if ((pdev->device == PCI_DEVICE_ID_KBOX_0_PME || pdev->device == PCI_DEVICE_ID_EDMA_0) &&
+	    pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME) {
 		err = bma_devinft_init(bma_pci_dev);
 		if (err) {
 			BMA_LOG(DLOG_ERROR, "bma_devinft_init failed\n");
 			bma_devinft_cleanup(bma_pci_dev);
 			iounmap_bar_mem(bma_pci_dev);
-			g_bma_pci_dev = NULL;
+			set_bma_pci_dev(NULL);
 			pci_release_regions(pdev);
 			kfree(bma_pci_dev);
 		#ifdef CONFIG_PCI_MSI
@@ -400,27 +568,25 @@ int pci_device_config(struct pci_dev *pdev)
 		goto err_out_free_dev;
 	}
 
+	set_bma_pci_dev(bma_pci_dev);
+
 	err = ioremap_bar_mem(pdev, bma_pci_dev);
 	if (err) {
 		BMA_LOG(DLOG_ERROR, "ioremap_edma_io_mem failed\n");
 		goto err_out_release_regions;
 	}
 
-	g_bma_pci_dev = bma_pci_dev;
-
 	if (SET_DMA_MASK(&pdev->dev)) {
 		BMA_LOG(DLOG_ERROR,
-			"No usable DMA ,configuration, aborting,goto failed2!!!\n");
+			"No usable DMA, configuration, aborting, goto failed2!!!\n");
 		goto err_out_unmap_bar;
 	}
 
-	g_bma_pci_dev = bma_pci_dev;
-
 	return pci_device_init(pdev, bma_pci_dev);
 
 err_out_unmap_bar:
 	iounmap_bar_mem(bma_pci_dev);
-	g_bma_pci_dev = NULL;
+	set_bma_pci_dev(NULL);
 err_out_release_regions:
 	pci_release_regions(pdev);
 err_out_free_dev:
@@ -442,16 +608,27 @@ static int bma_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	UNUSED(ent);
 
-	if (g_bma_pci_dev)
+	if (get_bma_pci_dev())
 		return -EPERM;
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		BMA_LOG(DLOG_ERROR, "Cannot enable PCI device,aborting\n");
+		BMA_LOG(DLOG_ERROR, "Cannot enable PCI device, aborting\n");
 		return err;
 	}
 
-	if (PME_DEV_CHECK(pdev->device, pdev->vendor)) {
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_KBOX_0_PME:
+		set_pci_type(PCI_TYPE_171x);
+		break;
+	case PCI_DEVICE_ID_EDMA_0:
+		set_pci_type(PCI_TYPE_1712);
+		break;
+	default:
+		set_pci_type(PCI_TYPE_UNKNOWN);
+		break;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_HUAWEI_PME && get_pci_type() != PCI_TYPE_UNKNOWN) {
 		err = pme_pci_enable_msi(pdev);
 		if (err)
 			return err;
@@ -468,7 +645,7 @@ static void bma_pci_remove(struct pci_dev *pdev)
 	struct bma_pci_dev_s *bma_pci_dev =
 		(struct bma_pci_dev_s *)pci_get_drvdata(pdev);
 
-	g_bma_pci_dev = NULL;
+	set_bma_pci_dev(NULL);
 	(void)pci_set_drvdata(pdev, NULL);
 
 	if (bma_pci_dev) {
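
Note: g_bma_pci_dev_handler_s[] is indexed by enum pci_type_e, and the
PCI_TYPE_UNKNOWN slot is zero-filled, so indexing it would call a NULL
function pointer; every dispatch site above therefore checks the type
first. A guard helper could centralize that check; a sketch
(hypothetical, not part of this patch):

static struct bma_pci_dev_handler_s *bma_pci_dev_ops(void)
{
	enum pci_type_e type = get_pci_type();

	if (type == PCI_TYPE_UNKNOWN)
		return NULL;	/* caller must handle missing ops */

	return &get_bma_pci_dev_handler_s()[type];
}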
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
index a66724e2cb74..b43882997c01 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
@@ -18,6 +18,8 @@
 
 #include "bma_devintf.h"
 #include "bma_include.h"
+#include "../include/bma_ker_intf.h"
+#include "edma_host.h"
 #include <linux/netdevice.h>
 
 #define EDMA_SWAP_BASE_OFFSET	0x10000
@@ -25,10 +27,8 @@
 #define HOSTRTC_REG_BASE	0x2f000000
 #define HOSTRTC_REG_SIZE	EDMA_SWAP_BASE_OFFSET
 
-#define EDMA_SWAP_DATA_BASE	0x84810000
 #define EDMA_SWAP_DATA_SIZE	65536
 
-#define VETH_SWAP_DATA_BASE	0x84820000
 #define VETH_SWAP_DATA_SIZE	0xdf000
 
 #define ATU_VIEWPORT		0x900
@@ -71,7 +71,7 @@ struct bma_pci_dev_s {
 #ifdef DRV_VERSION
 #define BMA_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define BMA_VERSION "0.3.10"
+#define BMA_VERSION "0.4.0"
 #endif
 
 #ifdef CONFIG_ARM64
@@ -95,4 +95,31 @@ extern int debug;
 
 int edmainfo_show(char *buff);
 
+struct bma_pci_dev_s *get_bma_pci_dev(void);
+void set_bma_pci_dev(struct bma_pci_dev_s *bma_pci_dev);
+
+struct bma_pci_dev_handler_s {
+	int (*ioremap_bar_mem)(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+	void (*iounmap_bar_mem)(struct bma_pci_dev_s *bma_pci_dev);
+	int (*check_dma)(enum dma_direction_e dir);
+	int (*transfer_edma_host)(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+				  struct bma_dma_transfer_s *dma_transfer);
+	void (*reset_dma)(struct edma_host_s *edma_host, enum dma_direction_e dir);
+};
+
+struct bma_pci_dev_handler_s *get_bma_pci_dev_handler_s(void);
+
+int ioremap_pme_bar_mem_v1(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+int ioremap_pme_bar_mem_v2(struct pci_dev *pdev, struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v1(struct bma_pci_dev_s *bma_pci_dev);
+void iounmap_bar_mem_v2(struct bma_pci_dev_s *bma_pci_dev);
+int edma_host_check_dma_status_v1(enum dma_direction_e dir);
+int edma_host_check_dma_status_v2(enum dma_direction_e dir);
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+			      struct bma_dma_transfer_s *dma_transfer);
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+			      struct bma_dma_transfer_s *dma_transfer);
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir);
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir);
+
 #endif
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
new file mode 100644
index 000000000000..b0a09c022ba8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_drv.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_DRV_H
+#define EDMA_DRV_H
+
+#define DMA_STATISTICS_LEN  16
+#define DMA_CH_TAG_SIZE	 64
+
+#define HISILICON_VENDOR_ID	 0x19e5
+#define DMA_PCIE_DEVICE_ID	  0xa122
+
+#define MAX_DMA_CHS		 4  /* The current version supports a maximum of 2x2 channels. */
+#define DMA_CHS_EACH_PORT   2
+
+#define MAX_SQ_DEPTH	0xFFFF
+#define MAX_CQ_DEPTH	0xFFFF
+
+#define DMA_DONE_MASK	   0x1
+#define DMA_DONE_UNMASK	 0x0
+#define DMA_ERR_MASK		0x7FFFE
+#define DMA_ERR_UNMASK	  0x0
+
+#define BD_SO 0
+#define BD_RO 1
+
+#define SIZE_4M  0x400000
+#define SIZE_16K 0x4000
+#define SIZE_64K 0x10000
+#define SIZE_OF_U64 0x8
+#define SPD_SIZE_MAX 32
+
+/* Use integer arithmetic for approximate computation instead of floating-point. */
+#define US_PER_SECOND_DIV_1KB (1000000 / 1024)
+
+#define DMA_PHY_STORE_OFFSET (SIZE_64K - SIZE_OF_U64)
+#define DMA_RMT_PHY_STORE_OFFSET (DMA_PHY_STORE_OFFSET - SIZE_OF_U64)
+#define BIT_0_TO_31_MASK 0xFFFFFFFF
+
+#define DMA_TMOUT (2 * HZ) /* 2 seconds */
+
+enum {
+	EP0 = 0,
+	EP1 = 1
+};
+
+enum {
+	DRC_LOCAL  = 0,
+	DRC_REMOTE = 1
+};
+
+enum {
+	DIR_B2H = 0,
+	DIR_H2B = 1,
+};
+
+enum {
+	DMA_INIT		= 0x0,
+	DMA_RESET	   = 0x1,
+	DMA_PAUSE	   = 0x2,
+	DMA_NOTIFY	  = 0x3,
+	LINKDOWN		= 0x4,
+	LINKUP		  = 0x5,
+	FLR			 = 0x6
+};
+
+enum {
+	PF0 = 0,
+	PF1 = 1,
+	PF2 = 2,
+	PF4 = 4,
+	PF7 = 7,
+	PF10 = 10
+};
+
+enum {
+	RESERVED		= 0x0,  /* reserved */
+	SMALL_PACKET	= 0x1,  /* SmallPacket Descriptor */
+	DMA_READ		= 0x2,  /* Read Descriptor */
+	DMA_WRITE	   = 0x3,  /* Write Descriptor */
+	DMA_LOOP		= 0x4,  /* Loop Descriptor */
+	DMA_MIX		 = 0x10, /* not available, User-defined for test */
+	DMA_WD_BARRIER  = 0x11, /* not available, User-defined for test */
+	DMA_RD_BARRIER  = 0x12, /* not available, User-defined for test */
+	DMA_LP_BARRIER  = 0x13  /* not available, User-defined for test */
+};
+
+enum {
+	IDLE_STATE	  = 0x0,  /* dma channel in idle status */
+	RUN_STATE	   = 0x1,  /* dma channel in run status */
+	CPL_STATE	   = 0x2,  /* dma channel in cpld status */
+	PAUSE_STATE	 = 0x3,  /* dma channel in pause status */
+	HALT_STATE	  = 0x4,  /* dma channel in halt status */
+	ABORT_STATE	 = 0x5,  /* dma channel in abort status */
+	WAIT_STATE	  = 0x6   /* dma channel in wait status */
+};
+
+/* CQE status */
+enum {
+	DMA_DONE		  = 0x0,	/* sqe done succ */
+	OPCODE_ERR		= 0x1,	/* sqe opcode invalid */
+	LEN_ERR		   = 0x2,	/* sqe length invalid, only ocurs in smallpackt */
+	DROP_EN		   = 0x4,	/* sqe drop happen */
+	WR_RMT_ERR		= 0x8,	/* write data to host fail */
+	RD_RMT_ERR		= 0x10,   /* read data from host fail */
+	RD_AXI_ERR		= 0x20,   /* read data/sqe from local fail */
+	WR_AXI_ERR		= 0x40,   /* write data/cqe to local fail */
+	POISON_CPL_ERR	= 0x80,   /* poison data */
+	SUB_SQ_ERR		= 0x100,  /* read sqe with CPL TLP */
+	DMA_CH_RESET	  = 0x200,  /* dma channel should reset */
+	LINK_DOWN_ERR	 = 0x400,  /* linkdown happen */
+	RECOVERY		  = 0x800   /* error status to be reset */
+};
+
+enum {
+	SDI_DMA_ADDR_SIZE_16K = 0,
+	SDI_DMA_ADDR_SIZE_32K = 1,
+	SDI_DMA_ADDR_SIZE_64K = 2,
+	SDI_DMA_ADDR_SIZE_128K = 3
+};
+
+union U_DMA_QUEUE_SQ_DEPTH {
+	struct {
+		unsigned int	dma_queue_sq_depth	: 16; /* [15..0] */
+		unsigned int	reserved_0			: 16; /* [31..16] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_CQ_DEPTH {
+	struct {
+		unsigned int	dma_queue_cq_depth	: 16; /* [15..0] */
+		unsigned int	reserved_0			: 16; /* [31..16] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_CQ_HEAD_PTR {
+	struct {
+		unsigned int	dma_queue_cq_head_ptr : 16; /* [15..0] */
+		unsigned int	reserved_0			: 16; /* [31..16] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_CQ_TAIL_PTR {
+	struct {
+		unsigned int	dma_queue_cq_tail_ptr : 16; /* [15..0]  */
+		unsigned int	dma_queue_sqhd		: 16; /* [31..16]  */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_SQ_TAIL_PTR {
+	struct {
+		unsigned int	dma_queue_sq_tail_ptr : 16; /* [15..0] */
+		unsigned int	reserved_0			: 16; /* [31..16] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_CTRL0 {
+	struct {
+		unsigned int	dma_queue_en			: 1; /* [0] */
+		unsigned int	dma_queue_icg_en		: 1; /* [1] */
+		unsigned int	reserved				: 1; /* [2] */
+		unsigned int	dma_rst_without_cq_ack_enable : 1; /* [3] */
+		unsigned int	dma_queue_pause		 : 1; /* [4] */
+		unsigned int	reserved_1			  : 3; /* [7..5] */
+		unsigned int	dma_queue_arb_weight	: 8; /* [15..8] */
+		unsigned int	reserved_2			  : 3; /* [18...16] */
+		unsigned int	dma_queue_cq_mrg_en	 : 1; /* [19]  */
+		unsigned int	dma_queue_cq_mrg_time   : 2; /* [21..20] */
+		unsigned int	dma_queue_local_err_done_int_en	 : 1; /* [22] */
+		unsigned int	dma_queue_remote_err_done_int_en	: 1; /* [23] */
+		unsigned int	reserved_3				: 1; /* [24] */
+		unsigned int	dma_queue_cq_full_disable		   : 1; /* [25] */
+		unsigned int	dma_queue_cq_drct_sel			   : 1; /* [26] */
+		unsigned int	dma_queue_sq_drct_sel			   : 1; /* [27] */
+		unsigned int	dma_queue_sq_pa_lkp_err_abort_en	: 1; /* [28] */
+		unsigned int	dma_queue_sq_proc_err_abort_en	  : 1; /* [29] */
+		unsigned int	dma_queue_sq_drop_err_abort_en	  : 1; /* [30] */
+		unsigned int	dma_queue_sq_cfg_err_abort_en	   : 1; /* [31] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_CTRL1 {
+	struct {
+		unsigned int	dma_queue_reset	   : 1; /* [0] */
+		unsigned int	dma_queue_abort_exit  : 1; /* [1] */
+		unsigned int	dma_va_enable		 : 1; /* [2] */
+		unsigned int	reserved_0			: 1; /* [3] */
+		unsigned int	dma_queue_port_num	: 4; /* [7..4] */
+		unsigned int	dma_queue_remote_msi_x_mask : 1; /* [8] */
+		unsigned int	dma_va_enable_sq			: 1; /* [9] */
+		unsigned int	dma_va_enable_cq			: 1; /* [10] */
+		unsigned int	dma_queue_local_pfx_er	  : 1; /* [11] */
+		unsigned int	dma_queue_local_pfx_pmr	 : 1; /* [12] */
+		unsigned int	reserved_1				  : 3; /* [15...13] */
+		unsigned int	dma_queue_qos_en			: 1; /* [16] */
+		unsigned int	dma_queue_qos			   : 4; /* [20...17] */
+		unsigned int	dma_queue_mpam_id		   : 11; /* [31..21] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_FSM_STS {
+	struct {
+		unsigned int	dma_queue_sts		 : 4; /* [3..0] */
+		unsigned int	dma_queue_not_work	: 1; /* [4] */
+		unsigned int	dma_queue_wait_spd_data_sts : 1; /* [5] */
+		unsigned int	reserved_0			: 1; /* [6] */
+		unsigned int	reserved_1			: 1; /* [7] */
+		unsigned int	dma_queue_sub_fsm_sts : 3; /* [10..8] */
+		unsigned int	reserved_2			: 21; /* [31..11] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_INT_STS {
+	struct {
+		unsigned int	dma_queue_done_int_sts  : 1; /* [0] */
+		unsigned int	dma_queue_err00_int_sts : 1; /* [1] */
+		unsigned int	dma_queue_err01_int_sts : 1; /* [2] */
+		unsigned int	dma_queue_err02_int_sts : 1; /* [3] */
+		unsigned int	dma_queue_err03_int_sts : 1; /* [4] */
+		unsigned int	reserved				: 1; /* [5] */
+		unsigned int	dma_queue_err05_int_sts : 1; /* [6] */
+		unsigned int	dma_queue_err06_int_sts : 1; /* [7] */
+		unsigned int	dma_queue_err07_int_sts : 1; /* [8] */
+		unsigned int	dma_queue_err08_int_sts : 1; /* [9] */
+		unsigned int	dma_queue_err09_int_sts : 1; /* [10] */
+		unsigned int	dma_queue_err10_int_sts : 1; /* [11] */
+		unsigned int	dma_queue_err11_int_sts : 1; /* [12] */
+		unsigned int	dma_queue_err12_int_sts : 1; /* [13] */
+		unsigned int	dma_queue_err13_int_sts : 1; /* [14] */
+		unsigned int	dma_queue_err14_int_sts : 1; /* [15] */
+		unsigned int	dma_queue_err15_int_sts : 1; /* [16] */
+		unsigned int	dma_queue_err16_int_sts : 1; /* [17] */
+		unsigned int	dma_queue_err17_int_sts : 1; /* [18] */
+		unsigned int	reserved_0			 : 13; /* [31..19] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+union U_DMA_QUEUE_INT_MSK {
+	struct {
+		unsigned int	dma_queue_done_int_msk  : 1; /* [0] */
+		unsigned int	dma_queue_err00_int_msk : 1; /* [1] */
+		unsigned int	dma_queue_err01_int_msk : 1; /* [2] */
+		unsigned int	dma_queue_err02_int_msk : 1; /* [3] */
+		unsigned int	dma_queue_err03_int_msk : 1; /* [4] */
+		unsigned int	reserved				: 1; /* [5] */
+		unsigned int	dma_queue_err05_int_msk : 1; /* [6] */
+		unsigned int	dma_queue_err06_int_msk : 1; /* [7] */
+		unsigned int	dma_queue_err07_int_msk : 1; /* [8] */
+		unsigned int	dma_queue_err08_int_msk : 1; /* [9] */
+		unsigned int	dma_queue_err09_int_msk : 1; /* [10] */
+		unsigned int	dma_queue_err10_int_msk : 1; /* [11] */
+		unsigned int	dma_queue_err11_int_msk : 1; /* [12] */
+		unsigned int	dma_queue_err12_int_msk : 1; /* [13] */
+		unsigned int	dma_queue_err13_int_msk : 1; /* [14] */
+		unsigned int	dma_queue_err14_int_msk : 1; /* [15] */
+		unsigned int	dma_queue_err15_int_msk : 1; /* [16] */
+		unsigned int	dma_queue_err16_int_msk : 1; /* [17] */
+		unsigned int	dma_queue_err17_int_msk : 1; /* [18] */
+		unsigned int	reserved_0			: 13; /* [31..19] */
+	} bits;
+
+	unsigned int	u32;
+};
+
+struct dma_ch_sq_s {
+	u32 opcode : 4; /* [0~3] opcode */
+	u32 drop : 1; /* [4] drop */
+	u32 nw : 1; /* [5] nw */
+	u32 wd_barrier : 1; /* [6] write done barrier */
+	u32 rd_barrier : 1; /* [7] read done barrier */
+	u32 ldie : 1; /* [8] LDIE */
+	u32 rdie : 1; /* [9] rDIE */
+	u32 loop_barrier : 1; /* [10] */
+	u32 spd_barrier : 1; /* [11] */
+	u32 attr : 3; /* [12~14] attr */
+	u32 cq_disable : 1; /* [15] reserved */
+	u32 addrt : 2; /* [16~17] at */
+	u32 p3p4 : 2; /* [18~19] P3 P4 */
+	u32 pf : 3; /* [20~22] pf */
+	u32 vfen : 1; /* [23] vfen */
+	u32 vf : 8; /* [24~31] vf */
+	u32 pasid : 20; /* [0~19] pasid */
+	u32 er : 1; /* [20] er */
+	u32 pmr : 1; /* [21] pmr */
+	u32 prfen : 1; /* [22] prfen */
+	u32 reserved5 : 1; /* [23] reserved */
+	u32 msi : 8; /* [24~31] MSI/MSI-X vector */
+	u32 flow_id : 8; /* [0~7] Flow ID */
+	u32 reserved6 : 8; /* [8~15] reserved */
+	u32 TH : 1; /* [16] TH */
+	u32 PH : 2; /* [17~18] PH */
+	u32 reserved7 : 13; /* [19~31] reserved: some multiplex fields */
+	u32 length;
+	u32 src_addr_l;
+	u32 src_addr_h;
+	u32 dst_addr_l;
+	u32 dst_addr_h;
+};
+
+struct dma_ch_cq_s {
+	u32 reserved1;
+	u32 reserved2;
+	u32 sqhd : 16;
+	u32 reserved3 : 16;
+	u32 reserved4 : 16; /* [0~15] reserved */
+	u32 vld : 1; /* [16] vld */
+	u32 status : 15; /* [17~31] status */
+};
+
+#endif /* EDMA_DRV_H */
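
Note: the SQ/CQ descriptors above rely on C bitfield packing; if the
layout packs as annotated, each SQ entry is 8 x u32 (32 bytes) and each
CQ entry 4 x u32 (16 bytes). A cheap compile-time guard could pin that
down; a sketch, assuming the kernel's static_assert from
<linux/build_bug.h>:

#include <linux/build_bug.h>

static_assert(sizeof(struct dma_ch_sq_s) == 8 * sizeof(u32));
static_assert(sizeof(struct dma_ch_cq_s) == 4 * sizeof(u32));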
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
index be2f732ed9ed..1bfb123e43c0 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.c
@@ -20,11 +20,18 @@
 #include <linux/seq_file.h>
 
 #include "bma_pci.h"
+#include "edma_queue.h"
 #include "edma_host.h"
 
 static struct edma_user_inft_s *g_user_func[TYPE_MAX] = { 0 };
 
 static struct bma_dev_s *g_bma_dev;
+
+struct bma_dev_s *get_bma_dev(void)
+{
+	return g_bma_dev;
+}
+
 static int edma_host_dma_interrupt(struct edma_host_s *edma_host);
 
 int edmainfo_show(char *buf)
@@ -231,7 +238,8 @@ void clear_int_dmab2h(struct edma_host_s *edma_host)
 	(void)pci_write_config_dword(pdev, REG_PCIE1_DMAWRITEINT_CLEAR, data);
 }
 
-int edma_host_check_dma_status(enum dma_direction_e dir)
+// for 1710 1711
+int edma_host_check_dma_status_v1(enum dma_direction_e dir)
 {
 	int ret = 0;
 
@@ -259,6 +267,18 @@ int edma_host_check_dma_status(enum dma_direction_e dir)
 	return ret;
 }
 
+// for 1712
+int edma_host_check_dma_status_v2(enum dma_direction_e dir)
+{
+	UNUSED(dir);
+	if (check_dma_queue_state(CPL_STATE, TRUE) == 0 ||
+	    check_dma_queue_state(IDLE_STATE, TRUE) == 0) {
+		return 1; /* ok */
+	}
+
+	return 0; /* busy */
+}
+
 #ifdef USE_DMA
 
 static int start_transfer_h2b(struct edma_host_s *edma_host, unsigned int len,
@@ -633,9 +653,9 @@ void host_dma_transfer_withlist(struct edma_host_s *edma_host,
 	}
 }
 
-int edma_host_dma_transfer(struct edma_host_s *edma_host,
-			   struct bma_priv_data_s *priv,
-			   struct bma_dma_transfer_s *dma_transfer)
+// for 1710 1711
+int edma_host_dma_transfer_v1(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+			      struct bma_dma_transfer_s *dma_transfer)
 {
 	int ret = 0;
 	unsigned long flags = 0;
@@ -673,7 +693,44 @@ int edma_host_dma_transfer(struct edma_host_s *edma_host,
 	return ret;
 }
 
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
+// for 1712
+int edma_host_dma_transfer_v2(struct edma_host_s *edma_host, struct bma_priv_data_s *priv,
+			      struct bma_dma_transfer_s *dma_transfer)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+	struct bma_dev_s *bma_dev = NULL;
+
+	BMA_LOG(DLOG_DEBUG, "edma_host_dma_transfer 1712");
+
+	if (!edma_host || !priv || !dma_transfer)
+		return -EFAULT;
+
+	bma_dev = list_entry(edma_host, struct bma_dev_s, edma_host);
+
+	spin_lock_irqsave(&bma_dev->priv_list_lock, flags);
+
+	if (priv->user.dma_transfer == 0) {
+		spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+		BMA_LOG(DLOG_ERROR, "dma_transfer = %hhd\n", priv->user.dma_transfer);
+		return -EFAULT;
+	}
+
+	BMA_LOG(DLOG_DEBUG, "transfer_edma_host 1712");
+
+	spin_unlock_irqrestore(&bma_dev->priv_list_lock, flags);
+
+	edma_host->statistics.dma_count++;
+
+	spin_lock_irqsave(&edma_host->reg_lock, flags);
+	ret = transfer_dma_queue(dma_transfer);
+	spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+
+	return ret;
+}
+
+// for 1710/1711
+void edma_host_reset_dma_v1(struct edma_host_s *edma_host, enum dma_direction_e dir)
 {
 	u32 data = 0;
 	u32 reg_addr = 0;
@@ -717,6 +774,13 @@ void edma_host_reset_dma(struct edma_host_s *edma_host, int dir)
 		reg_addr, count, data);
 }
 
+// for 1712
+void edma_host_reset_dma_v2(struct edma_host_s *edma_host, enum dma_direction_e dir)
+{
+	UNUSED(dir);
+	reset_edma_host(edma_host);
+}
+
 int edma_host_dma_stop(struct edma_host_s *edma_host,
 		       struct bma_priv_data_s *priv)
 {
@@ -750,8 +814,8 @@ static int edma_host_send_msg(struct edma_host_s *edma_host)
 	if (send_mbx_hdr->mbxlen > 0) {
 		if (send_mbx_hdr->mbxlen > HOST_MAX_SEND_MBX_LEN) {
 			/*share memory is disable */
+			BMA_LOG(DLOG_DEBUG, "mbxlen is too long: %d\n", send_mbx_hdr->mbxlen);
 			send_mbx_hdr->mbxlen = 0;
-			BMA_LOG(DLOG_DEBUG, "mbxlen is too long\n");
 			return -EFAULT;
 		}
 
@@ -1296,6 +1360,69 @@ int edma_host_user_unregister(u32 type)
 	return 0;
 }
 
+static void init_edma_sq_cq(struct edma_host_s *edma_host)
+{
+	u64 sq_phy_addr = 0;
+	u64 cq_phy_addr = 0;
+	phys_addr_t edma_address = 0;
+	int ret = 0;
+
+	if (get_pci_type() != PCI_TYPE_1712)
+		return;
+
+	ret = bma_intf_get_map_address(TYPE_EDMA_ADDR, &edma_address);
+	if (ret != 0)
+		return;
+
+	edma_host->edma_sq_addr = (void *)((unsigned char *)edma_host->edma_recv_addr
+				  + HOST_MAX_RCV_MBX_LEN);
+	edma_host->edma_cq_addr = (void *)((unsigned char *)edma_host->edma_sq_addr
+				  + sizeof(struct dma_ch_sq_s) * SQ_DEPTH);
+	sq_phy_addr = edma_address + HOST_DMA_FLAG_LEN + HOST_MAX_SEND_MBX_LEN
+		      + HOST_MAX_RCV_MBX_LEN;
+	cq_phy_addr = sq_phy_addr + sizeof(struct dma_ch_sq_s) * SQ_DEPTH;
+
+	BMA_LOG(DLOG_DEBUG,
+		"sq_phy_addr = 0x%llx, SQ size = %zu, cq_phy_addr = 0x%llx, CQ size = %zu",
+		sq_phy_addr, sizeof(struct dma_ch_sq_s) * SQ_DEPTH,
+		cq_phy_addr, sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+	BMA_LOG(DLOG_DEBUG, "sq_addr = %pK, cq_addr = %pK", edma_host->edma_sq_addr,
+		edma_host->edma_cq_addr);
+
+	(void)memset(edma_host->edma_sq_addr, 0,
+		     sizeof(struct dma_ch_sq_s) * SQ_DEPTH + sizeof(struct dma_ch_cq_s) * CQ_DEPTH);
+
+	set_dma_queue_sq_base_l(sq_phy_addr & PCIE_ADDR_L_32_MASK);
+	set_dma_queue_sq_base_h((u32)(sq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+	set_dma_queue_cq_base_l(cq_phy_addr & PCIE_ADDR_L_32_MASK);
+	set_dma_queue_cq_base_h((u32)(cq_phy_addr >> PCIE_ADDR_H_SHIFT_32));
+
+	reset_edma_host(edma_host);
+}
+
+static void edma_setup_timer(struct edma_host_s *edma_host)
+{
+#ifdef HAVE_TIMER_SETUP
+	timer_setup(&edma_host->timer, edma_host_timeout, 0);
+#else
+	setup_timer(&edma_host->timer, edma_host_timeout,
+		    (unsigned long)edma_host);
+#endif
+	(void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
+
+#ifdef USE_DMA
+	#ifdef HAVE_TIMER_SETUP
+		timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
+
+	#else
+		setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
+			    (unsigned long)edma_host);
+	#endif
+	(void)mod_timer(&edma_host->dma_timer,
+			jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
+#endif
+}
+
 int edma_host_init(struct edma_host_s *edma_host)
 {
 	int ret = 0;
@@ -1352,24 +1479,7 @@ int edma_host_init(struct edma_host_s *edma_host)
 	edma_host->b2h_state = B2HSTATE_IDLE;
 
 #ifdef EDMA_TIMER
-	#ifdef HAVE_TIMER_SETUP
-		timer_setup(&edma_host->timer, edma_host_timeout, 0);
-	#else
-		setup_timer(&edma_host->timer, edma_host_timeout,
-			    (unsigned long)edma_host);
-	#endif
-	(void)mod_timer(&edma_host->timer, jiffies_64 + TIMER_INTERVAL_CHECK);
-#ifdef USE_DMA
-	#ifdef HAVE_TIMER_SETUP
-		timer_setup(&edma_host->dma_timer, edma_host_dma_timeout, 0);
-
-	#else
-		setup_timer(&edma_host->dma_timer, edma_host_dma_timeout,
-			    (unsigned long)edma_host);
-	#endif
-	(void)mod_timer(&edma_host->dma_timer,
-			jiffies_64 + DMA_TIMER_INTERVAL_CHECK);
-#endif
+	edma_setup_timer(edma_host);
 
 #else
 	init_completion(&edma_host->msg_ready);
@@ -1383,6 +1493,8 @@ int edma_host_init(struct edma_host_s *edma_host)
 	}
 #endif
 
+	init_edma_sq_cq(edma_host);
+
 	#ifdef HAVE_TIMER_SETUP
 		timer_setup(&edma_host->heartbeat_timer,
 			    edma_host_heartbeat_timer, 0);
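
Note: with SQ_DEPTH/CQ_DEPTH of 128 and the descriptor sizes above,
init_edma_sq_cq() carves the 1712 EDMA swap region (base = the address
returned for TYPE_EDMA_ADDR) roughly as follows; the virtual pointers
are derived from edma_recv_addr and the physical addresses from the
same base, so both views stay in lockstep:

/*
 * offset 0                     DMA flag       (HOST_DMA_FLAG_LEN)
 * + HOST_DMA_FLAG_LEN          send mailbox   (HOST_MAX_SEND_MBX_LEN)
 * + HOST_MAX_SEND_MBX_LEN      recv mailbox   (HOST_MAX_RCV_MBX_LEN)
 * + HOST_MAX_RCV_MBX_LEN       SQ ring: 128 * 32 B = 4 KiB
 * + SQ ring bytes              CQ ring: 128 * 16 B = 2 KiB
 */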
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
index cbbd86fd6602..93c81bc92286 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_host.h
@@ -18,6 +18,8 @@
 
 #include "bma_include.h"
 #include "../include/bma_ker_intf.h"
+#include "edma_reg.h"
+#include "edma_drv.h"
 
 #define EDMA_TIMER
 
@@ -176,6 +178,13 @@
 #define U64ADDR_H(addr)			((((u64)addr) >> 32) & 0xffffffff)
 #define U64ADDR_L(addr)			((addr) & 0xffffffff)
 
+#define MAX_RESET_DMA_TIMES 10
+#define DELAY_BETWEEN_RESET_DMA 100
+#define PCI_VENDOR_ID_HUAWEI_PME 0x19e5
+#define PCI_DEVICE_ID_EDMA_0 0x1712
+#define SQ_DEPTH 128
+#define CQ_DEPTH 128
+
 struct bma_register_dev_type_s {
 	u32 type;
 	u32 sub_type;
@@ -263,6 +272,8 @@ struct edma_host_s {
 	void __iomem *edma_flag;
 	void __iomem *edma_send_addr;
 	void __iomem *edma_recv_addr;
+	void __iomem *edma_sq_addr;
+	void __iomem *edma_cq_addr;
 #ifdef USE_DMA
 	struct timer_list dma_timer;
 #endif
@@ -309,6 +320,8 @@ struct edma_user_inft_s {
 	int (*add_msg)(void *msg, size_t msg_len);
 };
 
+struct bma_dev_s *get_bma_dev(void);
+
 int is_edma_b2h_int(struct edma_host_s *edma_host);
 void edma_int_to_bmc(struct edma_host_s *edma_host);
 int edma_host_mmap(struct edma_host_s *edma_hos, struct file *filp,
@@ -336,7 +349,6 @@ int edma_host_user_unregister(u32 type);
 int edma_host_init(struct edma_host_s *edma_host);
 void edma_host_cleanup(struct edma_host_s *edma_host);
 int edma_host_send_driver_msg(const void *msg, size_t msg_len, int subtype);
-void edma_host_reset_dma(struct edma_host_s *edma_host, int dir);
 void clear_int_dmah2b(struct edma_host_s *edma_host);
 void clear_int_dmab2h(struct edma_host_s *edma_host);
 
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
new file mode 100644
index 000000000000..678262f7412c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "bma_pci.h"
+#include "edma_host.h"
+#include "edma_queue.h"
+
+static u32 pcie_dma_read(u32 offset)
+{
+	u32 reg_val;
+
+	reg_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+	BMA_LOG(DLOG_DEBUG, "readl, offset 0x%x val 0x%x\n", offset, reg_val);
+	return reg_val;
+}
+
+static void pcie_dma_write(u32 offset, u32 reg_val)
+{
+	u32 read_val;
+
+	(void)writel(reg_val, get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+	read_val = readl(get_bma_dev()->bma_pci_dev->bma_base_addr + offset);
+	if (read_val != reg_val) {
+		BMA_LOG(DLOG_DEBUG,
+			"writel fail, read_value: 0x%x, set_value: 0x%x, offset: 0x%x\n",
+			read_val, reg_val, offset);
+		return;
+	}
+	BMA_LOG(DLOG_DEBUG, "writel, offset 0x%x val 0x%x\n", offset, reg_val);
+}
+
+static void set_dma_queue_int_msk(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_INT_MSK_0_REG, val);
+}
+
+static void set_dma_queue_err_int_msk(u32 val)
+{
+	union U_DMA_QUEUE_INT_MSK reg_val;
+
+	// The least significant bit (bit 0) of this register is reserved and must be cleared,
+	// while the remaining bits should retain their original values.
+	reg_val.u32 = val & 0xFFFFFFFE;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_int_sts(u32 val)
+{
+	union U_DMA_QUEUE_INT_STS reg_val;
+
+	reg_val.u32 = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_INT_STS_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_int_sts(u32 *val)
+{
+	union U_DMA_QUEUE_INT_STS reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_INT_STS_0_REG);
+	*val = reg_val.u32;
+}
+
+static void get_dma_queue_fsm_sts(u32 *val)
+{
+	union U_DMA_QUEUE_FSM_STS reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_FSM_STS_0_REG);
+	*val = reg_val.bits.dma_queue_sts;
+}
+
+static void pause_dma_queue(u32 val)
+{
+	union U_DMA_QUEUE_CTRL0 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+	reg_val.bits.dma_queue_pause = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void enable_dma_queue(u32 val)
+{
+	union U_DMA_QUEUE_CTRL0 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+	reg_val.bits.dma_queue_en = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void reset_dma_queue(u32 val)
+{
+	union U_DMA_QUEUE_CTRL1 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL1_0_REG);
+	reg_val.bits.dma_queue_reset = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL1_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_sq_tail(u32 val)
+{
+	union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+	reg_val.bits.dma_queue_sq_tail_ptr = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_head(u32 val)
+{
+	union U_DMA_QUEUE_CQ_HEAD_PTR reg_val;
+
+	reg_val.bits.dma_queue_cq_head_ptr = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG, reg_val.u32);
+}
+
+void set_dma_queue_sq_base_l(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_sq_base_h(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_BASE_H_0_REG, val);
+}
+
+void set_dma_queue_cq_base_l(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_L_0_REG, val);
+}
+
+void set_dma_queue_cq_base_h(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_BASE_H_0_REG, val);
+}
+
+static void set_dma_queue_sq_depth(u32 val)
+{
+	union U_DMA_QUEUE_SQ_DEPTH reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG);
+	reg_val.bits.dma_queue_sq_depth = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_SQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_cq_depth(u32 val)
+{
+	union U_DMA_QUEUE_CQ_DEPTH reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG);
+	reg_val.bits.dma_queue_cq_depth = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CQ_DEPTH_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_arb_weight(u32 val)
+{
+	union U_DMA_QUEUE_CTRL0 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+	reg_val.bits.dma_queue_arb_weight = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_drct_sel(u32 val)
+{
+	union U_DMA_QUEUE_CTRL0 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+	reg_val.bits.dma_queue_cq_drct_sel = val;
+	reg_val.bits.dma_queue_sq_drct_sel = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void get_dma_queue_sq_tail(u32 *val)
+{
+	union U_DMA_QUEUE_SQ_TAIL_PTR reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG);
+	*val = reg_val.bits.dma_queue_sq_tail_ptr;
+}
+
+static void get_dma_queue_cq_tail(u32 *val)
+{
+	union U_DMA_QUEUE_CQ_TAIL_PTR reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG);
+	*val = reg_val.bits.dma_queue_cq_tail_ptr;
+}
+
+static void get_dma_queue_sq_head(u32 *val)
+{
+	u32 reg_val;
+
+	reg_val = pcie_dma_read(PCIE_DMA_QUEUE_SQ_STS_0_REG);
+	/* dma_queue_sq_head_ptr bit[15:0] */
+	*val = reg_val & 0xFFFF;
+}
+
+static void set_dma_queue_err_abort(u32 val)
+{
+	union U_DMA_QUEUE_CTRL0 reg_val;
+
+	reg_val.u32 = pcie_dma_read(PCIE_DMA_QUEUE_CTRL0_0_REG);
+	reg_val.bits.dma_queue_sq_pa_lkp_err_abort_en = val;
+	reg_val.bits.dma_queue_sq_proc_err_abort_en = val;
+	reg_val.bits.dma_queue_sq_drop_err_abort_en = val;
+	reg_val.bits.dma_queue_sq_cfg_err_abort_en = val;
+	(void)pcie_dma_write(PCIE_DMA_QUEUE_CTRL0_0_REG, reg_val.u32);
+}
+
+static void set_dma_queue_flr_disable(u32 val)
+{
+	(void)pcie_dma_write(PCIE_DMA_FLR_DISABLE_REG, val);
+}
+
+static void clear_dma_queue_int_chk(u32 mask)
+{
+	u32 int_sts;
+
+	(void)get_dma_queue_int_sts(&int_sts);
+	if (int_sts & mask)
+		(void)set_dma_queue_int_sts(mask);
+}
+
+s32 check_dma_queue_state(u32 state, u32 flag)
+{
+	u32 dma_state = 0;
+	unsigned long timeout;
+
+	BMA_LOG(DLOG_DEBUG, "state:%u, flag:%u\n", state, flag);
+
+	timeout = jiffies + TIMER_INTERVAL_CHECK;
+
+	while (1) {
+		get_dma_queue_fsm_sts(&dma_state);
+		BMA_LOG(DLOG_DEBUG, "DMA state[%u]\n", dma_state);
+		/*
+		 * Break when flag is 0 and the state differs from the target,
+		 * or when flag is 1 and the state matches the target.
+		 */
+		if ((!flag && dma_state != state) || (flag && dma_state == state))
+			break;
+
+		if (time_after(jiffies, timeout)) {
+			BMA_LOG(DLOG_DEBUG, "Wait state[%u] fail\n", state);
+			return -ETIMEDOUT;
+		}
+		udelay(1);
+	}
+	return 0;
+}
+
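+/*
+ * Editor's note on the reset path below: reset_dma() walks the queue FSM
+ * toward IDLE, falling through the cases as the state advances:
+ *   RUN        -> pause the queue, then treat as ABORT/CPL
+ *   ABORT/CPL  -> disable the queue and wait until it leaves RUN
+ *   PAUSE/HALT -> zero SQ tail / CQ head, assert queue reset, unpause,
+ *                 then wait for the FSM to report IDLE
+ *   IDLE       -> nothing to do
+ */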
+static s32 reset_dma(void)
+{
+	u32 dma_state = 0;
+
+	/* get dma channel fsm */
+	check_dma_queue_state(WAIT_STATE, FALSE);
+	get_dma_queue_fsm_sts(&dma_state);
+	BMA_LOG(DLOG_DEBUG, "dma_state:%u\n", dma_state);
+	switch (dma_state) {
+	/* idle status, dma channel need no reset */
+	case IDLE_STATE:
+		return 0;
+	case RUN_STATE:
+		pause_dma_queue(ENABLE);
+		fallthrough;
+	case ABORT_STATE:
+	case CPL_STATE:
+		enable_dma_queue(DISABLE);
+		if (check_dma_queue_state(RUN_STATE, FALSE))
+			return -ETIMEDOUT;
+		fallthrough;
+	case PAUSE_STATE:
+	case HALT_STATE:
+		set_dma_queue_sq_tail(0);
+		set_dma_queue_cq_head(0);
+		reset_dma_queue(ENABLE);
+		pause_dma_queue(DISABLE);
+		if (check_dma_queue_state(IDLE_STATE, TRUE))
+			return -ETIMEDOUT;
+		return 0; /* queue successfully reset to IDLE */
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void init_dma(void)
+{
+	/* set dma channel sq tail */
+	set_dma_queue_sq_tail(0);
+	/* set dma channel cq head */
+	set_dma_queue_cq_head(0);
+	/* set dma queue drct sel */
+	set_dma_queue_drct_sel(DRC_LOCAL);
+	/* set dma channel sq depth */
+	set_dma_queue_sq_depth(SQ_DEPTH - 1);
+	/* set dma channel cq depth */
+	set_dma_queue_cq_depth(CQ_DEPTH - 1);
+	/* DMA does not process FLR; only the CPU handles FLR */
+	set_dma_queue_flr_disable(0x1);
+	/* set dma queue arb weight */
+	set_dma_queue_arb_weight(0x1F);
+	/* clear dma queue int status */
+	set_dma_queue_int_sts(0x1FFF);
+	/* set dma queue int mask */
+	set_dma_queue_err_int_msk(0x0);
+	set_dma_queue_int_msk(0x0);
+	/* set dma queue abort err en */
+	set_dma_queue_err_abort(ENABLE);
+	/* enable dma channel en */
+	enable_dma_queue(ENABLE);
+}
+
+s32 wait_done_dma_queue(unsigned long timeout)
+{
+	struct dma_ch_cq_s *p_cur_last_cq;
+	struct dma_ch_cq_s *p_dma_cq;
+	unsigned long end;
+	u32 sq_tail;
+	u32 sq_valid;
+	u32 cq_tail;
+	u32 cq_valid;
+
+	p_dma_cq = (struct dma_ch_cq_s *)(get_bma_dev()->edma_host.edma_cq_addr);
+	end = jiffies + timeout;
+
+	while (time_before(jiffies, end)) {
+		(void)get_dma_queue_sq_tail(&sq_tail);
+		(void)get_dma_queue_cq_tail(&cq_tail);
+
+		cq_valid = (cq_tail + CQ_DEPTH - 1) % (CQ_DEPTH);
+		p_cur_last_cq = &p_dma_cq[cq_valid];
+		sq_valid = (sq_tail + SQ_DEPTH - 1) % (SQ_DEPTH);
+		BMA_LOG(DLOG_DEBUG,
+			"sq_tail %d, cq_tail %d, cq_valid %d, sq_valid %d, p_cur_last_cq->sqhd %d\n",
+			sq_tail, cq_tail, cq_valid, sq_valid, p_cur_last_cq->sqhd);
+		if (p_cur_last_cq->sqhd == sq_valid) {
+			set_dma_queue_cq_head(cq_valid);
+			return 0;
+		}
+	}
+
+	return -ETIMEDOUT;
+}
+
+static s32 submit_dma_queue_sq(u32 dir, struct bspveth_dmal pdmalbase_v, u32 pf)
+{
+	u32 sq_tail;
+	u32 sq_head;
+	u32 sq_available;
+	struct dma_ch_sq_s sq_submit;
+	struct dma_ch_sq_s *p_dma_sq;
+
+	p_dma_sq = (struct dma_ch_sq_s *)(get_bma_dev()->edma_host.edma_sq_addr);
+	(void)get_dma_queue_sq_tail(&sq_tail);
+	(void)get_dma_queue_sq_head(&sq_head);
+	sq_available = SQ_DEPTH - 1 - (((sq_tail - sq_head) + SQ_DEPTH) % SQ_DEPTH);
+	if (sq_available < 1) {
+		BMA_LOG(DLOG_DEBUG, "SQ is full, cannot submit descriptor, try again later\n");
+		return -1;
+	}
+
+	BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail %d, sq_head %d, sq_available %d\n",
+		sq_tail, sq_head, sq_available);
+
+	(void)memset(&sq_submit, 0, sizeof(sq_submit));
+	if (dir == DIR_H2B)
+		sq_submit.opcode = DMA_READ;
+	else
+		sq_submit.opcode = DMA_WRITE;
+
+	BMA_LOG(DLOG_DEBUG, "PF: %u\n", pf);
+	sq_submit.ldie = ENABLE;
+	sq_submit.rdie = ENABLE;
+	sq_submit.attr &= (~0x2); /* SO(Strong Ordering) */
+	sq_submit.pf = pf & 0x7;          /* low 3 bits of devfn */
+	sq_submit.p3p4 = (pf >> 3) & 0x3; /* next 2 bits of devfn */
+	sq_submit.length = pdmalbase_v.len;
+	sq_submit.src_addr_l = pdmalbase_v.slow;
+	sq_submit.src_addr_h = pdmalbase_v.shi;
+	sq_submit.dst_addr_l = pdmalbase_v.dlow;
+	sq_submit.dst_addr_h = pdmalbase_v.dhi;
+
+	BMA_LOG(DLOG_DEBUG, "submit dma queue sq, dir %d, op %d, length %d\n", dir,
+		sq_submit.opcode, sq_submit.length);
+
+	memcpy(p_dma_sq + sq_tail, &sq_submit, sizeof(sq_submit));
+	sq_tail = (sq_tail + 1) % SQ_DEPTH;
+
+	BMA_LOG(DLOG_DEBUG, "submit dma queue sq, sq_tail change %d\n", sq_tail);
+	wmb(); /* memory barriers. */
+
+	(void)set_dma_queue_sq_tail(sq_tail);
+
+	return 0;
+}
+
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer)
+{
+	struct bspveth_dmal *pdmalbase_v;
+	u32 dmal_cnt;
+	s32 ret;
+	int i;
+
+	if (!dma_transfer) {
+		BMA_LOG(DLOG_DEBUG, "dma_transfer is NULL.\n");
+		return -EFAULT;
+	}
+
+	BMA_LOG(DLOG_DEBUG, "transfer dma queue.\n");
+
+	/* clear local done int */
+	clear_dma_queue_int_chk(DMA_DONE_MASK);
+
+	pdmalbase_v = dma_transfer->pdmalbase_v;
+	dmal_cnt = dma_transfer->dmal_cnt;
+	for (i = 0; i < dmal_cnt; i++)
+		submit_dma_queue_sq(dma_transfer->dir, pdmalbase_v[i],
+				    get_bma_dev()->bma_pci_dev->pdev->devfn);
+
+	(void)set_dma_queue_int_msk(DMA_DONE_UNMASK);
+	(void)set_dma_queue_err_int_msk(DMA_ERR_UNMASK);
+	(void)enable_dma_queue(ENABLE);
+
+	ret = wait_done_dma_queue(DMA_TMOUT);
+	if (ret)
+		BMA_LOG(DLOG_DEBUG, "EP DMA: dma wait timeout\n");
+
+	return ret;
+}
+
+void reset_edma_host(struct edma_host_s *edma_host)
+{
+	unsigned long flags = 0;
+	int count = 0;
+
+	if (!edma_host)
+		return;
+
+	spin_lock_irqsave(&edma_host->reg_lock, flags);
+
+	while (count++ < MAX_RESET_DMA_TIMES) {
+		if (reset_dma() == 0) {
+			BMA_LOG(DLOG_DEBUG, "reset dma successfully\n");
+			init_dma();
+			break;
+		}
+
+		mdelay(DELAY_BETWEEN_RESET_DMA);
+	}
+
+	spin_unlock_irqrestore(&edma_host->reg_lock, flags);
+	BMA_LOG(DLOG_DEBUG, "reset dma count=%d\n", count);
+}
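A note on the ring arithmetic in submit_dma_queue_sq() above: the SQ is a classic producer/consumer ring in which one slot is deliberately left unused, so that head == tail always means "empty" and a full ring remains distinguishable. A minimal standalone sketch of the same invariant (hypothetical DEPTH, plain userspace C, not driver code):

#include <assert.h>

#define DEPTH 16 /* hypothetical; the driver uses SQ_DEPTH */

/* Free slots when software owns 'tail' and hardware advances 'head'.
 * Reserving one slot keeps head == tail unambiguous: it means empty.
 */
static unsigned int ring_free(unsigned int tail, unsigned int head)
{
	return DEPTH - 1 - ((tail - head + DEPTH) % DEPTH);
}

int main(void)
{
	assert(ring_free(0, 0) == DEPTH - 1); /* empty ring */
	assert(ring_free(DEPTH - 1, 0) == 0); /* full: producer must wait */
	assert(ring_free(3, 1) == DEPTH - 3); /* two descriptors in flight */
	return 0;
}

wait_done_dma_queue() uses the mirror-image check: completion is assumed once the newest CQ entry's sqhd field has caught up with the index of the last submitted SQ descriptor.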
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
new file mode 100644
index 000000000000..0cf449c0ae00
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_queue.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_QUEUE_H
+#define EDMA_QUEUE_H
+#include "edma_host.h"
+
+s32 check_dma_queue_state(u32 state, u32 flag);
+void set_dma_queue_sq_base_l(u32 val);
+void set_dma_queue_sq_base_h(u32 val);
+void set_dma_queue_cq_base_l(u32 val);
+void set_dma_queue_cq_base_h(u32 val);
+void reset_edma_host(struct edma_host_s *edma_host);
+int transfer_edma_host(struct edma_host_s *host, struct bma_priv_data_s *priv,
+		       struct bma_dma_transfer_s *transfer);
+s32 transfer_dma_queue(struct bma_dma_transfer_s *dma_transfer);
+#endif
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
new file mode 100644
index 000000000000..c4e056a92bc8
--- /dev/null
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/edma_reg.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Huawei iBMA driver.
+ * Copyright (c) 2025, Huawei Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef EDMA_REG_H
+#define EDMA_REG_H
+
+#define PORT_EP 0
+#define PORT_RP 1
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define TRUE 1
+#define FALSE 0
+
+/* core0:x2/x1 core1:x1 */
+#define PCIE_CORE_NUM 2
+#define PCIE_REG_OFFSET 0x100000U
+#define PCIE_REG_SIZE   0x100000
+
+#define GEN1 0x1
+#define GEN2 0x2
+#define GEN3 0x3
+#define GEN4 0x4
+
+#define PCIE_ADDR_H_SHIFT_32 32
+#define PCIE_ADDR_L_32_MASK  0xFFFFFFFF
+
+#define AP_DMA_BIT            BIT(5)
+#define AP_MASK_ALL           0x3FF
+#define AP_DMA_CHAN_REG_SIZE  0x100
+
+/********************************************************************************************/
+/*                  PCIE reg base                                                           */
+/********************************************************************************************/
+#define PCIE_BASE_ADDR                       0x1E100000U
+#define AP_DMA_REG                           0x10000U
+#define AP_IOB_TX_REG_BASE                   0x0U
+#define AP_IOB_RX_REG_BASE                   0x4000U
+#define AP_GLOBAL_REG_BASE                   0x8000U
+
+/********************************************************************************************/
+/*                   PCIE AP DMA REG                                                   */
+/********************************************************************************************/
+#define PCIE_DMA_EP_INT_MSK_REG             0x24   /* DMA_EP_INT_MSK */
+#define PCIE_DMA_EP_INT_REG                 0x28   /* DMA_EP_INT */
+#define PCIE_DMA_EP_INT_STS_REG             0x2C   /* DMA_EP_INT_STS */
+#define PCIE_DMA_FLR_DISABLE_REG            0xA00  /* DMA_FLR_DISABLE */
+#define PCIE_DMA_QUEUE_SQ_BASE_L_0_REG      0x2000 /* DMA Queue SQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_SQ_BASE_H_0_REG      0x2004 /* DMA Queue SQ Base Address High Register */
+#define PCIE_DMA_QUEUE_SQ_DEPTH_0_REG       0x2008 /* DMA Queue SQ Depth */
+#define PCIE_DMA_QUEUE_SQ_TAIL_PTR_0_REG    0x200C /* DMA Queue SQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_L_0_REG      0x2010 /* DMA Queue CQ Base Address Low Register */
+#define PCIE_DMA_QUEUE_CQ_BASE_H_0_REG      0x2014 /* DMA Queue CQ Base Address High Register */
+#define PCIE_DMA_QUEUE_CQ_DEPTH_0_REG       0x2018 /* DMA Queue CQ Depth */
+#define PCIE_DMA_QUEUE_CQ_HEAD_PTR_0_REG    0x201C /* DMA Queue CQ Head Pointer Register */
+#define PCIE_DMA_QUEUE_CTRL0_0_REG          0x2020 /* DMA Queue control Register 0 */
+#define PCIE_DMA_QUEUE_CTRL1_0_REG          0x2024 /* DMA Queue control Register 1 */
+#define PCIE_DMA_QUEUE_FSM_STS_0_REG        0x2030 /* DMA Queue FSM Status Register */
+#define PCIE_DMA_QUEUE_SQ_STS_0_REG         0x2034 /* DMA Queue SQ and CQ status Register */
+#define PCIE_DMA_QUEUE_CQ_TAIL_PTR_0_REG    0x203C /* DMA Queue CQ Tail Pointer Register */
+#define PCIE_DMA_QUEUE_INT_STS_0_REG        0x2040 /* DMA Queue Interrupt Status */
+#define PCIE_DMA_QUEUE_INT_MSK_0_REG        0x2044 /* DMA Queue Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_ERR_INT_STS_0_REG    0x2048 /* DMA Queue Err Interrupt Status */
+#define PCIE_DMA_QUEUE_ERR_INT_MSK_0_REG    0x204C /* DMA Queue Err Interrupt Mask Register */
+#define PCIE_DMA_QUEUE_INT_RO_0_REG         0x206C /* DMA Queue Interrupt RO Register */
+
+/********************************************************************************************/
+/*                   PCIE AP_GLOBAL_REG                                                     */
+/********************************************************************************************/
+#define PCIE_CE_ENA                  0x0008
+#define PCIE_UNF_ENA                 0x0010
+#define PCIE_UF_ENA                  0x0018
+
+#define PCIE_MSI_MASK                0x00F4
+#define PORT_INTX_ASSERT_MASK        0x01B0
+#define PORT_INTX_DEASSERT_MASK      0x01B4
+
+#define PCIE_AP_NI_ENA               0x0100
+#define PCIE_AP_CE_ENA               0x0104
+#define PCIE_AP_UNF_ENA              0x0108
+#define PCIE_AP_UF_ENA               0x010c
+#define PCIE_AP_NI_MASK              0x0110
+#define PCIE_AP_CE_MASK              0x0114
+#define PCIE_AP_UNF_MASK             0x0118
+#define PCIE_AP_UF_MASK              0x011C
+#define PCIE_AP_NI_STATUS            0x0120
+#define PCIE_AP_CE_STATUS            0x0124
+#define PCIE_AP_UNF_STATUS           0x0128
+#define PCIE_AP_UF_STATUS            0x012C
+#define PCIE_CORE_NI_ENA             0x0160
+#define PCIE_CORE_CE_ENA             0x0164
+#define PCIE_CORE_UNF_ENA            0x0168
+#define PCIE_CORE_UF_ENA             0x016c
+
+#define AP_PORT_EN_REG               0x0800
+#define AP_APB_SYN_RST               0x0810
+#define AP_AXI_SYN_RST               0x0814
+#define AP_IDLE                      0x0C08
+
+/********************************************************************************************/
+/*                   PCIE AP_IOB_RX_COM_REG Reg                                             */
+/********************************************************************************************/
+#define IOB_RX_AML_SNOOP                    0x1AAC
+#define IOB_RX_MSI_INT_CTRL                 0x1040
+
+#define IOB_RX_MSI_INT_ADDR_HIGH       0x1044
+#define IOB_RX_MSI_INT_ADDR_LOW        0x1048
+
+#define IOB_RX_PAB_SMMU_BYPASS_CTRL    0x2004
+
+#define IOB_RX_DMA_REG_REMAP_0 0x0E30
+#define IOB_RX_DMA_REG_REMAP_1 0x0E34
+
+#endif /* EDMA_REG_H */
diff --git a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
index d1df99b0c9fd..8d284d5f6e62 100644
--- a/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
+++ b/drivers/net/ethernet/huawei/bma/include/bma_ker_intf.h
@@ -47,6 +47,17 @@ enum intr_mod {
 	INTR_ENABLE = 1,
 };
 
+enum addr_type {
+	TYPE_EDMA_ADDR = 0,
+	TYPE_VETH_ADDR = 1,
+};
+
+enum pci_type_e {
+	PCI_TYPE_UNKNOWN,
+	PCI_TYPE_171x,
+	PCI_TYPE_1712
+};
+
 struct bma_dma_addr_s {
 	dma_addr_t dma_addr;
 	u32 dma_data_len;
@@ -66,10 +77,28 @@ union transfer_u {
 	struct dmalist_transfer_s list;
 };
 
+struct bspveth_dmal {
+	u32 chl;
+	u32 len;
+	u32 slow;
+	u32 shi;
+	u32 dlow;
+	u32 dhi;
+};
+
 struct bma_dma_transfer_s {
 	enum dma_type_e type;
 	enum dma_direction_e dir;
 	union transfer_u transfer;
+	struct bspveth_dmal *pdmalbase_v;
+	u32 dmal_cnt;
+};
+
+struct bma_map_addr_s {
+	enum pci_type_e pci_type;
+	u32 host_number;
+	enum addr_type addr_type;
+	u32 addr;
 };
 
 int bma_intf_register_int_notifier(struct notifier_block *nb);
@@ -91,4 +120,21 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len);
 unsigned int bma_cdev_check_recv(void *handle);
 void *bma_cdev_get_wait_queue(void *handle);
 int bma_intf_check_edma_supported(void);
+
+enum pci_type_e get_pci_type(void);
+void set_pci_type(enum pci_type_e type);
+
+int bma_intf_get_host_number(unsigned int *host_number);
+int bma_intf_get_map_address(enum addr_type type, phys_addr_t *addr);
+
+#define HOST_NUMBER_0 0
+#define HOST_NUMBER_1 1
+
+#define EDMA_1711_HOST0_ADDR 0x84810000
+#define VETH_1711_HOST0_ADDR 0x84820000
+#define EDMA_1712_HOST0_ADDR 0x85400000
+#define VETH_1712_HOST0_ADDR 0x85410000
+#define EDMA_1712_HOST1_ADDR 0x87400000
+#define VETH_1712_HOST1_ADDR 0x87410000
+
 #endif
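The new bma_map_addr_s structure and the per-chip constants above suggest a table-driven lookup keyed by (PCI type, host number, address type). A possible shape for that table, as a hedged sketch only (the actual wiring presumably lives in bma_devintf.c, which this series also touches; the table and helper below are illustrative and assume the definitions from bma_ker_intf.h):

static const struct bma_map_addr_s addr_map[] = {
	{ PCI_TYPE_171x, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1711_HOST0_ADDR },
	{ PCI_TYPE_171x, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1711_HOST0_ADDR },
	{ PCI_TYPE_1712, HOST_NUMBER_0, TYPE_EDMA_ADDR, EDMA_1712_HOST0_ADDR },
	{ PCI_TYPE_1712, HOST_NUMBER_0, TYPE_VETH_ADDR, VETH_1712_HOST0_ADDR },
	{ PCI_TYPE_1712, HOST_NUMBER_1, TYPE_EDMA_ADDR, EDMA_1712_HOST1_ADDR },
	{ PCI_TYPE_1712, HOST_NUMBER_1, TYPE_VETH_ADDR, VETH_1712_HOST1_ADDR },
};

/* Hypothetical helper: resolve a mapping address or -EINVAL if absent. */
static int lookup_map_addr(enum pci_type_e pci, u32 host,
			   enum addr_type type, u32 *addr)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(addr_map); i++) {
		if (addr_map[i].pci_type == pci &&
		    addr_map[i].host_number == host &&
		    addr_map[i].addr_type == type) {
			*addr = addr_map[i].addr;
			return 0;
		}
	}
	return -EINVAL;
}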
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
index 0d82ee6f7c83..745d83b431f8 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
@@ -23,7 +23,7 @@
 #ifdef DRV_VERSION
 #define KBOX_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define KBOX_VERSION "0.3.10"
+#define KBOX_VERSION "0.4.0"
 #endif
 
 #define UNUSED(x) (x = x)
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
index 9d918edae703..774229ae8dd1 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
@@ -495,6 +495,11 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
 	int err = 0;
 	u8 *shmq_head_p = NULL;
 	struct bspveth_shmq_hd *shmq_head = NULL;
+	phys_addr_t veth_address = 0;
+
+	err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+	if (err != 0)
+		goto failed;
 
 	if (!pvethdev)
 		return BSP_ERR_NULL_POINTER;
@@ -526,7 +531,7 @@ s32 bspveth_setup_all_tx_resources(struct bspveth_device *pvethdev)
 			(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
 			+ SHMDMAL_OFFSET);
 		pvethdev->ptx_queue[qid]->pdmalbase_p =
-			(u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC +
+			(u8 *)(u64)(veth_address +
 			MAX_SHAREQUEUE_SIZE * qid +
 			SHMDMAL_OFFSET);
 
@@ -851,6 +856,11 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
 	int qid, i, err = 0;
 	struct bspveth_shmq_hd *shmq_head = NULL;
 	u8 *shmq_head_p = NULL;
+	phys_addr_t veth_address = 0;
+
+	err = bma_intf_get_map_address(TYPE_VETH_ADDR, &veth_address);
+	if (err != 0)
+		goto failed;
 
 	if (!pvethdev)
 		return BSP_ERR_NULL_POINTER;
@@ -885,7 +895,7 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
 			(struct bspveth_dmal *)((BSP_VETH_T)(shmq_head)
 			+ SHMDMAL_OFFSET);
 		pvethdev->prx_queue[qid]->pdmalbase_p =
-			(u8 *)(u64)(VETH_SHAREPOOL_BASE_INBMC
+			(u8 *)(u64)(veth_address
 			+ MAX_SHAREQUEUE_SIZE * (qid + 1)
 			+ SHMDMAL_OFFSET);
 		memset(pvethdev->prx_queue[qid]->pdmalbase_v, 0,
@@ -1236,6 +1246,8 @@ void veth_netdev_func_init(struct net_device *dev)
 {
 	struct tag_pcie_comm_priv *priv =
 				(struct tag_pcie_comm_priv *)netdev_priv(dev);
+	u32 host_number = 0;
+	int ret = 0;
 	/*9C:7D:A3:28:6F:F9*/
 	unsigned char veth_mac[ETH_ALEN] = {0x9C, 0x7D, 0xA3, 0x28, 0x6F, 0xF9};
 
@@ -1243,6 +1255,12 @@ void veth_netdev_func_init(struct net_device *dev)
 
 	ether_setup(dev);
 
+	ret = bma_intf_get_host_number(&host_number);
+	if (ret < 0) {
+		VETH_LOG(DLOG_ERROR, "bma_intf_get_host_number failed!\n");
+		return;
+	}
+
 	dev->netdev_ops = &veth_ops;
 
 	dev->watchdog_timeo = BSPVETH_NET_TIMEOUT;
@@ -1257,6 +1275,7 @@ void veth_netdev_func_init(struct net_device *dev)
 	memset(priv, 0, sizeof(struct tag_pcie_comm_priv));
 	strncpy(priv->net_type, MODULE_NAME, NET_TYPE_LEN);
 
+	veth_mac[ETH_ALEN - 1] = (host_number == 0 ? 0xF9 : 0xFB);
 	eth_hw_addr_set(dev, veth_mac);
 
 	VETH_LOG(DLOG_DEBUG, "set veth MAC addr OK\n");
@@ -2226,6 +2245,8 @@ s32 __start_dmalist_H(struct bspveth_rxtx_q *prxtx_queue, u32 cnt, u32 type)
 	dma_transfer.type = DMA_LIST;
 	dma_transfer.transfer.list.dma_addr =
 		(dma_addr_t)prxtx_queue->pdmalbase_p;
+	dma_transfer.pdmalbase_v = prxtx_queue->pdmalbase_v;
+	dma_transfer.dmal_cnt = prxtx_queue->dmal_cnt;
 
 	ret = bma_intf_start_dma(g_bspveth_dev.bma_priv, &dma_transfer);
 	if (ret < 0)
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
index 242d3ec128d3..f8b7e2f8d604 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
@@ -31,7 +31,7 @@ extern "C" {
 #ifdef DRV_VERSION
 #define VETH_VERSION	MICRO_TO_STR(DRV_VERSION)
 #else
-#define VETH_VERSION	"0.3.10"
+#define VETH_VERSION	"0.4.0"
 #endif
 
 #define MODULE_NAME	"veth"
@@ -67,7 +67,6 @@ extern "C" {
 #define SYSCTL_REG_SIZE			(0x1000)
 #define PCIE1_REG_BASE			(0x29000000)
 #define PCIE1_REG_SIZE			(0x1000)
-#define VETH_SHAREPOOL_BASE_INBMC	(0x84820000)
 #define VETH_SHAREPOOL_SIZE		(0xdf000)
 #define VETH_SHAREPOOL_OFFSET		(0x10000)
 #define MAX_SHAREQUEUE_SIZE		(0x20000)
@@ -261,15 +260,6 @@ struct bspveth_dma_bd {
 	u32 off;
 };
 
-struct bspveth_dmal {
-	u32 chl;
-	u32 len;
-	u32 slow;
-	u32 shi;
-	u32 dlow;
-	u32 dhi;
-};
-
 struct bspveth_rxtx_q {
 #ifndef VETH_BMC
 	struct bspveth_dma_bd *pbdbase_v;
-- 
2.33.0
                            [openeuler:openEuler-1.0-LTS 1753/1753] net/netfilter/nf_nat_proto.c:56:6: warning: no previous prototype for 'nf_nat_csum_recalc'
                        
                        
by kernel test robot 09 Aug '25
                    
                        tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   748957057cef2d3b3e35d749c9a1ade66e2b5e73
commit: 83abe3a3b85762720192809ac8695ff9255cfd23 [1753/1753] netfilter: nat: remove csum_recalc hook
config: x86_64-buildonly-randconfig-2002-20250806 (https://download.01.org/0day-ci/archive/20250809/202508091141.LQ39llkM-lkp@…)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250809/202508091141.LQ39llkM-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508091141.LQ39llkM-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> net/netfilter/nf_nat_proto.c:56:6: warning: no previous prototype for 'nf_nat_csum_recalc' [-Wmissing-prototypes]
      56 | void nf_nat_csum_recalc(struct sk_buff *skb,
         |      ^~~~~~~~~~~~~~~~~~
vim +/nf_nat_csum_recalc +56 net/netfilter/nf_nat_proto.c
    55	
  > 56	void nf_nat_csum_recalc(struct sk_buff *skb,
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
                            [openeuler:openEuler-1.0-LTS 1753/1753] kernel/sched/debug.o: warning: objtool: missing symbol for section .text.unlikely
                        
                        
by kernel test robot 09 Aug '25
                    
                        tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   748957057cef2d3b3e35d749c9a1ade66e2b5e73
commit: acfa0f545c7dd43f959c28e8ab4b139c927ba387 [1753/1753] svm: reduce log of run queue and atomic->atomic64
config: x86_64-buildonly-randconfig-2002-20250806 (https://download.01.org/0day-ci/archive/20250809/202508090931.8saWh06I-lkp@…)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250809/202508090931.8saWh06I-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508090931.8saWh06I-lkp@intel.com/
All warnings (new ones prefixed by >>):
   kernel/sched/debug.c:797:6: warning: no previous prototype for 'sysrq_sched_debug_tidy' [-Wmissing-prototypes]
     797 | void sysrq_sched_debug_tidy(void)
         |      ^~~~~~~~~~~~~~~~~~~~~~
>> kernel/sched/debug.o: warning: objtool: missing symbol for section .text.unlikely
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
                            [PATCH OLK-6.6 v2] mm/mem_sampling: preserve kernel subsystem SPE state across perf sampling
                        
                        
by Ze Zuo 09 Aug '25
                    
                        hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICRXXI
CVE: NA
--------------------------------
Ensure proper save/restore of SPE state for kernel subsystems (NUMA
balancing, DAMON etc.) when perf SPE sampling is enabled/disabled:
- Suspend subsystem memory sampling during perf SPE operation
- Restore original configurations when perf sampling stops
Fixes: 390982f28c57 ("mm/mem_sampling: Add sysctl control for NUMA balancing integration")
Signed-off-by: Ze Zuo <zuoze1(a)huawei.com>
---
changes since v1:
-- add Fixes tag.
 mm/mem_sampling.c | 124 ++++++++++++++++++++++++++--------------------
 1 file changed, 69 insertions(+), 55 deletions(-)
diff --git a/mm/mem_sampling.c b/mm/mem_sampling.c
index 74f95e4611fe..8483ebe1a83c 100644
--- a/mm/mem_sampling.c
+++ b/mm/mem_sampling.c
@@ -41,8 +41,11 @@ DEFINE_PER_CPU(enum arm_spe_user_e, arm_spe_user);
 EXPORT_PER_CPU_SYMBOL_GPL(arm_spe_user);
 
 enum mem_sampling_saved_state_e {
-	MEM_SAMPLING_STATE_ENABLE,
 	MEM_SAMPLING_STATE_DISABLE,
+	MEM_SAMPLING_STATE_ENABLE,
+	MEM_SAMPLING_STATE_NUMA_ENABLE,
+	MEM_SAMPLING_STATE_DAMON_ENABLE,
+	MEM_SAMPLING_STATE_NUM_DAMON_ENABLE,
 	MEM_SAMPLING_STATE_EMPTY,
 };
 enum mem_sampling_saved_state_e mem_sampling_saved_state = MEM_SAMPLING_STATE_EMPTY;
@@ -416,17 +419,26 @@ static void __set_mem_sampling_state(bool enabled)
 	}
 }
 
+static enum mem_sampling_saved_state_e get_mem_sampling_saved_state(void)
+{
+	if (static_branch_likely(&mm_damon_mem_sampling) &&
+		static_branch_likely(&sched_numabalancing_mem_sampling))
+		return MEM_SAMPLING_STATE_NUM_DAMON_ENABLE;
+	if (static_branch_likely(&mm_damon_mem_sampling))
+		return MEM_SAMPLING_STATE_DAMON_ENABLE;
+	if (static_branch_likely(&sched_numabalancing_mem_sampling))
+		return MEM_SAMPLING_STATE_NUMA_ENABLE;
+	if (static_branch_likely(&mem_sampling_access_hints))
+		return MEM_SAMPLING_STATE_ENABLE;
+
+	return MEM_SAMPLING_STATE_DISABLE;
+}
+
 void set_mem_sampling_state(bool enabled)
 {
 	if (!mem_sampling_ops.sampling_start || !mm_spe_enabled())
 		return;
 
-	if (mem_sampling_saved_state != MEM_SAMPLING_STATE_EMPTY) {
-		mem_sampling_saved_state = enabled ? MEM_SAMPLING_STATE_ENABLE :
-					    MEM_SAMPLING_STATE_DISABLE;
-		return;
-	}
-
 	if (enabled)
 		sysctl_mem_sampling_mode = MEM_SAMPLING_NORMAL;
 	else
@@ -434,10 +446,45 @@ void set_mem_sampling_state(bool enabled)
 	__set_mem_sampling_state(enabled);
 }
 
+static int set_state(int state)
+{
+	if (mem_sampling_saved_state != MEM_SAMPLING_STATE_EMPTY) {
+		mem_sampling_saved_state = state;
+		return -EINVAL;
+	}
+	switch (state) {
+	case 0:
+		set_mem_sampling_state(false);
+		break;
+	case 1:
+		set_mem_sampling_state(false);
+		set_mem_sampling_state(true);
+		break;
+	case 2:
+		set_mem_sampling_state(false);
+		set_mem_sampling_state(true);
+		set_numabalancing_mem_sampling_state(true);
+		break;
+	case 3:
+		set_mem_sampling_state(false);
+		set_mem_sampling_state(true);
+		set_damon_mem_sampling_state(true);
+		break;
+	case 4:
+		set_mem_sampling_state(true);
+		set_numabalancing_mem_sampling_state(true);
+		set_damon_mem_sampling_state(true);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 void mem_sampling_user_switch_process(enum user_switch_type type)
 {
-	bool state, mm_spe_is_perf_user = false;
-	int cpu;
+	bool mm_spe_is_perf_user = false;
+	int cpu, hints;
 
 	if (type >= USER_SWITCH_TYPE_MAX) {
 		pr_err("user switch type error.\n");
@@ -456,26 +503,27 @@ void mem_sampling_user_switch_process(enum user_switch_type type)
 		if (mem_sampling_saved_state != MEM_SAMPLING_STATE_EMPTY)
 			return;
 
-		if (static_branch_unlikely(&mem_sampling_access_hints))
-			mem_sampling_saved_state = MEM_SAMPLING_STATE_ENABLE;
+		hints = get_mem_sampling_saved_state();
+		set_state(0);
+
+		if (hints)
+			mem_sampling_saved_state = hints;
 		else
 			mem_sampling_saved_state = MEM_SAMPLING_STATE_DISABLE;
 
-		pr_debug("user switch away from mem_sampling, %s is saved, set to disable.\n",
-				mem_sampling_saved_state ? "disabled" : "enabled");
+		pr_debug("user switch away from mem_sampling, %d is saved, set to disable.\n",
+				mem_sampling_saved_state);
 
-		set_mem_sampling_state(false);
 	} else {
 		/* If the state is not backed up, do not restore it */
 		if (mem_sampling_saved_state == MEM_SAMPLING_STATE_EMPTY || mm_spe_is_perf_user)
 			return;
 
-		state = (mem_sampling_saved_state == MEM_SAMPLING_STATE_ENABLE) ? true : false;
-		set_mem_sampling_state(state);
+		hints = mem_sampling_saved_state;
 		mem_sampling_saved_state = MEM_SAMPLING_STATE_EMPTY;
+		set_state(hints);
 
-		pr_debug("user switch back to mem_sampling, set to saved %s.\n",
-				state ? "enalbe" : "disable");
+		pr_debug("user switch back to mem_sampling, set to saved %d.\n", hints);
 	}
 }
 EXPORT_SYMBOL_GPL(mem_sampling_user_switch_process);
@@ -488,15 +536,7 @@ static int proc_mem_sampling_enable(struct ctl_table *table, int write,
 	int err;
 	int state = 0;
 
-	if (static_branch_likely(&mem_sampling_access_hints))
-		state = 1;
-	if (static_branch_likely(&sched_numabalancing_mem_sampling))
-		state = 2;
-	if (static_branch_likely(&mm_damon_mem_sampling))
-		state = 3;
-	if (static_branch_likely(&mm_damon_mem_sampling) &&
-		static_branch_likely(&sched_numabalancing_mem_sampling))
-		state = 4;
+	state = get_mem_sampling_saved_state();
 
 	if (write && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -508,34 +548,8 @@ static int proc_mem_sampling_enable(struct ctl_table *table, int write,
 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
 	if (err < 0)
 		return err;
-	if (write) {
-		switch (state) {
-		case 0:
-			set_mem_sampling_state(false);
-			break;
-		case 1:
-			set_mem_sampling_state(false);
-			set_mem_sampling_state(true);
-			break;
-		case 2:
-			set_mem_sampling_state(false);
-			set_mem_sampling_state(true);
-			set_numabalancing_mem_sampling_state(true);
-			break;
-		case 3:
-			set_mem_sampling_state(false);
-			set_mem_sampling_state(true);
-			set_damon_mem_sampling_state(true);
-			break;
-		case 4:
-			set_mem_sampling_state(true);
-			set_numabalancing_mem_sampling_state(true);
-			set_damon_mem_sampling_state(true);
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
+	if (write)
+		err = set_state(state);
 	return err;
 }
 
-- 
2.33.0
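An editor's summary of the sysctl encoding implemented by set_state() above, read directly off the switch:

/*
 * mem_sampling sysctl states handled by set_state():
 *   0 - all mem_sampling off
 *   1 - mem_sampling_access_hints only
 *   2 - access hints + NUMA-balancing integration
 *   3 - access hints + DAMON integration
 *   4 - access hints + NUMA balancing + DAMON
 *
 * get_mem_sampling_saved_state() recovers the same value from the
 * static keys, which is what lets the state be saved and restored
 * across a perf SPE session.
 */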
                            [openeuler:openEuler-1.0-LTS 1753/1753] include/linux/string.h:249:16: warning: '__builtin_strncpy' accessing 80 bytes at offsets 100 and 36 may overlap up to 0 bytes at offset [9223372036854775807, -9223372036854775808]
                        
                        
by kernel test robot 09 Aug '25
                    
                        tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   748957057cef2d3b3e35d749c9a1ade66e2b5e73
commit: a24ec7d8d3462ba48b4bd932960c397d92de8782 [1753/1753] ASoC: dmaengine: Make the pcm->name equal to pcm->id if the name is not set
config: x86_64-buildonly-randconfig-2002-20250806 (https://download.01.org/0day-ci/archive/20250809/202508090752.oTu2Do8L-lkp@…)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250809/202508090752.oTu2Do8L-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508090752.oTu2Do8L-lkp@intel.com/
All warnings (new ones prefixed by >>):
   In file included from include/linux/bitmap.h:9,
                    from include/linux/cpumask.h:12,
                    from arch/x86/include/asm/cpumask.h:5,
                    from arch/x86/include/asm/msr.h:11,
                    from arch/x86/include/asm/processor.h:21,
                    from arch/x86/include/asm/cpufeature.h:5,
                    from arch/x86/include/asm/thread_info.h:53,
                    from include/linux/thread_info.h:38,
                    from arch/x86/include/asm/preempt.h:7,
                    from include/linux/preempt.h:81,
                    from include/linux/spinlock.h:51,
                    from include/linux/seqlock.h:36,
                    from include/linux/time.h:6,
                    from include/linux/stat.h:19,
                    from include/linux/module.h:11,
                    from sound/soc/soc-generic-dmaengine-pcm.c:6:
   In function 'strncpy',
       inlined from 'dmaengine_pcm_new' at sound/soc/soc-generic-dmaengine-pcm.c:318:4:
>> include/linux/string.h:249:16: warning: '__builtin_strncpy' accessing 80 bytes at offsets 100 and 36 may overlap up to 0 bytes at offset [9223372036854775807, -9223372036854775808] [-Wrestrict]
     249 |         return __builtin_strncpy(p, q, size);
         |                ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
vim +/__builtin_strncpy +249 include/linux/string.h
6974f0c4555e28 Daniel Micay 2017-07-12  240  
6974f0c4555e28 Daniel Micay 2017-07-12  241  #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
6974f0c4555e28 Daniel Micay 2017-07-12  242  __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
6974f0c4555e28 Daniel Micay 2017-07-12  243  {
6974f0c4555e28 Daniel Micay 2017-07-12  244  	size_t p_size = __builtin_object_size(p, 0);
6974f0c4555e28 Daniel Micay 2017-07-12  245  	if (__builtin_constant_p(size) && p_size < size)
6974f0c4555e28 Daniel Micay 2017-07-12  246  		__write_overflow();
6974f0c4555e28 Daniel Micay 2017-07-12  247  	if (p_size < size)
6974f0c4555e28 Daniel Micay 2017-07-12  248  		fortify_panic(__func__);
6974f0c4555e28 Daniel Micay 2017-07-12 @249  	return __builtin_strncpy(p, q, size);
6974f0c4555e28 Daniel Micay 2017-07-12  250  }
6974f0c4555e28 Daniel Micay 2017-07-12  251  
:::::: The code at line 249 was first introduced by commit
:::::: 6974f0c4555e285ab217cee58b6e874f776ff409 include/linux/string.h: add the option of fortified string.h functions
:::::: TO: Daniel Micay <danielmicay(a)gmail.com>
:::::: CC: Linus Torvalds <torvalds(a)linux-foundation.org>
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
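The geometry behind this warning: the offsets in the report (destination at 100, source at 36, length 80) apparently come from strncpy(pcm->name, pcm->id, sizeof(pcm->name)) copying between members of the same struct, where the potential 80-byte read window starting at the 64-byte id field runs into name itself. A minimal userspace sketch of the same layout (offsets reconstructed from the warning; the struct is illustrative, and GCC 11 at -O2 emits a similar -Wrestrict diagnostic on it):

#include <string.h>

struct pcm_like {
	char pad[36];  /* puts 'id' at offset 36, as in the report */
	char id[64];
	char name[80]; /* lands at offset 100 */
};

void set_name(struct pcm_like *p)
{
	/* Potential 80-byte read from a 64-byte source: the access window
	 * (offsets 36..115) overlaps 'name' (offset 100), so GCC reports
	 * that source and destination may overlap.
	 */
	strncpy(p->name, p->id, sizeof(p->name));
}

Bounding the copy by the source size (or copying with an explicit shorter length and NUL-terminating) avoids the overlapping window.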
                        Changes since V1:
Fixed the KABI breakage warning.
Hou Tao (1):
  kernfs: also call kernfs_set_rev() for positive dentry
Ian Kent (6):
  kernfs: add a revision to identify directory node changes
  kernfs: use VFS negative dentry caching
  kernfs: switch kernfs to use an rwsem
  kernfs: use i_lock to protect concurrent inode updates
  kernfs: dont call d_splice_alias() under kernfs node lock
  kernfs: don't create a negative dentry if inactive node exists
 fs/kernfs/dir.c             | 165 +++++++++++++++++++++---------------
 fs/kernfs/file.c            |   4 +-
 fs/kernfs/inode.c           |  25 ++++--
 fs/kernfs/kernfs-internal.h |  24 +++++-
 fs/kernfs/mount.c           |  12 +--
 fs/kernfs/symlink.c         |   4 +-
 include/linux/kernfs.h      |   7 +-
 7 files changed, 150 insertions(+), 91 deletions(-)
-- 
2.39.2
                            [openeuler:openEuler-1.0-LTS 1753/1753] drivers/staging/erofs/unzip_vle_lz4.o: warning: objtool: missing symbol for section .text
                        
                        
by kernel test robot 09 Aug '25
                    
                        tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   748957057cef2d3b3e35d749c9a1ade66e2b5e73
commit: 2e59f7a6876bfa6adca4ec9180ab3945e6ede7e1 [1753/1753] staging: erofs: compressed_pages should not be accessed again after freed
config: x86_64-buildonly-randconfig-2002-20250806 (https://download.01.org/0day-ci/archive/20250809/202508090402.PMJhGfxB-lkp@…)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250809/202508090402.PMJhGfxB-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508090402.PMJhGfxB-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/staging/erofs/unzip_vle_lz4.o: warning: objtool: missing symbol for section .text
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
                        Kernfs performance optimization.
Hou Tao (1):
  kernfs: also call kernfs_set_rev() for positive dentry
Ian Kent (6):
  kernfs: add a revision to identify directory node changes
  kernfs: use VFS negative dentry caching
  kernfs: switch kernfs to use an rwsem
  kernfs: use i_lock to protect concurrent inode updates
  kernfs: dont call d_splice_alias() under kernfs node lock
  kernfs: don't create a negative dentry if inactive node exists
 fs/kernfs/dir.c             | 165 +++++++++++++++++++++---------------
 fs/kernfs/file.c            |   4 +-
 fs/kernfs/inode.c           |  25 ++++--
 fs/kernfs/kernfs-internal.h |  24 +++++-
 fs/kernfs/mount.c           |  12 +--
 fs/kernfs/symlink.c         |   4 +-
 include/linux/kernfs.h      |   7 +-
 7 files changed, 150 insertions(+), 91 deletions(-)
-- 
2.39.2
                            [PATCH OLK-5.10 1/7] kernfs: add a revision to identify directory node changes
                        
                        
by Zizhi Wo 09 Aug '25
                    
                        From: Ian Kent <raven(a)themaw.net>
mainline inclusion
from mainline-v5.15-rc1
commit 895adbec302e92086359e6fd92611ac3be6d92c3
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICRD6W
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Add a revision counter to kernfs directory nodes so it can be used
to detect if a directory node has changed during negative dentry
revalidation.
There's an assumption that sizeof(unsigned long) <= sizeof(pointer)
on all architectures and as far as I know that assumption holds.
So adding a revision counter to the struct kernfs_elem_dir variant of
the kernfs_node type union won't increase the size of the kernfs_node
struct. This is because struct kernfs_elem_dir is at least
sizeof(pointer) smaller than the largest union variant. It's tempting
to make the revision counter a u64 but that would increase the size of
kernfs_node on archs where sizeof(pointer) is smaller than the revision
counter.
Reviewed-by: Miklos Szeredi <mszeredi(a)redhat.com>
Signed-off-by: Ian Kent <raven(a)themaw.net>
Link: https://lore.kernel.org/r/162642769895.63632.8356662784964509867.stgit@web.…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
 fs/kernfs/dir.c             |  2 ++
 fs/kernfs/kernfs-internal.h | 19 +++++++++++++++++++
 include/linux/kernfs.h      |  5 +++++
 3 files changed, 26 insertions(+)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 0ba056e06e48..9bc73c8b6e3f 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -380,6 +380,7 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
 	/* successfully added, account subdir number */
 	if (kernfs_type(kn) == KERNFS_DIR)
 		kn->parent->dir.subdirs++;
+	kernfs_inc_rev(kn->parent);
 
 	return 0;
 }
@@ -402,6 +403,7 @@ static bool kernfs_unlink_sibling(struct kernfs_node *kn)
 
 	if (kernfs_type(kn) == KERNFS_DIR)
 		kn->parent->dir.subdirs--;
+	kernfs_inc_rev(kn->parent);
 
 	rb_erase(&kn->rb, &kn->parent->dir.children);
 	RB_CLEAR_NODE(&kn->rb);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 7ee97ef59184..6a8d0ca26d03 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -81,6 +81,25 @@ static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
 	return d_inode(dentry)->i_private;
 }
 
+static inline void kernfs_set_rev(struct kernfs_node *parent,
+				  struct dentry *dentry)
+{
+	dentry->d_time = parent->dir.rev;
+}
+
+static inline void kernfs_inc_rev(struct kernfs_node *parent)
+{
+	parent->dir.rev++;
+}
+
+static inline bool kernfs_dir_changed(struct kernfs_node *parent,
+				      struct dentry *dentry)
+{
+	if (parent->dir.rev != dentry->d_time)
+		return true;
+	return false;
+}
+
 extern const struct super_operations kernfs_sops;
 extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
 
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index e2aa4910365d..236240a17017 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -99,6 +99,11 @@ struct kernfs_elem_dir {
 	 * better directly in kernfs_node but is here to save space.
 	 */
 	struct kernfs_root	*root;
+	/*
+	 * Monotonic revision counter, used to identify if a directory
+	 * node has changed during negative dentry revalidation.
+	 */
+	unsigned long		rev;
 };
 
 struct kernfs_elem_symlink {
-- 
2.39.2
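The revision counter only pays off in the follow-up "kernfs: use VFS negative dentry caching" patch, where a negative dentry can be revalidated without taking the kernfs lock. A simplified sketch of the intended check (condensed from that later patch, not part of this one):

static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Negative hashed dentry: still trustworthy as long as the parent
	 * directory has not changed since kernfs_set_rev() stamped d_time
	 * at lookup time.
	 */
	if (d_really_is_negative(dentry)) {
		struct kernfs_node *parent = kernfs_dentry_node(dentry->d_parent);

		if (parent && kernfs_dir_changed(parent, dentry))
			return 0;	/* directory changed: redo the lookup */
		return 1;		/* cached negative result is still valid */
	}

	/* positive dentries keep the existing node/name/ns checks ... */
	return 1;
}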
                            [openeuler:openEuler-1.0-LTS 1753/1753] net/rxrpc/local_object.c:152:20: warning: this statement may fall through
                        
                        
by kernel test robot 09 Aug '25
                    
                        tree:   https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head:   748957057cef2d3b3e35d749c9a1ade66e2b5e73
commit: 37a675e768d7606fe8a53e0c459c9b53e121ac20 [1753/1753] rxrpc: Fix transport sockopts to get IPv4 errors on an IPv6 socket
config: x86_64-buildonly-randconfig-2002-20250806 (https://download.01.org/0day-ci/archive/20250809/202508090248.C7MgyZmS-lkp@…)
compiler: gcc-11 (Debian 11.3.0-12) 11.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250809/202508090248.C7MgyZmS-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202508090248.C7MgyZmS-lkp@intel.com/
All warnings (new ones prefixed by >>):
   net/rxrpc/local_object.c: In function 'rxrpc_open_socket':
>> net/rxrpc/local_object.c:152:20: warning: this statement may fall through [-Wimplicit-fallthrough=]
     152 |                 if (ret < 0) {
         |                    ^
   net/rxrpc/local_object.c:161:9: note: here
     161 |         case AF_INET:
         |         ^~~~
vim +152 net/rxrpc/local_object.c
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  104  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  105  /*
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  106   * create the local socket
4f95dd78a77edc net/rxrpc/local_object.c David Howells   2016-04-04  107   * - must be called with rxrpc_local_mutex locked
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  108   */
2baec2c3f854d1 net/rxrpc/local_object.c David Howells   2017-05-24  109  static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  110  {
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  111  	struct sock *sock;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  112  	int ret, opt;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  113  
75b54cb57ca34c net/rxrpc/local_object.c David Howells   2016-09-13  114  	_enter("%p{%d,%d}",
75b54cb57ca34c net/rxrpc/local_object.c David Howells   2016-09-13  115  	       local, local->srx.transport_type, local->srx.transport.family);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  116  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  117  	/* create a socket to represent the local endpoint */
2baec2c3f854d1 net/rxrpc/local_object.c David Howells   2017-05-24  118  	ret = sock_create_kern(net, local->srx.transport.family,
aaa31cbc667333 net/rxrpc/local_object.c David Howells   2016-09-13  119  			       local->srx.transport_type, 0, &local->socket);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  120  	if (ret < 0) {
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  121  		_leave(" = %d [socket]", ret);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  122  		return ret;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  123  	}
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  124  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  125  	/* if a local address was supplied then bind it */
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  126  	if (local->srx.transport_len > sizeof(sa_family_t)) {
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  127  		_debug("bind");
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  128  		ret = kernel_bind(local->socket,
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  129  				  (struct sockaddr *)&local->srx.transport,
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  130  				  local->srx.transport_len);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  131  		if (ret < 0) {
4f95dd78a77edc net/rxrpc/local_object.c David Howells   2016-04-04  132  			_debug("bind failed %d", ret);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  133  			goto error;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  134  		}
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  135  	}
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  136  
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  137  	switch (local->srx.transport.family) {
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  138  	case AF_INET6:
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  139  		/* we want to receive ICMPv6 errors */
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  140  		opt = 1;
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  141  		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  142  					(char *) &opt, sizeof(opt));
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  143  		if (ret < 0) {
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  144  			_debug("setsockopt failed");
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  145  			goto error;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  146  		}
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  147  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  148  		/* we want to set the don't fragment bit */
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  149  		opt = IPV6_PMTUDISC_DO;
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  150  		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  151  					(char *) &opt, sizeof(opt));
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26 @152  		if (ret < 0) {
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  153  			_debug("setsockopt failed");
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  154  			goto error;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  155  		}
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  156  
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  157  		/* Fall through and set IPv4 options too otherwise we don't get
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  158  		 * errors from IPv4 packets sent through the IPv6 socket.
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  159  		 */
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  160  
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  161  	case AF_INET:
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  162  		/* we want to receive ICMP errors */
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  163  		opt = 1;
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  164  		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  165  					(char *) &opt, sizeof(opt));
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  166  		if (ret < 0) {
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  167  			_debug("setsockopt failed");
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  168  			goto error;
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  169  		}
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  170  
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  171  		/* we want to set the don't fragment bit */
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  172  		opt = IP_PMTUDISC_DO;
37a675e768d760 net/rxrpc/local_object.c David Howells   2018-09-27  173  		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  174  					(char *) &opt, sizeof(opt));
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  175  		if (ret < 0) {
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  176  			_debug("setsockopt failed");
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  177  			goto error;
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  178  		}
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  179  
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  180  		/* We want receive timestamps. */
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  181  		opt = 1;
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  182  		ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  183  					(char *)&opt, sizeof(opt));
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  184  		if (ret < 0) {
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  185  			_debug("setsockopt failed");
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  186  			goto error;
b604dd9883f783 net/rxrpc/local_object.c David Howells   2018-09-27  187  		}
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  188  		break;
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  189  
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  190  	default:
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  191  		BUG();
f2aeed3a591ff2 net/rxrpc/local_object.c David Howells   2018-05-10  192  	}
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  193  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  194  	/* set the socket up */
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  195  	sock = local->socket->sk;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  196  	sock->sk_user_data	= local;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  197  	sock->sk_data_ready	= rxrpc_data_ready;
abe89ef0ed1a50 net/rxrpc/local_object.c David Howells   2016-04-04  198  	sock->sk_error_report	= rxrpc_error_report;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  199  	_leave(" = 0");
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  200  	return 0;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  201  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  202  error:
91cf45f02af5c8 net/rxrpc/ar-local.c     Trond Myklebust 2007-11-12  203  	kernel_sock_shutdown(local->socket, SHUT_RDWR);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  204  	local->socket->sk->sk_user_data = NULL;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  205  	sock_release(local->socket);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  206  	local->socket = NULL;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  207  
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  208  	_leave(" = %d", ret);
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  209  	return ret;
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  210  }
17926a79320afa net/rxrpc/ar-local.c     David Howells   2007-04-26  211  
:::::: The code at line 152 was first introduced by commit
:::::: 17926a79320afa9b95df6b977b40cca6d8713cea [AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both
:::::: TO: David Howells <dhowells(a)redhat.com>
:::::: CC: David S. Miller <davem(a)davemloft.net>
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
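The code already carries a "Fall through" comment, but GCC's comment-matching heuristic for -Wimplicit-fallthrough= is easily defeated (here, plausibly, by the blank line between the comment and the case label). On trees that have it (v5.4+), the unambiguous fix is the fallthrough pseudo-keyword from linux/compiler_attributes.h; on a 4.19-based tree like this one, a bare fall-through comment placed immediately before the label is what the heuristic expects. A sketch of the annotated switch:

	switch (local->srx.transport.family) {
	case AF_INET6:
		/* ... IPv6 socket options, elided ... */

		/* Fall through and set IPv4 options too, otherwise we don't
		 * get errors from IPv4 packets sent through the IPv6 socket.
		 */
		fallthrough;
	case AF_INET:
		/* ... IPv4 socket options, elided ... */
		break;
	default:
		BUG();
	}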