Kernel
From: huangjun <huangjun63(a)huawei.com>
If we want the ACPI GED device to support wakeup from freeze, we need
to implement the driver's suspend/resume callbacks. In these two
callbacks the ACPI _GPO and _GPP methods are evaluated to set the
sleep flag and to perform the debouncing (anti-shake).
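As background, the 4-second anti-shake is built on a kernel timer. A
minimal standalone sketch of that debounce pattern, using
timer_setup()/mod_timer(); the demo_* names are illustrative and this
is not the patch itself:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct timer_list debounce_timer;
};

static void demo_debounce_fn(struct timer_list *t)
{
	/* runs once, ~4 seconds after the timer is armed */
	struct demo_dev *d = from_timer(d, t, debounce_timer);

	(void)d;	/* re-enable event handling here */
}

static void demo_init(struct demo_dev *d)
{
	/* bind the callback once, at init time */
	timer_setup(&d->debounce_timer, demo_debounce_fn, 0);
}

static void demo_resume(struct demo_dev *d)
{
	/* (re)arm the one-shot debounce period */
	mod_timer(&d->debounce_timer, jiffies + 4 * HZ);
}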
Signed-off-by: Huangjun <huangjun63(a)huawei.com>
---
drivers/acpi/evged.c | 82 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 82 insertions(+)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..84656fc09d15 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -46,11 +46,14 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
#define MODULE_NAME "acpi-ged"
struct acpi_ged_device {
struct device *dev;
+ struct timer_list timer;
struct list_head event_list;
};
@@ -148,6 +151,8 @@ static int ged_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+
+ timer_setup(&geddev->timer, NULL, 0);
platform_set_drvdata(pdev, geddev);
return 0;
@@ -164,6 +169,7 @@ static void ged_shutdown(struct platform_device *pdev)
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
+ del_timer(&geddev->timer);
}
static int ged_remove(struct platform_device *pdev)
@@ -177,6 +183,78 @@ static const struct acpi_device_id ged_acpi_ids[] = {
{},
};
+#ifdef CONFIG_PM_SLEEP
+static acpi_status ged_acpi_execute(struct device *dev, char* method, u64 arg)
+{
+ acpi_status acpi_ret;
+ acpi_handle method_handle;
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(dev), method, &method_handle);
+
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "cannot locate %s method\n", method);
+ return AE_NOT_FOUND;
+ }
+
+ acpi_ret = acpi_execute_simple_method(method_handle, NULL, arg);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "%s method execution failed\n", method);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static void ged_timer_callback(struct timer_list *t)
+{
+ struct acpi_ged_device *geddev = from_timer(geddev, t, timer);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ ged_acpi_execute(geddev->dev, "_GPP", event->gsi);
+ }
+}
+
+static int ged_suspend(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+ acpi_status acpi_ret;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ acpi_ret = ged_acpi_execute(dev, "_GPO", event->gsi);
+
+ if (acpi_ret == AE_ERROR)
+ return -EINVAL;
+
+ enable_irq_wake(event->irq);
+ }
+ return 0;
+}
+
+static int ged_resume(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ disable_irq_wake(event->irq);
+ }
+
+ /* use timer to complete 4s anti-shake */
+ geddev->timer.expires = jiffies + (4 * HZ);
+ geddev->timer.function = ged_timer_callback;
+ add_timer(&geddev->timer);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ged_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ged_suspend, ged_resume)
+};
+#endif
+
+
static struct platform_driver ged_driver = {
.probe = ged_probe,
.remove = ged_remove,
@@ -184,6 +262,10 @@ static struct platform_driver ged_driver = {
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &ged_pm_ops,
+#endif
+
},
};
builtin_platform_driver(ged_driver);
--
2.20.1
From: Sunnanyong <sunnanyong(a)huawei.com>
To support S4 (suspend to disk), the ITS tables must sit at the same
addresses in the booting kernel and in the resumed kernel. That covers
all of the ITS tables as well as the redistributors'.
To support this, allocate the ITT memory from a static memory pool
instead of kzalloc_node().
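With the constants used in this patch, each 4 MB pool is split into
512-byte ITT slots, so one pool covers 8192 device IDs and 16 pools
cover IDs 0..131071. A standalone sketch of the same indexing math,
mirroring its_mem_pool_alloc() below (illustrative only):

#define DEMO_POOL_SIZE		(4UL << 20)	/* 4 MB per pool */
#define DEMO_SLOT_SIZE		512UL		/* one ITT slot per device ID */
#define DEMO_SLOTS_PER_POOL	(DEMO_POOL_SIZE / DEMO_SLOT_SIZE)	/* 8192 */

static unsigned long demo_pool_num(unsigned int dev_id)
{
	return dev_id / DEMO_SLOTS_PER_POOL;	/* e.g. dev_id 10000 -> pool 1 */
}

static unsigned long demo_slot_offset(unsigned int dev_id)
{
	/* e.g. dev_id 10000 -> byte offset of slot 1808 inside pool 1 */
	return (dev_id % DEMO_SLOTS_PER_POOL) * DEMO_SLOT_SIZE;
}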
Signed-off-by: Sunnanyong <sunnanyong(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 72 ++++++++++++++++++++++++++++++--
1 file changed, 69 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 860f3ef2969e..9de585fe74fb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -189,6 +189,14 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static void *its_mem_pool_alloc(int dev_id);
+#define ITS_MEM_POOL_SIZE (SZ_4M)
+#define ITS_MEM_POOL_MAX (16)
+#define GITS_OTHER_OFFSET 0x20000
+#define GITS_OTHER_REG_SIZE 0x100
+#define GITS_FUNC_REG_OFFSET 0x80
+static void *its_mem_pool[ITS_MEM_POOL_MAX] = {0};
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2436,7 +2444,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
+ itt = its_mem_pool_alloc(dev_id);
+ if (!itt)
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -2450,7 +2462,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@@ -2486,7 +2497,6 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
- kfree(its_dev->itt);
kfree(its_dev);
}
@@ -3798,11 +3808,41 @@ static int redist_disable_lpis(void)
return 0;
}
+static void its_cpu_clear_cache(void)
+{
+ struct its_node *its;
+ u64 val = 0;
+ void __iomem *func_base;
+
+ raw_spin_lock(&its_lock);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ func_base = ioremap(its->phys_base + GITS_OTHER_OFFSET,
+ GITS_OTHER_REG_SIZE);
+ if (!func_base) {
+ pr_err("ITS@%p : Unable to map ITS OTHER registers\n",
+ (void *)(its->phys_base + GITS_OTHER_OFFSET));
+ raw_spin_unlock(&its_lock);
+ return;
+ }
+
+ val = readl_relaxed(func_base + GITS_FUNC_REG_OFFSET);
+ val = val | (0x7 << 16);
+ writel_relaxed(val, func_base + GITS_FUNC_REG_OFFSET);
+ dsb(sy);
+ iounmap(func_base);
+ }
+
+ raw_spin_unlock(&its_lock);
+}
+
+
int its_cpu_init(void)
{
if (!list_empty(&its_nodes)) {
int ret;
+ its_cpu_clear_cache();
ret = redist_disable_lpis();
if (ret)
return ret;
@@ -4001,6 +4041,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct its_node *its;
bool has_v4 = false;
int err;
+ int i;
its_parent = parent_domain;
of_node = to_of_node(handle);
@@ -4014,6 +4055,16 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
+ for (i = 0; i < ITS_MEM_POOL_MAX; i++) {
+ if (!its_mem_pool[i]) {
+ its_mem_pool[i] = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_MEM_POOL_SIZE));
+ if (!its_mem_pool[i])
+ pr_err("err:[its mem[%d]] has no memory\n", i);
+ }
+ }
+
+
gic_rdists = rdists;
err = allocate_lpi_tables();
@@ -4035,3 +4086,18 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return 0;
}
+
+void *its_mem_pool_alloc(int dev_id)
+{
+ int pool_num = dev_id / (ITS_MEM_POOL_SIZE / SZ_512);
+ int idx = dev_id % (ITS_MEM_POOL_SIZE / SZ_512);
+ void *addr = NULL;
+
+ if (pool_num >= ITS_MEM_POOL_MAX || !its_mem_pool[pool_num]) {
+ pr_err("[its mem[%d]] alloc error\n", pool_num);
+ return NULL;
+ }
+
+ addr = its_mem_pool[pool_num] + idx * SZ_512;
+ return addr;
+}
--
2.20.1
Chengdu Haiguang IC Design Co., Ltd (Hygon) is a new x86 CPU vendor,
a joint venture between AMD and Haiguang Information Technology Co.,
Ltd., aiming to provide high-performance x86 processors for the
Chinese server market.
The first-generation Hygon processor (Dhyana) originates from AMD
technology and shares most of its architecture with AMD's family 17h,
but carries a different CPU vendor ID ("HygonGenuine"), PCIe device
vendor ID (0x1D94) and family number (family 18h).
To enable Linux kernel support for Hygon CPUs, a new vendor type
(X86_VENDOR_HYGON, value 9) is added in
arch/x86/include/asm/processor.h, and most of the kernel support code
is shared with AMD family 17h.
Because Hygon will negotiate with AMD to make sure that only Hygon
uses family 18h, the series tries to minimize code modification and
share as much code as possible with AMD.
This patch series has been applied and tested successfully on Hygon
Dhyana SoC silicon. It has also been tested on an AMD EPYC (family
17h) processor; it works fine and does no harm to the existing code.
This patch series is created for the current openEuler-1.0-LTS branch.
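For context, the vendor string mentioned above comes from CPUID leaf 0.
A small userspace sketch (GCC/Clang on x86, illustrative only) that
prints it; it would report "HygonGenuine" on Dhyana and "AuthenticAMD"
on EPYC:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* the 12-byte vendor string is returned in EBX, EDX, ECX order */
	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("%s\n", vendor);
	return 0;
}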
References:
[1] Linux kernel patches for Hygon Dhyana, merged in 4.20:
https://git.kernel.org/tip/c9661c1e80b609cd038db7c908e061f0535804ef
[2] MSR and CPUID definition:
https://www.amd.com/system/files/TechDocs/54945_PPR_Family_17h_Models_00h-0…
Pu Wen (22):
x86/cpu: Create Hygon Dhyana architecture support file
x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
x86/events: Add Hygon Dhyana support to PMU infrastructure
x86/alternative: Init ideal_nops for Hygon Dhyana
x86/amd_nb: Check vendor in AMD-only functions
x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
x86/apic: Add Hygon Dhyana support
x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
x86/mce: Add Hygon Dhyana support to the MCA infrastructure
x86/kvm: Add Hygon Dhyana support to KVM
x86/xen: Add Hygon Dhyana support to Xen
ACPI: Add Hygon Dhyana support
cpufreq: Add Hygon Dhyana support
EDAC, amd64: Add Hygon Dhyana support
tools/cpupower: Add Hygon Dhyana support
hwmon: (k10temp) Add Hygon Dhyana support
x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die
processors
i2c-piix4: Add Hygon Dhyana SMBus support
x86/amd_nb: Make hygon_nb_misc_ids static
NTB: Add Hygon Device ID
Documentation/i2c/busses/i2c-piix4 | 2 +
MAINTAINERS | 6 +
arch/x86/Kconfig.cpu | 14 +
arch/x86/events/amd/core.c | 4 +
arch/x86/events/amd/uncore.c | 20 +-
arch/x86/events/core.c | 4 +
arch/x86/include/asm/amd_nb.h | 3 +
arch/x86/include/asm/cacheinfo.h | 1 +
arch/x86/include/asm/kvm_emulate.h | 4 +
arch/x86/include/asm/mce.h | 2 +
arch/x86/include/asm/processor.h | 3 +-
arch/x86/include/asm/virtext.h | 5 +-
arch/x86/kernel/alternative.c | 4 +
arch/x86/kernel/amd_nb.c | 49 ++-
arch/x86/kernel/apic/apic.c | 7 +
arch/x86/kernel/apic/probe_32.c | 1 +
arch/x86/kernel/cpu/Makefile | 1 +
arch/x86/kernel/cpu/bugs.c | 4 +-
arch/x86/kernel/cpu/cacheinfo.c | 31 +-
arch/x86/kernel/cpu/common.c | 4 +
arch/x86/kernel/cpu/cpu.h | 1 +
arch/x86/kernel/cpu/hygon.c | 413 ++++++++++++++++++
arch/x86/kernel/cpu/mce/core.c | 20 +-
arch/x86/kernel/cpu/mce/severity.c | 3 +-
arch/x86/kernel/cpu/mtrr/cleanup.c | 3 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 2 +-
arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +
arch/x86/kernel/smpboot.c | 4 +-
arch/x86/kvm/emulate.c | 11 +-
arch/x86/pci/amd_bus.c | 6 +-
arch/x86/xen/pmu.c | 12 +-
drivers/acpi/acpi_pad.c | 1 +
drivers/acpi/processor_idle.c | 1 +
drivers/cpufreq/acpi-cpufreq.c | 5 +
drivers/cpufreq/amd_freq_sensitivity.c | 9 +-
drivers/edac/amd64_edac.c | 10 +-
drivers/edac/mce_amd.c | 4 +-
drivers/hwmon/k10temp.c | 3 +-
drivers/i2c/busses/Kconfig | 1 +
drivers/i2c/busses/i2c-piix4.c | 15 +-
drivers/ntb/hw/amd/ntb_hw_amd.c | 1 +
include/linux/pci_ids.h | 2 +
tools/power/cpupower/utils/cpufreq-info.c | 6 +-
tools/power/cpupower/utils/helpers/amd.c | 4 +-
tools/power/cpupower/utils/helpers/cpuid.c | 8 +-
tools/power/cpupower/utils/helpers/helpers.h | 2 +-
tools/power/cpupower/utils/helpers/misc.c | 2 +-
.../utils/idle_monitor/mperf_monitor.c | 3 +-
48 files changed, 668 insertions(+), 55 deletions(-)
create mode 100644 arch/x86/kernel/cpu/hygon.c
--
2.23.0
Alexander Usyskin (1):
mei: me: add cedar fork device ids
Amritha Nambiar (1):
net: Fix Tx hash bound checking
Arun KS (1):
arm64: Fix size of __early_cpu_boot_status
Avihai Horon (1):
RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
Chris Lew (1):
rpmsg: glink: Remove chunk size word align warning
Daniel Jordan (1):
padata: always acquire cpu_hotplug_lock before pinst->lock
David Ahern (1):
tools/accounting/getdelays.c: fix netlink attribute length
David Howells (1):
rxrpc: Fix sendmsg(MSG_WAITALL) handling
Eugene Syromiatnikov (1):
coresight: do not use the BIT() macro in the UAPI header
Eugeniy Paltsev (1):
initramfs: restore default compression behavior
Florian Fainelli (2):
net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
net: dsa: bcm_sf2: Ensure correct sub-node is parsed
Geoffrey Allott (1):
ALSA: hda/ca0132 - Add Recon3Di quirk to handle integrated sound on
EVGA X99 Classified motherboard
Gerd Hoffmann (1):
drm/bochs: downgrade pci_request_region failure from error to warning
Greg Kroah-Hartman (1):
Linux 4.19.115
Hans Verkuil (1):
drm_dp_mst_topology: fix broken
drm_dp_sideband_parse_remote_dpcd_read()
Hans de Goede (2):
extcon: axp288: Add wakeup support
power: supply: axp288_charger: Add special handling for HP Pavilion x2
10
Ilya Dryomov (1):
ceph: canonicalize server path in place
James Zhu (1):
drm/amdgpu: fix typo for vcn1 idle check
Jarod Wilson (1):
ipv6: don't auto-add link-local address to lag ports
Jason A. Donenfeld (1):
random: always use batched entropy for get_random_u{32, 64}
Jason Gunthorpe (2):
RDMA/ucma: Put a lock around every call to the rdma_cm layer
RDMA/cma: Teach lockdep about the order of rtnl and lock
Jisheng Zhang (1):
net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
Kaike Wan (2):
IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
IB/hfi1: Fix memory leaks in sysfs registration and unregistration
Kishon Vijay Abraham I (2):
misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
misc: pci_endpoint_test: Avoid using module parameter to determine
irqtype
Len Brown (2):
tools/power turbostat: Fix gcc build warnings
tools/power turbostat: Fix missing SYS_LPI counter on some Chromebooks
Lucas Stach (1):
drm/etnaviv: replace MMU flush marker with flush sequence
Marcelo Ricardo Leitner (1):
sctp: fix possibly using a bad saddr with a given dst
Mario Kleiner (1):
drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
Martin Kaiser (1):
hwrng: imx-rngc - fix an error path
Oleksij Rempel (1):
net: phy: micrel: kszphy_resume(): add delay after genphy_resume()
before accessing PHY registers
Paul Cercueil (1):
ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
Petr Machata (1):
mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
Prabhath Sajeepa (1):
nvme-rdma: Avoid double freeing of async event data
Qian Cai (1):
ipv4: fix a RCU-list lock in fib_triestat_seq_show
Qiujun Huang (3):
sctp: fix refcount bug in sctp_wfree
Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
fbcon: fix null-ptr-deref in fbcon_switch
Rob Clark (2):
drm/msm: stop abusing dma_map/unmap for cache
drm/msm: Use the correct dma_sync calls in msm_gem
Roger Quadros (1):
usb: dwc3: don't set gadget->is_otg flag
Sean Young (1):
media: rc: IR signal for Panasonic air conditioner too long
Taniya Das (1):
clk: qcom: rcg: Return failure for RCG update
Thinh Nguyen (1):
usb: dwc3: gadget: Wrap around when skip TRBs
William Dauchy (1):
net, ip_tunnel: fix interface lookup with no key
Xiubo Li (1):
ceph: remove the extra slashes in the server path
YueHaibing (1):
misc: rtsx: set correct pcr_ops for rts522A
Makefile | 2 +-
arch/arm64/kernel/head.S | 2 +-
drivers/char/hw_random/imx-rngc.c | 4 +-
drivers/char/random.c | 20 ++------
drivers/clk/qcom/clk-rcg2.c | 2 +-
drivers/extcon/extcon-axp288.c | 32 ++++++++++++
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +-
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11 +++++
drivers/gpu/drm/bochs/bochs_hw.c | 6 +--
drivers/gpu/drm/drm_dp_mst_topology.c | 1 +
drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 10 ++--
drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1 +
drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 6 +--
drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 2 +-
drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++--
drivers/infiniband/core/cma.c | 14 ++++++
drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++-
drivers/infiniband/hw/hfi1/sysfs.c | 26 +++++++---
drivers/media/rc/lirc_dev.c | 2 +-
drivers/misc/cardreader/rts5227.c | 1 +
drivers/misc/mei/hw-me-regs.h | 2 +
drivers/misc/mei/pci-me.c | 2 +
drivers/misc/pci_endpoint_test.c | 14 ++++--
drivers/net/dsa/bcm_sf2.c | 9 +++-
.../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 8 +--
.../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2 +-
drivers/net/phy/micrel.c | 7 +++
drivers/nvme/host/rdma.c | 8 +--
drivers/power/supply/axp288_charger.c | 57 +++++++++++++++++++++-
drivers/rpmsg/qcom_glink_native.c | 3 --
drivers/usb/dwc3/gadget.c | 3 +-
drivers/video/fbdev/core/fbcon.c | 3 ++
fs/ceph/super.c | 56 +++++++++++++--------
fs/ceph/super.h | 2 +-
include/uapi/linux/coresight-stm.h | 6 ++-
kernel/padata.c | 4 +-
net/bluetooth/rfcomm/tty.c | 4 +-
net/core/dev.c | 2 +
net/ipv4/fib_trie.c | 3 ++
net/ipv4/ip_tunnel.c | 6 +--
net/ipv6/addrconf.c | 4 ++
net/rxrpc/sendmsg.c | 4 +-
net/sctp/ipv6.c | 20 +++++---
net/sctp/protocol.c | 28 +++++++----
net/sctp/socket.c | 31 +++++++++---
sound/pci/hda/patch_ca0132.c | 1 +
sound/soc/jz4740/jz4740-i2s.c | 2 +-
tools/accounting/getdelays.c | 2 +-
tools/power/x86/turbostat/turbostat.c | 27 +++++-----
usr/Kconfig | 22 ++++-----
50 files changed, 434 insertions(+), 148 deletions(-)
--
1.8.3
From: "Paul E. McKenney" <paulmck(a)kernel.org>
mainline inclusion
from mainline-v5.6-rc1
commit 844a378de3372c923909681706d62336d702531e
category: bugfix
bugzilla: 28851
CVE: NA
-------------------------------------------------------------------------
The ->srcu_last_gp_end field is accessed from any CPU at any time
by synchronize_srcu(), so non-initialization references need to use
READ_ONCE() and WRITE_ONCE(). This commit therefore makes that change.
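As a generic illustration of the pattern (not the SRCU code itself),
READ_ONCE()/WRITE_ONCE() mark lockless accesses to a shared field so
the compiler cannot tear, fuse or re-load them; the demo_* names below
are purely illustrative:

#include <linux/compiler.h>
#include <linux/types.h>

static unsigned long demo_last_gp_end;	/* stands in for ->srcu_last_gp_end */

static void demo_writer(unsigned long now)
{
	WRITE_ONCE(demo_last_gp_end, now);	/* single, untorn store */
}

static bool demo_too_soon(unsigned long now, unsigned long holdoff)
{
	unsigned long tlast = READ_ONCE(demo_last_gp_end);	/* single, untorn load */

	return now - tlast < holdoff;
}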
Reported-by: syzbot+08f3e9d26e5541e1ecf2(a)syzkaller.appspotmail.com
Acked-by: Marco Elver <elver(a)google.com>
Signed-off-by: Paul E. McKenney <paulmck(a)kernel.org>
Conflicts:
kernel/rcu/srcutree.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/rcu/srcutree.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 4b0a6e3..7bd0204 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -552,7 +552,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ WRITE_ONCE(sp->srcu_last_gp_end, ktime_get_mono_fast_ns());
rcu_seq_end(&sp->srcu_gp_seq);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
@@ -780,6 +780,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
unsigned long flags;
struct srcu_data *sdp;
unsigned long t;
+ unsigned long tlast;
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
@@ -798,9 +799,9 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
+ tlast = READ_ONCE(sp->srcu_last_gp_end);
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, tlast, tlast + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
--
1.8.3
From: Yunsheng Lin <linyunsheng(a)huawei.com>
mainline inclusion
from mainline-v5.4-rc1
commit 6b0c54e7f2715997c366e8374209bc74259b0a59
category: bugfix
bugzilla: 21318
CVE: NA
-------------------------------------------------------------------------
The cookie is dereferenced before null checking in the function
iommu_dma_init_domain.
This patch moves the dereferencing after the null checking.
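The bug class is the classic "dereference before check" pattern; a
minimal generic sketch with illustrative names (not the iommu code):

#include <linux/errno.h>

struct demo_cookie { int granule; };

/* buggy: the pointer is used before it is checked for NULL */
static int demo_init_buggy(struct demo_cookie *cookie)
{
	int granule = cookie->granule;	/* dereference happens here */

	if (!cookie)
		return -EINVAL;		/* check comes too late */
	return granule;
}

/* fixed: check first, dereference afterwards */
static int demo_init_fixed(struct demo_cookie *cookie)
{
	if (!cookie)
		return -EINVAL;

	return cookie->granule;
}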
Fixes: fdbe574eb693 ("iommu/dma: Allow MSI-only cookies")
Signed-off-by: Yunsheng Lin <linyunsheng(a)huawei.com>
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
Conflicts:
drivers/iommu/dma-iommu.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/iommu/dma-iommu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 64ae17e8b..b68d9fd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -290,13 +290,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+ iovad = &cookie->iovad;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: 4472
-----------------------------------------------------------------------
Fix an out-of-bounds access caused by user input.
To solve the problem, the kernel driver now imposes restrictions on
each user-supplied input before it is used.
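A generic sketch of the hardening applied below: bound a user-supplied
length before acting on it, and zero-initialize any structure that is
later copied to user space so no stale stack data leaks out. The
demo_* names and DEMO_MAX_LEN are illustrative, not the driver's API:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/errno.h>

#define DEMO_MAX_LEN 512	/* mirrors TOOL_COUNTER_MAX_LEN in the patch */

struct demo_reply { u32 ids[8]; };

static int demo_handle_request(void __user *ubuf, u32 user_len)
{
	struct demo_reply reply;

	if (user_len > DEMO_MAX_LEN)	/* reject out-of-range requests */
		return -EINVAL;

	memset(&reply, 0, sizeof(reply));	/* avoid leaking stack contents */
	/* ... fill in only the fields that are actually valid ... */

	if (copy_to_user(ubuf, &reply, sizeof(reply)))
		return -EFAULT;

	return 0;
}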
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_nictool.c | 18 ++++++++++++++++++
drivers/net/ethernet/huawei/hinic/hinic_nictool.h | 2 ++
drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c | 16 +++++++++++++---
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
index 46dd9ec..df01088 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -1712,6 +1712,19 @@ static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
return UP_COMP_TIME_OUT_VAL;
}
+static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ u32 rd_len = csr_write_msg->rd_len;
+
+ if (rd_len > TOOL_COUNTER_MAX_LEN) {
+ pr_err("Csr read or write len is invalid!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int send_to_up(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
@@ -1744,6 +1757,9 @@ static int send_to_up(void *hwdev, struct msg_module *nt_msg,
}
} else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
+ if (check_useparam_valid(nt_msg, buf_in))
+ return -EINVAL;
+
if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) {
ret = api_csr_write(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
@@ -1994,6 +2010,8 @@ static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
+ memset(&card_id, 0, sizeof(card_id));
+
hinic_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
index cfbe435..e8eccaf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -285,4 +285,6 @@ struct hinic_pf_info {
extern void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
struct hinic_show_item *items);
+#define TOOL_COUNTER_MAX_LEN 512
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
index 9536adf..eb35df6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -253,9 +253,19 @@ int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
ctr_rd_rsp_u rsp;
int ret;
- if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
- pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n",
- hwdev, value1, value2, ctr_id);
+ if (!value1) {
+ pr_err("value1 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!value2) {
+ pr_err("value2 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!hwdev || (0 != (ctr_id & 0x1))) {
+ pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n",
+ ctr_id);
return -EFAULT;
}
--
1.8.3
From: Li Bin <huawei.libin(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 34234
CVE: NA
--------------------------------
If dxfer_len is greater than 256M the request is invalid, and
sg_common_write() should call sg_remove_request() before returning.
Signed-off-by: Li Bin <huawei.libin(a)huawei.com>
Acked-by: Douglas Gilbert <dgilbert(a)interlog.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/sg.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c75324a..9c4b71e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -808,8 +808,10 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
--
1.8.3
[PATCH 1/2] btrfs: extent_io: Handle errors better in extent_write_full_page()
by Yang Yingliang 18 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.2-rc2
commit 3065976b045f77a910809fa7699f99a1e7c0dbbb
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19377
Introduce end_write_bio() for CVE-2019-19377.
-------------------------------------------------
Since flush_write_bio() can now return an error, kill the BUG_ON() first.
Then, instead of calling flush_write_bio() unconditionally, check the
return value of __extent_writepage() first.
If __extent_writepage() fails, do the cleanup and return the error
without submitting the possibly corrupted or half-baked bio.
If __extent_writepage() succeeds, call flush_write_bio() and return
its result.
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/extent_io.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/extent_io.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 11efb4f..7f2990f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2731,6 +2731,16 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
return blk_status_to_errno(ret);
}
+/* Cleanup unsubmitted bios */
+static void end_write_bio(struct extent_page_data *epd, int ret)
+{
+ if (epd->bio) {
+ epd->bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(epd->bio);
+ epd->bio = NULL;
+ }
+}
+
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@@ -3438,6 +3448,9 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
* records are inserted to lock ranges in the tree, and as dirty areas
* are found, they are marked writeback. Then the lock bits are removed
* and the end_io handler clears the writeback ranges
+ *
+ * Return 0 if everything goes well.
+ * Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
@@ -3505,6 +3518,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
+ ASSERT(ret <= 0);
return ret;
done_unlocked:
@@ -4054,6 +4068,11 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
};
ret = __extent_writepage(page, wbc, &epd);
+ ASSERT(ret <= 0);
+ if (ret < 0) {
+ end_write_bio(&epd, ret);
+ return ret;
+ }
flush_write_bio(&epd);
return ret;
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: cleanup
bugzilla: 4472
-----------------------------------------------------------------------
Delete unused header and source files.
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_common.c | 80 -
drivers/net/ethernet/huawei/hinic/hinic_common.h | 38 -
drivers/net/ethernet/huawei/hinic/hinic_dev.h | 64 -
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 978 -------
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 208 --
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 947 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 187 --
drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 149 --
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1010 --------
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 239 --
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 886 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 265 --
drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 351 ---
drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 272 --
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 533 ----
drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 97 -
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 597 -----
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 907 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 205 --
.../net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 214 --
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 878 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 117 -
drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 368 ---
drivers/net/ethernet/huawei/hinic/hinic_port.c | 379 ---
drivers/net/ethernet/huawei/hinic/hinic_port.h | 198 --
.../net/ethernet/huawei/hinic/hinic_sml_table.h | 2728 --------------------
.../ethernet/huawei/hinic/hinic_sml_table_pub.h | 277 --
27 files changed, 13172 deletions(-)
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c
deleted file mode 100644
index 02c74fd..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-
-/**
- * hinic_cpu_to_be32 - convert data to big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_cpu_to_be32(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = cpu_to_be32(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_be32_to_cpu - convert data from big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_be32_to_cpu(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = be32_to_cpu(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_set_sge - set dma area in scatter gather entry
- * @sge: scatter gather entry
- * @addr: dma address
- * @len: length of relevant data in the dma address
- **/
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)
-{
- sge->hi_addr = upper_32_bits(addr);
- sge->lo_addr = lower_32_bits(addr);
- sge->len = len;
-}
-
-/**
- * hinic_sge_to_dma - get dma address from scatter gather entry
- * @sge: scatter gather entry
- *
- * Return dma address of sg entry
- **/
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
-{
- return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h
deleted file mode 100644
index 2c06b76..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_COMMON_H
-#define HINIC_COMMON_H
-
-#include <linux/types.h>
-
-#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
-#define LOWER_8_BITS(data) ((data) & 0xFF)
-
-struct hinic_sge {
- u32 hi_addr;
- u32 lo_addr;
- u32 len;
-};
-
-void hinic_cpu_to_be32(void *data, int len);
-
-void hinic_be32_to_cpu(void *data, int len);
-
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len);
-
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
deleted file mode 100644
index 5186cc9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_DEV_H
-#define HINIC_DEV_H
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_tx.h"
-#include "hinic_rx.h"
-
-#define HINIC_DRV_NAME "hinic"
-
-enum hinic_flags {
- HINIC_LINK_UP = BIT(0),
- HINIC_INTF_UP = BIT(1),
-};
-
-struct hinic_rx_mode_work {
- struct work_struct work;
- u32 rx_mode;
-};
-
-struct hinic_dev {
- struct net_device *netdev;
- struct hinic_hwdev *hwdev;
-
- u32 msg_enable;
- unsigned int tx_weight;
- unsigned int rx_weight;
-
- unsigned int flags;
-
- struct semaphore mgmt_lock;
- unsigned long *vlan_bitmap;
-
- struct hinic_rx_mode_work rx_mode_work;
- struct workqueue_struct *workq;
-
- struct hinic_txq *txqs;
- struct hinic_rxq *rxqs;
-
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
deleted file mode 100644
index c40603a..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/log2.h>
-#include <linux/semaphore.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
-
-#define API_CHAIN_NUM_CELLS 32
-
-#define API_CMD_CELL_SIZE_SHIFT 6
-#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT))
-
-#define API_CMD_CELL_SIZE(cell_size) \
- (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \
- (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN)
-
-#define API_CMD_CELL_SIZE_VAL(size) \
- ilog2((size) >> API_CMD_CELL_SIZE_SHIFT)
-
-#define API_CMD_BUF_SIZE 2048
-
-/* Sizes of the members in hinic_api_cmd_cell */
-#define API_CMD_CELL_DESC_SIZE 8
-#define API_CMD_CELL_DATA_ADDR_SIZE 8
-
-#define API_CMD_CELL_ALIGNMENT 8
-
-#define API_CMD_TIMEOUT 1000
-
-#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
-
-#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3)
-#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2)
-
-#define RD_DMA_ATTR_DEFAULT 0
-#define WR_DMA_ATTR_DEFAULT 0
-
-enum api_cmd_data_format {
- SGE_DATA = 1, /* cell data is passed by hw address */
-};
-
-enum api_cmd_type {
- API_CMD_WRITE = 0,
-};
-
-enum api_cmd_bypass {
- NO_BYPASS = 0,
- BYPASS = 1,
-};
-
-enum api_cmd_xor_chk_level {
- XOR_CHK_DIS = 0,
-
- XOR_CHK_ALL = 3,
-};
-
-static u8 xor_chksum_set(void *data)
-{
- int idx;
- u8 *val, checksum = 0;
-
- val = data;
-
- for (idx = 0; idx < 7; idx++)
- checksum ^= val[idx];
-
- return checksum;
-}
-
-static void set_prod_idx(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, prod_idx;
-
- addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
- prod_idx = hinic_hwif_read_reg(hwif, addr);
-
- prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX);
-
- prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX);
-
- hinic_hwif_write_reg(hwif, addr, prod_idx);
-}
-
-static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
-{
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
-
- return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
-}
-
-/**
- * chain_busy - check if the chain is still processing last requests
- * @chain: chain to check
- *
- * Return 0 - Success, negative - Failure
- **/
-static int chain_busy(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 prod_idx;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- chain->cons_idx = get_hw_cons_idx(chain);
- prod_idx = chain->prod_idx;
-
- /* check for a space for a new command */
- if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
- dev_err(&pdev->dev, "API CMD chain %d is busy\n",
- chain->chain_type);
- return -EBUSY;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unknown API CMD Chain type\n");
- break;
- }
-
- return 0;
-}
-
-/**
- * get_cell_data_size - get the data size of a specific cell type
- * @type: chain type
- *
- * Return the data(Desc + Address) size in the cell
- **/
-static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type)
-{
- u8 cell_data_size = 0;
-
- switch (type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
- API_CMD_CELL_DATA_ADDR_SIZE,
- API_CMD_CELL_ALIGNMENT);
- break;
- default:
- break;
- }
-
- return cell_data_size;
-}
-
-/**
- * prepare_cell_ctrl - prepare the ctrl of the cell for the command
- * @cell_ctrl: the control of the cell to set the control value into it
- * @data_size: the size of the data in the cell
- **/
-static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size)
-{
- u8 chksum;
- u64 ctrl;
-
- ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) |
- HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) |
- HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR);
-
- chksum = xor_chksum_set(&ctrl);
-
- ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- *cell_ctrl = cpu_to_be64(ctrl);
-}
-
-/**
- * prepare_api_cmd - prepare API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- **/
-static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *cell = chain->curr_node;
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) |
- HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) |
- HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS);
- break;
-
- default:
- dev_err(&pdev->dev, "unknown Chain type\n");
- return;
- }
-
- cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
- HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
-
- cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
- XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- cell->desc = cpu_to_be64(cell->desc);
-
- memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
-}
-
-/**
- * prepare_cell - prepare cell ctrl and cmd in the current cell
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static void prepare_cell(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *curr_node = chain->curr_node;
- u16 data_size = get_cell_data_size(chain->chain_type);
-
- prepare_cell_ctrl(&curr_node->ctrl, data_size);
- prepare_api_cmd(chain, dest, cmd, cmd_size);
-}
-
-static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
-{
- chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
-}
-
-/**
- * api_cmd_status_update - update the status in the chain struct
- * @chain: chain to update
- **/
-static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
- struct hinic_api_cmd_status *wb_status;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u64 status_header;
- u32 status;
-
- wb_status = chain->wb_status;
- status_header = be64_to_cpu(wb_status->header);
-
- status = be32_to_cpu(wb_status->status);
- if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) {
- dev_err(&pdev->dev, "API CMD status: Xor check error\n");
- return;
- }
-
- chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
- if (chain_type >= HINIC_API_CMD_MAX) {
- dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type);
- return;
- }
-
- chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX);
-}
-
-/**
- * wait_for_status_poll - wait for write to api cmd command to complete
- * @chain: the chain of the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
-{
- int err = -ETIMEDOUT;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- api_cmd_status_update(chain);
-
- /* wait for CI to be updated - sign for completion */
- if (chain->cons_idx == chain->prod_idx) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * wait_for_api_cmd_completion - wait for command to complete
- * @chain: chain for the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = wait_for_status_poll(chain);
- if (err) {
- dev_err(&pdev->dev, "API CMD Poll status timeout\n");
- break;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "unknown API CMD Chain type\n");
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd - API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell_ctxt *ctxt;
- int err;
-
- down(&chain->sem);
- if (chain_busy(chain)) {
- up(&chain->sem);
- return -EBUSY;
- }
-
- prepare_cell(chain, dest, cmd, cmd_size);
- cmd_chain_prod_idx_inc(chain);
-
- wmb(); /* inc pi before issue the command */
-
- set_prod_idx(chain); /* issue the command */
-
- ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- chain->curr_node = ctxt->cell_vaddr;
-
- err = wait_for_api_cmd_completion(chain);
-
- up(&chain->sem);
- return err;
-}
-
-/**
- * hinic_api_cmd_write - Write API CMD command
- * @chain: chain for write command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size)
-{
- /* Verify the chain type */
- if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- return api_cmd(chain, dest, cmd, size);
-
- return -EINVAL;
-}
-
-/**
- * api_cmd_hw_restart - restart the chain in the HW
- * @chain: the API CMD specific chain to restart
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- int err = -ETIMEDOUT;
- unsigned long end;
- u32 reg_addr, val;
-
- /* Read Modify Write */
- reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
- val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
-
- hinic_hwif_write_reg(hwif, reg_addr, val);
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * api_cmd_ctrl_init - set the control register of a chain
- * @chain: the API CMD specific chain to set control register for
- **/
-static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
- u16 cell_size;
-
- /* Read Modify Write */
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
-
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) |
- HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) |
- HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_set_status_addr - set the status address of a chain in the HW
- * @chain: the API CMD specific chain to set in HW status address for
- **/
-static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_set_num_cells - set the number cells of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the number of cells for
- **/
-static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
- val = chain->num_cells;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_head_init - set the head of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the head for
- **/
-static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_chain_hw_clean - clean the HW
- * @chain: the API CMD specific chain
- **/
-static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
-
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_chain_hw_init - initialize the chain in the HW
- * @chain: the API CMD specific chain to initialize in HW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- api_cmd_chain_hw_clean(chain);
-
- api_cmd_set_status_addr(chain);
-
- err = api_cmd_hw_restart(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to restart API CMD HW\n");
- return err;
- }
-
- api_cmd_ctrl_init(chain);
- api_cmd_set_num_cells(chain);
- api_cmd_head_init(chain);
- return 0;
-}
-
-/**
- * free_cmd_buf - free the dma buffer of API CMD command
- * @chain: the API CMD specific chain of the cmd
- * @cell_idx: the cell index of the cmd
- **/
-static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- cell_ctxt->api_cmd_vaddr,
- cell_ctxt->api_cmd_paddr);
-}
-
-/**
- * alloc_cmd_buf - allocate a dma buffer for API CMD command
- * @chain: the API CMD specific chain for the cmd
- * @cell: the cell in the HW for the cmd
- * @cell_idx: the index of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_cell *cell, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t cmd_paddr;
- u8 *cmd_vaddr;
- int err = 0;
-
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
- if (!cmd_vaddr) {
- dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
- return -ENOMEM;
- }
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- cell_ctxt->api_cmd_vaddr = cmd_vaddr;
- cell_ctxt->api_cmd_paddr = cmd_paddr;
-
- /* set the cmd DMA address in the cell */
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- /* The data in the HW should be in Big Endian Format */
- cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- free_cmd_buf(chain, cell_idx);
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd_create_cell - create API CMD cell for specific chain
- * @chain: the API CMD specific chain to create its cell
- * @cell_idx: the index of the cell to create
- * @pre_node: previous cell
- * @node_vaddr: the returned virt addr of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx,
- struct hinic_api_cmd_cell *pre_node,
- struct hinic_api_cmd_cell **node_vaddr)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- int err;
-
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
- if (!node) {
- dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
- return -ENOMEM;
- }
-
- node->read.hw_wb_resp_paddr = 0;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
- cell_ctxt->cell_vaddr = node;
- cell_ctxt->cell_paddr = node_paddr;
-
- if (!pre_node) {
- chain->head_cell_paddr = node_paddr;
- chain->head_node = node;
- } else {
- /* The data in the HW should be in Big Endian Format */
- pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
- }
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = alloc_cmd_buf(chain, node, cell_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmd buffer\n");
- goto err_alloc_cmd_buf;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- err = -EINVAL;
- goto err_alloc_cmd_buf;
- }
-
- *node_vaddr = node;
- return 0;
-
-err_alloc_cmd_buf:
- dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
- return err;
-}
-
-/**
- * api_cmd_destroy_cell - destroy API CMD cell of specific chain
- * @chain: the API CMD specific chain to destroy its cell
- * @cell_idx: the cell to destroy
- **/
-static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- size_t node_size;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- node = cell_ctxt->cell_vaddr;
- node_paddr = cell_ctxt->cell_paddr;
- node_size = chain->cell_size;
-
- if (cell_ctxt->api_cmd_vaddr) {
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- free_cmd_buf(chain, cell_idx);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- break;
- }
-
- dma_free_coherent(&pdev->dev, node_size, node,
- node_paddr);
- }
-}
-
-/**
- * api_cmd_destroy_cells - destroy API CMD cells of specific chain
- * @chain: the API CMD specific chain to destroy its cells
- * @num_cells: number of cells to destroy
- **/
-static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
- int num_cells)
-{
- int cell_idx;
-
- for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
- api_cmd_destroy_cell(chain, cell_idx);
-}
-
-/**
- * api_cmd_create_cells - create API CMD cells for specific chain
- * @chain: the API CMD specific chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, cell_idx;
-
- for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
- err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
- if (err) {
- dev_err(&pdev->dev, "Failed to create API CMD cell\n");
- goto err_create_cell;
- }
-
- pre_node = node;
- }
-
- /* set the final node to point back to the start of the chain */
- node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
-
- /* set the current node to be the head */
- chain->curr_node = chain->head_node;
- return 0;
-
-err_create_cell:
- api_cmd_destroy_cells(chain, cell_idx);
- return err;
-}
-
-/**
- * api_chain_init - initialize API CMD specific chain
- * @chain: the API CMD specific chain to initialize
- * @attr: attributes to set in the chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_chain_init(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
-
- chain->hwif = hwif;
- chain->chain_type = attr->chain_type;
- chain->num_cells = attr->num_cells;
- chain->cell_size = attr->cell_size;
-
- chain->prod_idx = 0;
- chain->cons_idx = 0;
-
- sema_init(&chain->sem, 1);
-
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
- if (!chain->cell_ctxt)
- return -ENOMEM;
-
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
- if (!chain->wb_status) {
- dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * api_chain_free - free API CMD specific chain
- * @chain: the API CMD specific chain to free
- **/
-static void api_chain_free(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status),
- chain->wb_status, chain->wb_status_paddr);
-}
-
-/**
- * api_cmd_create_chain - create API CMD specific chain
- * @attr: attributes to set the chain
- *
- * Return the created chain
- **/
-static struct hinic_api_cmd_chain *
- api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_chain *chain;
- int err;
-
- if (attr->num_cells & (attr->num_cells - 1)) {
- dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n");
- return ERR_PTR(-EINVAL);
- }
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
- if (!chain)
- return ERR_PTR(-ENOMEM);
-
- err = api_chain_init(chain, attr);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain\n");
- return ERR_PTR(err);
- }
-
- err = api_cmd_create_cells(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n");
- goto err_create_cells;
- }
-
- err = api_cmd_chain_hw_init(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain HW\n");
- goto err_chain_hw_init;
- }
-
- return chain;
-
-err_chain_hw_init:
- api_cmd_destroy_cells(chain, chain->num_cells);
-
-err_create_cells:
- api_chain_free(chain);
- return ERR_PTR(err);
-}
-
-/**
- * api_cmd_destroy_chain - destroy API CMD specific chain
- * @chain: the API CMD specific chain to destroy
- **/
-static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
-{
- api_cmd_chain_hw_clean(chain);
- api_cmd_destroy_cells(chain, chain->num_cells);
- api_chain_free(chain);
-}
-
-/**
- * hinic_api_cmd_init - Initialize all the API CMD chains
- * @chain: the API CMD chains that are initialized
- * @hwif: the hardware interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif)
-{
- enum hinic_api_cmd_chain_type type, chain_type;
- struct hinic_api_cmd_chain_attr attr;
- struct pci_dev *pdev = hwif->pdev;
- size_t hw_cell_sz;
- int err;
-
- hw_cell_sz = sizeof(struct hinic_api_cmd_cell);
-
- attr.hwif = hwif;
- attr.num_cells = API_CHAIN_NUM_CELLS;
- attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz);
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- attr.chain_type = chain_type;
-
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- chain[chain_type] = api_cmd_create_chain(&attr);
- if (IS_ERR(chain[chain_type])) {
- dev_err(&pdev->dev, "Failed to create chain %d\n",
- chain_type);
- err = PTR_ERR(chain[chain_type]);
- goto err_create_chain;
- }
- }
-
- return 0;
-
-err_create_chain:
- type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; type < chain_type; type++) {
- if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[type]);
- }
-
- return err;
-}
-
-/**
- * hinic_api_cmd_free - free the API CMD chains
- * @chain: the API CMD chains that are freed
- **/
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[chain_type]);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
deleted file mode 100644
index 31b94d5..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_API_CMD_H
-#define HINIC_HW_API_CMD_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_API_CMD_PI_IDX_SHIFT 0
-
-#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF
-
-#define HINIC_API_CMD_PI_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \
- HINIC_API_CMD_PI_##member##_SHIFT)
-
-#define HINIC_API_CMD_PI_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \
- << HINIC_API_CMD_PI_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1
-
-#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
- (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
- HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
-
-#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
- << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3
-
-#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
- << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
-#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
-#define HINIC_API_CMD_DESC_DEST_SHIFT 32
-#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1
-#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1
-#define HINIC_API_CMD_DESC_DEST_MASK 0x1F
-#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_DESC_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
- HINIC_API_CMD_DESC_##member##_SHIFT)
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF
-
-#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
-
-#define HINIC_API_CMD_STATUS_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_##member##_MASK)
-
-enum hinic_api_cmd_chain_type {
- HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
-
- HINIC_API_CMD_MAX,
-};
-
-struct hinic_api_cmd_chain_attr {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-};
-
-struct hinic_api_cmd_status {
- u64 header;
- u32 status;
- u32 rsvd0;
- u32 rsvd1;
- u32 rsvd2;
- u64 rsvd3;
-};
-
-/* HW struct */
-struct hinic_api_cmd_cell {
- u64 ctrl;
-
- /* address is 64 bit in HW struct */
- u64 next_cell_paddr;
-
- u64 desc;
-
- /* HW struct */
- union {
- struct {
- u64 hw_cmd_paddr;
- } write;
-
- struct {
- u64 hw_wb_resp_paddr;
- u64 hw_cmd_paddr;
- } read;
- };
-};
-
-struct hinic_api_cmd_cell_ctxt {
- dma_addr_t cell_paddr;
- struct hinic_api_cmd_cell *cell_vaddr;
-
- dma_addr_t api_cmd_paddr;
- u8 *api_cmd_vaddr;
-};
-
-struct hinic_api_cmd_chain {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-
- /* HW members in 24 bit format */
- u32 prod_idx;
- u32 cons_idx;
-
- struct semaphore sem;
-
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
-
- dma_addr_t wb_status_paddr;
- struct hinic_api_cmd_status *wb_status;
-
- dma_addr_t head_cell_paddr;
- struct hinic_api_cmd_cell *head_node;
- struct hinic_api_cmd_cell *curr_node;
-};
-
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size);
-
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif);
-
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
deleted file mode 100644
index 4d09ea7..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/log2.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define CMDQ_CEQE_TYPE_SHIFT 0
-
-#define CMDQ_CEQE_TYPE_MASK 0x7
-
-#define CMDQ_CEQE_GET(val, member) \
- (((val) >> CMDQ_CEQE_##member##_SHIFT) \
- & CMDQ_CEQE_##member##_MASK)
-
-#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
-
-#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
-
-#define CMDQ_WQE_ERRCODE_GET(val, member) \
- (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
- & CMDQ_WQE_ERRCODE_##member##_MASK)
-
-#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
-
-#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
-
-#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
-
-#define CMDQ_WQE_COMPLETED(ctrl_info) \
- HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
-
-#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
-
-#define CMDQ_DB_OFF SZ_2K
-
-#define CMDQ_WQEBB_SIZE 64
-#define CMDQ_WQE_SIZE 64
-#define CMDQ_DEPTH SZ_4K
-
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
-
-#define WQE_LCMD_SIZE 64
-#define WQE_SCMD_SIZE 64
-
-#define COMPLETE_LEN 3
-
-#define CMDQ_TIMEOUT 1000
-
-#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
-
-#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
- struct hinic_cmdqs, cmdq[0])
-
-#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
- struct hinic_func_to_io, \
- cmdqs)
-
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
-enum completion_format {
- COMPLETE_DIRECT = 0,
- COMPLETE_SGE = 1,
-};
-
-enum data_format {
- DATA_SGE = 0,
- DATA_DIRECT = 1,
-};
-
-enum bufdesc_len {
- BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
- BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
-};
-
-enum ctrl_sect_len {
- CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
- CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
-};
-
-enum cmdq_scmd_type {
- CMDQ_SET_ARM_CMD = 2,
-};
-
-enum cmdq_cmd_type {
- CMDQ_CMD_SYNC_DIRECT_RESP = 0,
- CMDQ_CMD_SYNC_SGE_RESP = 1,
-};
-
-enum completion_request {
- NO_CEQ = 0,
- CEQ_SET = 1,
-};
-
-/**
- * hinic_alloc_cmdq_buf - alloc buffer for sending command
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer returned in this struct
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
- &cmdq_buf->dma_addr);
- if (!cmdq_buf->buf) {
- dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * hinic_free_cmdq_buf - free buffer
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer to free that is in this struct
- **/
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
-}
-
-static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
-{
- unsigned int wqe_size = 0;
-
- switch (len) {
- case BUFDESC_LCMD_LEN:
- wqe_size = WQE_LCMD_SIZE;
- break;
- case BUFDESC_SCMD_LEN:
- wqe_size = WQE_SCMD_SIZE;
- break;
- }
-
- return wqe_size;
-}
-
-static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
- struct hinic_cmdq_buf *buf_out)
-{
- struct hinic_sge_resp *sge_resp = &completion->sge_resp;
-
- hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
-}
-
-static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
- enum completion_format complete_format,
- enum data_format data_format,
- enum bufdesc_len buf_len)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- enum ctrl_sect_len ctrl_len;
- struct hinic_ctrl *ctrl;
- u32 saved_data;
-
- if (data_format == DATA_SGE) {
- wqe_lcmd = &wqe->wqe_lcmd;
-
- wqe_lcmd->status.status_info = 0;
- ctrl = &wqe_lcmd->ctrl;
- ctrl_len = CTRL_SECT_LEN;
- } else {
- wqe_scmd = &wqe->direct_wqe.wqe_scmd;
-
- wqe_scmd->status.status_info = 0;
- ctrl = &wqe_scmd->ctrl;
- ctrl_len = CTRL_DIRECT_SECT_LEN;
- }
-
- ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
- HINIC_CMDQ_CTRL_SET(cmd, CMD) |
- HINIC_CMDQ_CTRL_SET(mod, MOD) |
- HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
-
- CMDQ_WQE_HEADER(wqe)->header_info =
- HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
- HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
-
- saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
- saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
-
- if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
- CMDQ_WQE_HEADER(wqe)->saved_data |=
- HINIC_SAVED_DATA_SET(1, ARM);
- else
- CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
-}
-
-static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
- struct hinic_cmdq_buf *buf_in)
-{
- hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
-}
-
-static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
- void *buf_in, u32 in_size)
-{
- struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
-
- wqe_scmd->buf_desc.buf_len = in_size;
- memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
-}
-
-static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- struct hinic_cmdq_buf *buf_in,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
- enum completion_format complete_format;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_lcmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
- prod_idx, complete_format, DATA_SGE,
- BUFDESC_LCMD_LEN);
-
- cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
-}
-
-static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- void *buf_in, u16 in_size,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- enum completion_format complete_format;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_scmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
- complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
-
- cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
-}
-
-static void cmdq_wqe_fill(void *dst, void *src)
-{
- memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
- CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
-
- wmb(); /* The first 8 bytes should be written last */
-
- *(u64 *)dst = *(u64 *)src;
-}
-
-static void cmdq_fill_db(u32 *db_info,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
- HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
-}
-
-static void cmdq_set_db(struct hinic_cmdq *cmdq,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- u32 db_info;
-
- cmdq_fill_db(&db_info, cmdq_type, prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- db_info = cpu_to_be32(db_info);
-
- wmb(); /* write all before the doorbell */
-
- writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
-}
-
-static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in,
- u64 *resp)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- int errcode, wrapped, num_wqebbs;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- struct completion done;
-
- /* Keep doorbell index correct. bh - for tasklet(ceq). */
- spin_lock_bh(&cmdq->cmdq_lock);
-
- /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
- hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock_bh(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq->errcode[curr_prod_idx] = &errcode;
-
- init_completion(&done);
- cmdq->done[curr_prod_idx] = &done;
-
- cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
- wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
- curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
-
- /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
- spin_lock_bh(&cmdq->cmdq_lock);
-
- if (cmdq->errcode[curr_prod_idx] == &errcode)
- cmdq->errcode[curr_prod_idx] = NULL;
-
- if (cmdq->done[curr_prod_idx] == &done)
- cmdq->done[curr_prod_idx] = NULL;
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- return -ETIMEDOUT;
- }
-
- smp_rmb(); /* read error code after completion */
-
- if (resp) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
-
- *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
- }
-
- if (errcode != 0)
- return -EFAULT;
-
- return 0;
-}
-
-static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
- u16 in_size)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- int wrapped, num_wqebbs;
-
- /* Keep doorbell index correct */
- spin_lock(&cmdq->cmdq_lock);
-
- /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
- hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
- in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
- HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
-
- /* cmdq wqe is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock(&cmdq->cmdq_lock);
- return 0;
-}
-
-static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
-{
- if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * hinic_cmdq_direct_resp - send command with direct data as resp
- * @cmdqs: the cmdqs
- * @mod: module on the card that will handle the command
- * @cmd: the command
- * @buf_in: the buffer for the command
- * @resp: the response to return
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *resp)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = cmdq_params_valid(buf_in);
- if (err) {
- dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
- return err;
- }
-
- return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
- mod, cmd, buf_in, resp);
-}
-
-/**
- * hinic_set_arm_bit - set the arm bit to enable the interrupt again
- * @cmdqs: the cmdqs
- * @q_type: type of queue to set the arm bit for
- * @q_id: the queue number
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
-{
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_arm_bit arm_bit;
- int err;
-
- arm_bit.q_type = q_type;
- arm_bit.q_id = q_id;
-
- err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
- if (err) {
- dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
- return err;
- }
-
- return 0;
-}
-
-static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
- unsigned int bufdesc_len, wqe_size;
- struct hinic_ctrl *ctrl;
-
- bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
- wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
- if (wqe_size == WQE_LCMD_SIZE) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
-
- ctrl = &wqe_lcmd->ctrl;
- } else {
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- }
-
- /* clear HW busy bit */
- ctrl->ctrl_info = 0;
-
- wmb(); /* verify wqe is clear */
-}
-
-/**
- * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
- * @cmdq: the cmdq of the arm command
- * @wqe: the wqe of the arm command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- struct hinic_ctrl *ctrl;
- u32 ctrl_info;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
-
- /* HW should toggle the HW BUSY BIT */
- if (!CMDQ_WQE_COMPLETED(ctrl_info))
- return -EBUSY;
-
- clear_wqe_complete_bit(cmdq, wqe);
-
- hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
- return 0;
-}
-
-static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
- int errcode)
-{
- if (cmdq->errcode[prod_idx])
- *cmdq->errcode[prod_idx] = errcode;
-}
-
-/**
- * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
- * @cmdq: the cmdq of the command
- * @cons_idx: the consumer index to update the error code for
- * @errcode: the error code
- **/
-static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
- int errcode)
-{
- u16 prod_idx = cons_idx;
-
- spin_lock(&cmdq->cmdq_lock);
- cmdq_update_errcode(cmdq, prod_idx, errcode);
-
- wmb(); /* write all before update for the command request */
-
- if (cmdq->done[prod_idx])
- complete(cmdq->done[prod_idx]);
- spin_unlock(&cmdq->cmdq_lock);
-}
-
-static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
- struct hinic_cmdq_wqe *cmdq_wqe)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
- struct hinic_status *status = &wqe_lcmd->status;
- struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
- int errcode;
-
- if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
- return -EBUSY;
-
- errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
-
- cmdq_sync_cmd_handler(cmdq, ci, errcode);
-
- clear_wqe_complete_bit(cmdq, cmdq_wqe);
- hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
- return 0;
-}
-
-/**
- * cmdq_ceq_handler - cmdq completion event handler
- * @handle: private data for the handler(cmdqs)
- * @ceqe_data: ceq element data
- **/
-static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
-{
- enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
- struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
- struct hinic_cmdq_header *header;
- struct hinic_hw_wqe *hw_wqe;
- int err, set_arm = 0;
- u32 saved_data;
- u16 ci;
-
- /* read with the smallest wqe size first to determine the actual wqe size */
- while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
- break;
-
- header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
- saved_data = be32_to_cpu(header->saved_data);
-
- if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
- /* arm_bit was set until here */
- set_arm = 0;
-
- if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
- break;
- } else {
- set_arm = 1;
-
- hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
- if (IS_ERR(hw_wqe))
- break;
-
- if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
- break;
- }
- }
-
- if (set_arm) {
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
- if (err)
- dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
- }
-}
-
-/**
- * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
- * @cmdq_ctxt: cmdq ctxt to initialize
- * @cmdq: the cmdq
- * @cmdq_pages: the memory of the queue
- **/
-static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
- struct hinic_cmdq *cmdq,
- struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
- u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
- struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
- struct hinic_wq *wq = cmdq->wq;
-
- /* The data in the HW is in Big Endian Format */
- wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
-
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
-
- ctxt_info->curr_wqe_page_pfn =
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
-
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
-
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
-
- ctxt_info->wq_block_pfn =
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
-
- cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
- cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
-}
-
-/**
- * init_cmdq - initialize cmdq
- * @cmdq: the cmdq
- * @wq: the wq attached to the cmdq
- * @q_type: the cmdq type of the cmdq
- * @db_area: doorbell area for the cmdq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
- enum hinic_cmdq_type q_type, void __iomem *db_area)
-{
- int err;
-
- cmdq->wq = wq;
- cmdq->cmdq_type = q_type;
- cmdq->wrapped = 1;
-
- spin_lock_init(&cmdq->cmdq_lock);
-
- cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
- if (!cmdq->done)
- return -ENOMEM;
-
- cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
- wq->q_depth));
- if (!cmdq->errcode) {
- err = -ENOMEM;
- goto err_errcode;
- }
-
- cmdq->db_base = db_area + CMDQ_DB_OFF;
- return 0;
-
-err_errcode:
- vfree(cmdq->done);
- return err;
-}
-
-/**
- * free_cmdq - Free cmdq
- * @cmdq: the cmdq to free
- **/
-static void free_cmdq(struct hinic_cmdq *cmdq)
-{
- vfree(cmdq->errcode);
- vfree(cmdq->done);
-}
-
-/**
- * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
- * @hwdev: the NIC HW device
- * @cmdqs: cmdqs to write the ctxts for
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
- struct hinic_cmdqs *cmdqs, void __iomem **db_area)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- enum hinic_cmdq_type type, cmdq_type;
- struct hinic_cmdq_ctxt *cmdq_ctxts;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
- if (!cmdq_ctxts)
- return -ENOMEM;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = init_cmdq(&cmdqs->cmdq[cmdq_type],
- &cmdqs->saved_wqs[cmdq_type], cmdq_type,
- db_area[cmdq_type]);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdq\n");
- goto err_init_cmdq;
- }
-
- cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
- &cmdqs->cmdq[cmdq_type],
- &cmdqs->cmdq_pages);
- }
-
- /* Write the CMDQ ctxts */
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_CMDQ_CTXT_SET,
- &cmdq_ctxts[cmdq_type],
- sizeof(cmdq_ctxts[cmdq_type]),
- NULL, NULL, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
- cmdq_type);
- goto err_write_cmdq_ctxt;
- }
- }
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return 0;
-
-err_write_cmdq_ctxt:
- cmdq_type = HINIC_MAX_CMDQ_TYPES;
-
-err_init_cmdq:
- for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
- free_cmdq(&cmdqs->cmdq[type]);
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return err;
-}
-
-/**
- * hinic_init_cmdqs - init all cmdqs
- * @cmdqs: cmdqs to init
- * @hwif: HW interface for accessing cmdqs
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
- u16 max_wqe_size;
- int err;
-
- cmdqs->hwif = hwif;
- cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
- HINIC_CMDQ_BUF_SIZE,
- HINIC_CMDQ_BUF_SIZE, 0);
- if (!cmdqs->cmdq_buf_pool)
- return -ENOMEM;
-
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
- if (!cmdqs->saved_wqs) {
- err = -ENOMEM;
- goto err_saved_wqs;
- }
-
- max_wqe_size = WQE_LCMD_SIZE;
- err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
- HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
- CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
- goto err_cmdq_wqs;
- }
-
- hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
- err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
- goto err_cmdq_ctxt;
- }
-
- hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
- cmdq_ceq_handler);
- return 0;
-
-err_cmdq_ctxt:
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
-err_cmdq_wqs:
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
-err_saved_wqs:
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
- return err;
-}
-
-/**
- * hinic_free_cmdqs - free all cmdqs
- * @cmdqs: cmdqs to free
- **/
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq_type;
-
- hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
- free_cmdq(&cmdqs->cmdq[cmdq_type]);
-
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
deleted file mode 100644
index 23f8d39..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_CMDQ_H
-#define HINIC_CMDQ_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/pci.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wq.h"
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
-#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
-#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
-#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
-#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
-#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
-#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_CI_SHIFT 52
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_SAVED_DATA_ARM_SHIFT 31
-
-#define HINIC_SAVED_DATA_ARM_MASK 0x1
-
-#define HINIC_SAVED_DATA_SET(val, member) \
- (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \
- << HINIC_SAVED_DATA_##member##_SHIFT)
-
-#define HINIC_SAVED_DATA_GET(val, member) \
- (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \
- & HINIC_SAVED_DATA_##member##_MASK)
-
-#define HINIC_SAVED_DATA_CLEAR(val, member) \
- ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \
- << HINIC_SAVED_DATA_##member##_SHIFT)))
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
-#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF
-#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F
-
-#define HINIC_CMDQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \
- << HINIC_CMDQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_CMDQ_BUF_SIZE 2048
-
-#define HINIC_CMDQ_BUF_HW_RSVD 8
-#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \
- HINIC_CMDQ_BUF_HW_RSVD)
-
-enum hinic_cmdq_type {
- HINIC_CMDQ_SYNC,
-
- HINIC_MAX_CMDQ_TYPES,
-};
-
-enum hinic_set_arm_qtype {
- HINIC_SET_ARM_CMDQ,
-};
-
-enum hinic_cmd_ack_type {
- HINIC_CMD_ACK_TYPE_CMDQ,
-};
-
-struct hinic_cmdq_buf {
- void *buf;
- dma_addr_t dma_addr;
- size_t size;
-};
-
-struct hinic_cmdq_arm_bit {
- u32 q_type;
- u32 q_id;
-};
-
-struct hinic_cmdq_ctxt_info {
- u64 curr_wqe_page_pfn;
- u64 wq_block_pfn;
-};
-
-struct hinic_cmdq_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 cmdq_type;
- u8 rsvd1[1];
-
- u8 rsvd2[4];
-
- struct hinic_cmdq_ctxt_info ctxt_info;
-};
-
-struct hinic_cmdq {
- struct hinic_wq *wq;
-
- enum hinic_cmdq_type cmdq_type;
- int wrapped;
-
- /* Lock for keeping the doorbell order */
- spinlock_t cmdq_lock;
-
- struct completion **done;
- int **errcode;
-
- /* doorbell area */
- void __iomem *db_base;
-};
-
-struct hinic_cmdqs {
- struct hinic_hwif *hwif;
-
- struct dma_pool *cmdq_buf_pool;
-
- struct hinic_wq *saved_wqs;
-
- struct hinic_cmdq_pages cmdq_pages;
-
- struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
-};
-
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *out_param);
-
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area);
-
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
deleted file mode 100644
index f39b184..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_CSR_H
-#define HINIC_HW_CSR_H
-
-/* HW interface registers */
-#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
-#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
-#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
-#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
-
-#define HINIC_DMA_ATTR_BASE 0xC80
-#define HINIC_ELECTION_BASE 0x4200
-
-#define HINIC_DMA_ATTR_STRIDE 0x4
-#define HINIC_CSR_DMA_ATTR_ADDR(idx) \
- (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
-
-#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
-
-#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
- (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
-
-/* API CMD registers */
-#define HINIC_CSR_API_CMD_BASE 0xF000
-
-#define HINIC_CSR_API_CMD_STRIDE 0x100
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-/* MSI-X registers */
-#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
-#define HINIC_CSR_MSIX_CNT_BASE 0x2004
-
-#define HINIC_CSR_MSIX_STRIDE 0x8
-
-#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
- (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
- (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-/* EQ registers */
-#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
-#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
-
-#define HINIC_EQ_MTT_OFF_STRIDE 0x40
-
-#define HINIC_CSR_AEQ_MTT_OFF(id) \
- (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_MTT_OFF(id) \
- (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
-
-#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
-#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
-#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08
-#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C
-
-#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
-#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
-#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008
-#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C
-
-#define HINIC_EQ_OFF_STRIDE 0x80
-
-#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
- (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
- (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
- (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
- (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
- (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
- (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
- (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
- (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
deleted file mode 100644
index 6b19607..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ /dev/null
@@ -1,1010 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/log2.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define IO_STATUS_TIMEOUT 100
-#define OUTBOUND_STATE_TIMEOUT 100
-#define DB_STATE_TIMEOUT 100
-
-#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \
- (2 * (max_qps) + (num_aeqs) + (num_ceqs))
-
-#define ADDR_IN_4BYTES(addr) ((addr) >> 2)
-
-enum intr_type {
- INTR_MSIX_TYPE,
-};
-
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
-/**
- * get_capability - convert device capabilities to NIC capabilities
- * @hwdev: the HW device to set and convert device capabilities for
- * @dev_cap: device capabilities from FW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- int num_aeqs, num_ceqs, num_irqs;
-
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
- return -EFAULT;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
- num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif);
-
- /* Each QP has its own (SQ + RQ) interrupts */
- nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;
-
- if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
- nic_cap->num_qps = HINIC_Q_CTXT_MAX;
-
- /* num_qps must be power of 2 */
- nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1);
-
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
-
- if (nic_cap->num_qps > nic_cap->max_qps)
- nic_cap->num_qps = nic_cap->max_qps;
-
- return 0;
-}
-
-/**
- * get_cap_from_fw - get device capabilities from FW
- * @pfhwdev: the PF HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
- int err;
-
- in_len = 0;
- out_len = sizeof(dev_cap);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
-
- return get_capability(hwdev, &dev_cap);
-}
-
-/**
- * get_dev_cap - get device capabilities
- * @hwdev: the NIC HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_dev_cap(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- switch (HINIC_FUNC_TYPE(hwif)) {
- case HINIC_PPF:
- case HINIC_PF:
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * init_msix - enable the msix and save the entries
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
- int i, err;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
- nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs);
- if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
- nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
-
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
- GFP_KERNEL);
- if (!hwdev->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nr_irqs; i++)
- hwdev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable pci msix\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * disable_msix - disable the msix
- * @hwdev: the NIC HW device
- **/
-static void disable_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- pci_disable_msix(pdev);
-}
-
-/**
- * hinic_port_msg_cmd - send port msg to mgmt
- * @hwdev: the NIC HW device
- * @cmd: the port command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
- buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * init_fw_ctxt - Init Firmware tables before network mgmt and IO operations
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_fw_ctxt(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
- &fw_ctxt, sizeof(fw_ctxt),
- &fw_ctxt, &out_size);
- if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) {
- dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n",
- fw_ctxt.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hw_ioctxt - set the shape of the IO queues in FW
- * @hwdev: the NIC HW device
- * @rq_depth: rq depth
- * @sq_depth: sq depth
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
- hw_ioctxt.cmdq_depth = 0;
-
- hw_ioctxt.rq_depth = ilog2(rq_depth);
-
- hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
-
- hw_ioctxt.sq_depth = ilog2(sq_depth);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_HWCTXT_SET,
- &hw_ioctxt, sizeof(hw_ioctxt), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-static int wait_for_outbound_state(struct hinic_hwdev *hwdev)
-{
- enum hinic_outbound_state outbound_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT);
- do {
- outbound_state = hinic_outbound_state_get(hwif);
-
- if (outbound_state == HINIC_OUTBOUND_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_db_state(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_db_state db_state;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT);
- do {
- db_state = hinic_db_state_get(hwif);
-
- if (db_state == HINIC_DB_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for DB - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_io_status cmd_io_status;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- unsigned long end;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
- do {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_STATUS_GET,
- &cmd_io_status, sizeof(cmd_io_status),
- &cmd_io_status, &out_size,
- HINIC_MGMT_MSG_SYNC);
- if ((err) || (out_size != sizeof(cmd_io_status))) {
- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
- err);
- return err;
- }
-
- if (cmd_io_status.status == IO_STOPPED) {
- dev_info(&pdev->dev, "IO stopped\n");
- return 0;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
- return -ETIMEDOUT;
-}
-
-/**
- * clear_io_resources - set the IO resources as not active in the NIC
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int clear_io_resources(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_clear_io_res cmd_clear_io_res;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- err = wait_for_io_stopped(hwdev);
- if (err) {
- dev_err(&pdev->dev, "IO has not stopped yet\n");
- return err;
- }
-
- cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res,
- sizeof(cmd_clear_io_res), NULL, NULL,
- HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to clear IO resources\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * set_resources_state - set the state of the resources in the NIC
- * @hwdev: the NIC HW device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_resources_state(struct hinic_hwdev *hwdev,
- enum hinic_res_state state)
-{
- struct hinic_cmd_set_res_state res_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- res_state.state = state;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_RES_STATE_SET,
- &res_state, sizeof(res_state), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * get_base_qpn - get the first qp number
- * @hwdev: the NIC HW device
- * @base_qpn: returned qp number
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
-{
- struct hinic_cmd_base_qpn cmd_base_qpn;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
- &cmd_base_qpn, sizeof(cmd_base_qpn),
- &cmd_base_qpn, &out_size);
- if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) {
- dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n",
- cmd_base_qpn.status);
- return -EFAULT;
- }
-
- *base_qpn = cmd_base_qpn.qpn;
- return 0;
-}
-
-/**
- * hinic_hwdev_ifup - Preparing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- struct hinic_hwif *hwif = hwdev->hwif;
- int err, num_aeqs, num_ceqs, num_qps;
- struct msix_entry *ceq_msix_entries;
- struct msix_entry *sq_msix_entries;
- struct msix_entry *rq_msix_entries;
- struct pci_dev *pdev = hwif->pdev;
- u16 base_qpn;
-
- err = get_base_qpn(hwdev, &base_qpn);
- if (err) {
- dev_err(&pdev->dev, "Failed to get global base qp number\n");
- return err;
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
-
- ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
-
- err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init IO channel\n");
- return err;
- }
-
- num_qps = nic_cap->num_qps;
- sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs];
- rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];
-
- err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,
- sq_msix_entries, rq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QPs\n");
- goto err_create_qps;
- }
-
- err = wait_for_db_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "db - disabled, try again\n");
- hinic_db_state_set(hwif, HINIC_DB_ENABLE);
- }
-
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
- if (err) {
- dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
- goto err_hw_ioctxt;
- }
-
- return 0;
-
-err_hw_ioctxt:
- hinic_io_destroy_qps(func_to_io, num_qps);
-
-err_create_qps:
- hinic_io_free(func_to_io);
- return err;
-}
-
-/**
- * hinic_hwdev_ifdown - Closing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- **/
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- clear_io_resources(hwdev);
-
- hinic_io_destroy_qps(func_to_io, nic_cap->num_qps);
- hinic_io_free(func_to_io);
-}
-
-/**
- * hinic_hwdev_cb_register - register callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- * @handle: private data for the handler
- * @handler: event handler
- **/
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->handler = handler;
- nic_cb->handle = handle;
- nic_cb->cb_state = HINIC_CB_ENABLED;
-}
-
-/**
- * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- **/
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->cb_state &= ~HINIC_CB_ENABLED;
-
- while (nic_cb->cb_state & HINIC_CB_RUNNING)
- schedule();
-
- nic_cb->handler = NULL;
-}
-
-/**
- * nic_mgmt_msg_handler - nic mgmt event handler
- * @handle: private data for the handler
- * @cmd: the L2NIC mgmt event command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- **/
-static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev = handle;
- enum hinic_cb_state cb_state;
- struct hinic_nic_cb *nic_cb;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u8 cmd_cb;
-
- hwdev = &pfhwdev->hwdev;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- if ((cmd < HINIC_MGMT_MSG_CMD_BASE) ||
- (cmd >= HINIC_MGMT_MSG_CMD_MAX)) {
- dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
- return;
- }
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
-
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- cb_state = cmpxchg(&nic_cb->cb_state,
- HINIC_CB_ENABLED,
- HINIC_CB_ENABLED | HINIC_CB_RUNNING);
-
- if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler))
- nic_cb->handler(nic_cb->handle, buf_in,
- in_size, buf_out, out_size);
- else
- dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd);
-
- nic_cb->cb_state &= ~HINIC_CB_RUNNING;
-}
-
-/**
- * init_pfhwdev - Initialize the extended components of PF
- * @pfhwdev: the HW device for PF
- *
- * Return 0 - success, negative - failure
- **/
-static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n");
- return err;
- }
-
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
-
- hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
- return 0;
-}
-
-/**
- * free_pfhwdev - Free the extended components of PF
- * @pfhwdev: the HW device for PF
- **/
-static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
-
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
-
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
-}
-
-/**
- * hinic_init_hwdev - Initialize the NIC HW
- * @pdev: the NIC pci device
- *
- * Return initialized NIC HW device
- *
- * Initialize the NIC HW device and return a pointer to it
- **/
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
-{
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- int err, num_aeqs;
-
- hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL);
- if (!hwif)
- return ERR_PTR(-ENOMEM);
-
- err = hinic_init_hwif(hwif, pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init HW interface\n");
- return ERR_PTR(err);
- }
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
- pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
- if (!pfhwdev) {
- err = -ENOMEM;
- goto err_pfhwdev_alloc;
- }
-
- hwdev = &pfhwdev->hwdev;
- hwdev->hwif = hwif;
-
- err = init_msix(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init msix\n");
- goto err_init_msix;
- }
-
- err = wait_for_outbound_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "outbound - disabled, try again\n");
- hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE);
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
-
- err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs,
- HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
- hwdev->msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init async event queues\n");
- goto err_aeqs_init;
- }
-
- err = init_pfhwdev(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init PF HW device\n");
- goto err_init_pfhwdev;
- }
-
- err = get_dev_cap(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get device capabilities\n");
- goto err_dev_cap;
- }
-
- err = init_fw_ctxt(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init function table\n");
- goto err_init_fw_ctxt;
- }
-
- err = set_resources_state(hwdev, HINIC_RES_ACTIVE);
- if (err) {
- dev_err(&pdev->dev, "Failed to set resources state\n");
- goto err_resources_state;
- }
-
- return hwdev;
-
-err_resources_state:
-err_init_fw_ctxt:
-err_dev_cap:
- free_pfhwdev(pfhwdev);
-
-err_init_pfhwdev:
- hinic_aeqs_free(&hwdev->aeqs);
-
-err_aeqs_init:
- disable_msix(hwdev);
-
-err_init_msix:
-err_pfhwdev_alloc:
-err_func_type:
- hinic_free_hwif(hwif);
- return ERR_PTR(err);
-}
-
-/**
- * hinic_free_hwdev - Free the NIC HW device
- * @hwdev: the NIC HW device
- **/
-void hinic_free_hwdev(struct hinic_hwdev *hwdev)
-{
- struct hinic_pfhwdev *pfhwdev = container_of(hwdev,
- struct hinic_pfhwdev,
- hwdev);
-
- set_resources_state(hwdev, HINIC_RES_CLEAN);
-
- free_pfhwdev(pfhwdev);
-
- hinic_aeqs_free(&hwdev->aeqs);
-
- disable_msix(hwdev);
-
- hinic_free_hwif(hwdev->hwif);
-}
-
-/**
- * hinic_hwdev_num_qps - return the number of QPs available for use
- * @hwdev: the NIC HW device
- *
- * Return the number of QPs available for use
- **/
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->num_qps;
-}
-
-/**
- * hinic_hwdev_get_sq - get SQ
- * @hwdev: the NIC HW device
- * @i: the position of the SQ
- *
- * Return: the SQ in the i position
- **/
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->sq;
-}
-
-/**
- * hinic_hwdev_get_rq - get RQ
- * @hwdev: the NIC HW device
- * @i: the position of the RQ
- *
- * Return: the RQ in the i position
- **/
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->rq;
-}
-
-/**
- * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
-{
- return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index);
-}
-
-/**
- * hinic_hwdev_msix_set - set message attribute for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer)
-{
- return hinic_msix_attr_set(hwdev->hwif, msix_index,
- pending_limit, coalesc_timer,
- lli_timer_cfg, lli_credit_limit,
- resend_timer);
-}
-
-/**
- * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq
- * @hwdev: the NIC HW device
- * @sq: send queue
- * @pending_limit: the maximum pending update ci events (unit 8)
- * @coalesc_timer: coalesc period for update ci (unit 8 us)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_cmd_hw_ci hw_ci;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ci.dma_attr_off = 0;
- hw_ci.pending_limit = pending_limit;
- hw_ci.coalesc_timer = coalesc_timer;
-
- hw_ci.msix_en = 1;
- hw_ci.msix_entry_idx = sq->msix_entry;
-
- hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ci.sq_id = qp->q_id;
-
- hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_SQ_HI_CI_SET,
- &hw_ci, sizeof(hw_ci), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
deleted file mode 100644
index 0f5563f..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_DEV_H
-#define HINIC_HW_DEV_H
-
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define HINIC_MAX_QPS 32
-
-#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
- HINIC_MGMT_MSG_CMD_BASE)
-
-struct hinic_cap {
- u16 max_qps;
- u16 num_qps;
-};
-
-enum hinic_port_cmd {
- HINIC_PORT_CMD_CHANGE_MTU = 2,
-
- HINIC_PORT_CMD_ADD_VLAN = 3,
- HINIC_PORT_CMD_DEL_VLAN = 4,
-
- HINIC_PORT_CMD_SET_MAC = 9,
- HINIC_PORT_CMD_GET_MAC = 10,
- HINIC_PORT_CMD_DEL_MAC = 11,
-
- HINIC_PORT_CMD_SET_RX_MODE = 12,
-
- HINIC_PORT_CMD_GET_LINK_STATE = 24,
-
- HINIC_PORT_CMD_SET_PORT_STATE = 41,
-
- HINIC_PORT_CMD_FWCTXT_INIT = 69,
-
- HINIC_PORT_CMD_SET_FUNC_STATE = 93,
-
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
-
- HINIC_PORT_CMD_GET_CAP = 170,
-};
-
-enum hinic_mgmt_msg_cmd {
- HINIC_MGMT_MSG_CMD_BASE = 160,
-
- HINIC_MGMT_MSG_CMD_LINK_STATUS = 160,
-
- HINIC_MGMT_MSG_CMD_MAX,
-};
-
-enum hinic_cb_state {
- HINIC_CB_ENABLED = BIT(0),
- HINIC_CB_RUNNING = BIT(1),
-};
-
-enum hinic_res_state {
- HINIC_RES_CLEAN = 0,
- HINIC_RES_ACTIVE = 1,
-};
-
-struct hinic_cmd_fw_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rx_buf_sz;
-
- u32 rsvd1;
-};
-
-struct hinic_cmd_hw_ioctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u16 rsvd1;
-
- u8 set_cmdq_depth;
- u8 cmdq_depth;
-
- u8 rsvd2;
- u8 rsvd3;
- u8 rsvd4;
- u8 rsvd5;
-
- u16 rq_depth;
- u16 rx_buf_sz_idx;
- u16 sq_depth;
-};
-
-struct hinic_cmd_io_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
- u32 io_status;
-};
-
-struct hinic_cmd_clear_io_res {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
-};
-
-struct hinic_cmd_set_res_state {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
- u32 rsvd2;
-};
-
-struct hinic_cmd_base_qpn {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 qpn;
-};
-
-struct hinic_cmd_hw_ci {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u8 dma_attr_off;
- u8 pending_limit;
- u8 coalesc_timer;
-
- u8 msix_en;
- u16 msix_entry_idx;
-
- u32 sq_id;
- u32 rsvd1;
- u64 ci_addr;
-};
-
-struct hinic_hwdev {
- struct hinic_hwif *hwif;
- struct msix_entry *msix_entries;
-
- struct hinic_aeqs aeqs;
- struct hinic_func_to_io func_to_io;
-
- struct hinic_cap nic_cap;
-};
-
-struct hinic_nic_cb {
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size);
-
- void *handle;
- unsigned long cb_state;
-};
-
-struct hinic_pfhwdev {
- struct hinic_hwdev hwdev;
-
- struct hinic_pf_to_mgmt pf_to_mgmt;
-
- struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
-};
-
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
-
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd);
-
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size);
-
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
-
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
-
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev);
-
-void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
-
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
-
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i);
-
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index);
-
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer);
-
-#endif
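
Each of the hinic_cmd_* structures removed above opens with the same management-message header (a status byte, a version byte and six reserved bytes) followed by the 16-bit function index. A small standalone sketch that prints the resulting field offsets with offsetof(), using a hypothetical demo_cmd layout modelled on hinic_cmd_fw_ctxt rather than the driver's own types:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in mirroring the shared hinic_cmd_* header layout. */
struct demo_cmd {
	uint8_t  status;
	uint8_t  version;
	uint8_t  rsvd0[6];
	uint16_t func_idx;
	uint16_t rx_buf_sz;
	uint32_t rsvd1;
};

int main(void)
{
	printf("status   at offset %zu\n", offsetof(struct demo_cmd, status));
	printf("version  at offset %zu\n", offsetof(struct demo_cmd, version));
	printf("func_idx at offset %zu\n", offsetof(struct demo_cmd, func_idx));
	printf("total size %zu bytes\n", sizeof(struct demo_cmd));
	return 0;
}
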
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
deleted file mode 100644
index 7cb8b9b9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/log2.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-
-#define HINIC_EQS_WQ_NAME "hinic_eqs"
-
-#define GET_EQ_NUM_PAGES(eq, pg_size) \
- (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
-
-#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
-
-#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
-
-#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
-
-#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define GET_EQ_ELEMENT(eq, idx) \
- ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
- (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
-
-#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CEQ_ELEM(eq, idx) ((u32 *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
-
-#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
-
-#define PAGE_IN_4K(page_size) ((page_size) >> 12)
-#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
-
-#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
-#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
-
-#define EQ_MAX_PAGES 8
-
-#define CEQE_TYPE_SHIFT 23
-#define CEQE_TYPE_MASK 0x7
-
-#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \
- CEQE_TYPE_MASK)
-
-#define CEQE_DATA_MASK 0x3FFFFFF
-#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK)
-
-#define aeq_to_aeqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
-
-#define ceq_to_ceqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
-
-#define work_to_aeq_work(work) \
- container_of(work, struct hinic_eq_work, work)
-
-#define DMA_ATTR_AEQ_DEFAULT 0
-#define DMA_ATTR_CEQ_DEFAULT 0
-
-/* No coalescence */
-#define THRESH_CEQ_DEFAULT 0
-
-enum eq_int_mode {
- EQ_INT_MODE_ARMED,
- EQ_INT_MODE_ALWAYS
-};
-
-enum eq_arm_state {
- EQ_NOT_ARMED,
- EQ_ARMED
-};
-
-/**
- * hinic_aeq_register_hw_cb - register AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to register a callback for
- * @handle: private data that will be used by the callback
- * @hwe_handler: callback function
- **/
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size))
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_handler = hwe_handler;
- hwe_cb->handle = handle;
- hwe_cb->hwe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to unregister the callback for
- **/
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event)
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
-
- while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
- schedule();
-
- hwe_cb->hwe_handler = NULL;
-}
-
-/**
- * hinic_ceq_register_cb - register CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to register a callback for
- * @handle: private data that will be used by the callback
- * @handler: callback function
- **/
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*handler)(void *handle, u32 ceqe_data))
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->handler = handler;
- ceq_cb->handle = handle;
- ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to unregister the callback for
- **/
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event)
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
-
- while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
- schedule();
-
- ceq_cb->handler = NULL;
-}
-
-static u8 eq_cons_idx_checksum_set(u32 val)
-{
- u8 checksum = 0;
- int idx;
-
- for (idx = 0; idx < 32; idx += 4)
- checksum ^= ((val >> idx) & 0xF);
-
- return (checksum & 0xF);
-}
-
-/**
- * eq_update_ci - update the HW cons idx of event queue
- * @eq: the event queue to update the cons idx for
- **/
-static void eq_update_ci(struct hinic_eq *eq)
-{
- u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
-
- /* Read Modify Write */
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_EQ_CI_CLEAR(val, IDX) &
- HINIC_EQ_CI_CLEAR(val, WRAPPED) &
- HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
- HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);
-
- val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
- HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
- HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
-
- val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-/**
- * aeq_irq_handler - handler for the AEQ event
- * @eq: the Async Event Queue that received the event
- **/
-static void aeq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
- struct hinic_hwif *hwif = aeqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_aeq_elem *aeqe_curr;
- struct hinic_hw_event_cb *hwe_cb;
- enum hinic_aeq_type event;
- unsigned long eqe_state;
- u32 aeqe_desc;
- int i, size;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe_curr = GET_CURR_AEQ_ELEM(eq);
-
- /* Data in HW is in Big endian Format */
- aeqe_desc = be32_to_cpu(aeqe_curr->desc);
-
- /* HW toggles the wrapped bit, when it adds eq element */
- if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
- break;
-
- event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
- if (event >= HINIC_MAX_AEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
- return;
- }
-
- if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
- hwe_cb = &aeqs->hwe_cb[event];
-
- size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
-
- eqe_state = cmpxchg(&hwe_cb->hwe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED |
- HINIC_EQE_RUNNING);
- if ((eqe_state == HINIC_EQE_ENABLED) &&
- (hwe_cb->hwe_handler))
- hwe_cb->hwe_handler(hwe_cb->handle,
- aeqe_curr->data, size);
- else
- dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
- event);
-
- hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
- }
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * ceq_event_handler - handler for the ceq events
- * @ceqs: ceqs part of the chip
- * @ceqe: ceq element that describes the event
- **/
-static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
-{
- struct hinic_hwif *hwif = ceqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_ceq_cb *ceq_cb;
- enum hinic_ceq_type event;
- unsigned long eqe_state;
-
- event = CEQE_TYPE(ceqe);
- if (event >= HINIC_MAX_CEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
- return;
- }
-
- ceq_cb = &ceqs->ceq_cb[event];
-
- eqe_state = cmpxchg(&ceq_cb->ceqe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
-
- if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
- ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
- else
- dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
-}
-
-/**
- * ceq_irq_handler - handler for the CEQ event
- * @eq: the Completion Event Queue that received the event
- **/
-static void ceq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
- u32 ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = *(GET_CURR_CEQ_ELEM(eq));
-
- /* Data in HW is in Big endian Format */
- ceqe = be32_to_cpu(ceqe);
-
- /* HW toggles the wrapped bit, when it adds eq element event */
- if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
- break;
-
- ceq_event_handler(ceqs, ceqe);
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * eq_irq_handler - handler for the EQ event
- * @data: the Event Queue that received the event
- **/
-static void eq_irq_handler(void *data)
-{
- struct hinic_eq *eq = data;
-
- if (eq->type == HINIC_AEQ)
- aeq_irq_handler(eq);
- else if (eq->type == HINIC_CEQ)
- ceq_irq_handler(eq);
-
- eq_update_ci(eq);
-}
-
-/**
- * eq_irq_work - the work of the EQ that received the event
- * @work: the work struct that is associated with the EQ
- **/
-static void eq_irq_work(struct work_struct *work)
-{
- struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
- struct hinic_eq *aeq;
-
- aeq = aeq_work->data;
- eq_irq_handler(aeq);
-}
-
-/**
- * ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
- **/
-static void ceq_tasklet(unsigned long ceq_data)
-{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
-
- eq_irq_handler(ceq);
-}
-
-/**
- * aeq_interrupt - aeq interrupt handler
- * @irq: irq number
- * @data: the Async Event Queue that collected the event
- **/
-static irqreturn_t aeq_interrupt(int irq, void *data)
-{
- struct hinic_eq_work *aeq_work;
- struct hinic_eq *aeq = data;
- struct hinic_aeqs *aeqs;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
-
- aeq_work = &aeq->aeq_work;
- aeq_work->data = aeq;
-
- aeqs = aeq_to_aeqs(aeq);
- queue_work(aeqs->workq, &aeq_work->work);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ceq_interrupt - ceq interrupt handler
- * @irq: irq number
- * @data: the Completion Event Queue that collected the event
- **/
-static irqreturn_t ceq_interrupt(int irq, void *data)
-{
- struct hinic_eq *ceq = data;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
-
- tasklet_schedule(&ceq->ceq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void set_ctrl0(struct hinic_eq *eq)
-{
- struct msix_entry *msix_entry = &eq->msix_entry;
- enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);
-
- ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
- HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
- HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
- HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);
-
- ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
- HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
- HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
- HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-static void set_ctrl1(struct hinic_eq *eq)
-{
- enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
- elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
- HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
- HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-/**
- * set_eq_ctrls - setting eq's ctrl registers
- * @eq: the Event Queue for setting
- **/
-static void set_eq_ctrls(struct hinic_eq *eq)
-{
- set_ctrl0(eq);
- set_ctrl1(eq);
-}
-
-/**
- * aeq_elements_init - initialize all the elements in the aeq
- * @eq: the Async Event Queue
- * @init_val: value to initialize the elements with
- **/
-static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- struct hinic_aeq_elem *aeqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe = GET_AEQ_ELEM(eq, i);
- aeqe->desc = cpu_to_be32(init_val);
- }
-
-	wmb(); /* Write the initialization values */
-}
-
-/**
- * ceq_elements_init - Initialize all the elements in the ceq
- * @eq: the event queue
- * @init_val: value to initialize the elements with
- **/
-static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- u32 *ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = GET_CEQ_ELEM(eq, i);
- *(ceqe) = cpu_to_be32(init_val);
- }
-
-	wmb(); /* Write the initialization values */
-}
-
-/**
- * alloc_eq_pages - allocate the pages for the queue
- * @eq: the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int alloc_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 init_val, addr, val;
- size_t addr_size;
- int err, pg;
-
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->dma_addr)
- return -ENOMEM;
-
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->virt_addr) {
- err = -ENOMEM;
- goto err_virt_addr_alloc;
- }
-
- for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
- if (!eq->virt_addr[pg]) {
- err = -ENOMEM;
- goto err_dma_alloc;
- }
-
- addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
- val = upper_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
- val = lower_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
- }
-
- init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
-
- if (eq->type == HINIC_AEQ)
- aeq_elements_init(eq, init_val);
- else if (eq->type == HINIC_CEQ)
- ceq_elements_init(eq, init_val);
-
- return 0;
-
-err_dma_alloc:
- while (--pg >= 0)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
-
-err_virt_addr_alloc:
- devm_kfree(&pdev->dev, eq->dma_addr);
- return err;
-}
-
-/**
- * free_eq_pages - free the pages of the queue
- * @eq: the Event Queue
- **/
-static void free_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int pg;
-
- for (pg = 0; pg < eq->num_pages; pg++)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
- devm_kfree(&pdev->dev, eq->dma_addr);
-}
-
-/**
- * init_eq - initialize Event Queue
- * @eq: the event queue
- * @hwif: the HW interface of a PCI function device
- * @type: the type of the event queue, aeq or ceq
- * @q_id: Queue id number
- * @q_len: the number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @entry: msix entry associated with the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
- enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
- struct msix_entry entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- eq->hwif = hwif;
- eq->type = type;
- eq->q_id = q_id;
- eq->q_len = q_len;
- eq->page_size = page_size;
-
- /* Clear PI and CI, also clear the ARM bit */
- hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
- hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
-
- eq->cons_idx = 0;
- eq->wrapped = 0;
-
- if (type == HINIC_AEQ) {
- eq->elem_size = HINIC_AEQE_SIZE;
- } else if (type == HINIC_CEQ) {
- eq->elem_size = HINIC_CEQE_SIZE;
- } else {
- dev_err(&pdev->dev, "Invalid EQ type\n");
- return -EINVAL;
- }
-
- eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
- eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
-
- eq->msix_entry = entry;
-
- if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
- dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
- return -EINVAL;
- }
-
- if (eq->num_pages > EQ_MAX_PAGES) {
- dev_err(&pdev->dev, "too many pages for eq\n");
- return -EINVAL;
- }
-
- set_eq_ctrls(eq);
- eq_update_ci(eq);
-
- err = alloc_eq_pages(eq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
- return err;
- }
-
- if (type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- INIT_WORK(&aeq_work->work, eq_irq_work);
- } else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
- }
-
- /* set the attributes of the msix entry */
- hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
- HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
-
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
-
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
- goto err_req_irq;
- }
-
- return 0;
-
-err_req_irq:
- free_eq_pages(eq);
- return err;
-}
-
-/**
- * remove_eq - remove Event Queue
- * @eq: the event queue
- **/
-static void remove_eq(struct hinic_eq *eq)
-{
- struct msix_entry *entry = &eq->msix_entry;
-
- free_irq(entry->vector, eq);
-
- if (eq->type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- cancel_work_sync(&aeq_work->work);
- } else if (eq->type == HINIC_CEQ) {
- tasklet_kill(&eq->ceq_tasklet);
- }
-
- free_eq_pages(eq);
-}
-
-/**
- * hinic_aeqs_init - initialize all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- * @hwif: the HW interface of a PCI function device
- * @num_aeqs: number of AEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, q_id;
-
- aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
- if (!aeqs->workq)
- return -ENOMEM;
-
- aeqs->hwif = hwif;
- aeqs->num_aeqs = num_aeqs;
-
- for (q_id = 0; q_id < num_aeqs; q_id++) {
- err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
- goto err_init_aeq;
- }
- }
-
- return 0;
-
-err_init_aeq:
- for (i = 0; i < q_id; i++)
- remove_eq(&aeqs->aeq[i]);
-
- destroy_workqueue(aeqs->workq);
- return err;
-}
-
-/**
- * hinic_aeqs_free - free all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- **/
-void hinic_aeqs_free(struct hinic_aeqs *aeqs)
-{
- int q_id;
-
-	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
- remove_eq(&aeqs->aeq[q_id]);
-
- destroy_workqueue(aeqs->workq);
-}
-
-/**
- * hinic_ceqs_init - init all the ceqs
- * @ceqs: ceqs part of the chip
- * @hwif: the hardware interface of a pci function device
- * @num_ceqs: number of CEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, Negative - Failure
- **/
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, q_id, err;
-
- ceqs->hwif = hwif;
- ceqs->num_ceqs = num_ceqs;
-
- for (q_id = 0; q_id < num_ceqs; q_id++) {
- err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
- goto err_init_ceq;
- }
- }
-
- return 0;
-
-err_init_ceq:
- for (i = 0; i < q_id; i++)
- remove_eq(&ceqs->ceq[i]);
-
- return err;
-}
-
-/**
- * hinic_ceqs_free - free all the ceqs
- * @ceqs: ceqs part of the chip
- **/
-void hinic_ceqs_free(struct hinic_ceqs *ceqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
- remove_eq(&ceqs->ceq[q_id]);
-}
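
The consumer-index update path removed above (eq_update_ci() together with eq_cons_idx_checksum_set()) protects the CI register with a 4-bit XOR checksum computed over the eight nibbles of the value being written. A standalone sketch of that nibble XOR, handy for sanity-checking values seen in register dumps; xor4_checksum() is an illustrative name, not a driver symbol:

#include <stdio.h>
#include <stdint.h>

/* XOR all eight 4-bit nibbles of a 32-bit value, as the removed
 * eq_cons_idx_checksum_set() does before the CI register is written.
 */
static uint8_t xor4_checksum(uint32_t val)
{
	uint8_t checksum = 0;
	int idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= (val >> idx) & 0xF;

	return checksum & 0xF;
}

int main(void)
{
	uint32_t ci_val = 0x80000123;	/* example register value */

	printf("checksum nibble = 0x%x\n", xor4_checksum(ci_val));
	return 0;
}
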
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
deleted file mode 100644
index ecb9c2b..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_EQS_H
-#define HINIC_HW_EQS_H
-
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/sizes.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
-#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1
-
-#define HINIC_AEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \
- HINIC_AEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \
- << HINIC_AEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_AEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \
- HINIC_AEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \
- << HINIC_AEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
-#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1
-
-#define HINIC_CEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \
- HINIC_CEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \
- << HINIC_CEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_CEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \
- HINIC_CEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \
- << HINIC_CEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0
-#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7
-#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8
-#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31
-
-#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F
-#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1
-#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF
-#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1
-
-#define HINIC_EQ_ELEM_DESC_SET(val, member) \
- (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \
- HINIC_EQ_ELEM_DESC_##member##_SHIFT)
-
-#define HINIC_EQ_ELEM_DESC_GET(val, member) \
- (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \
- HINIC_EQ_ELEM_DESC_##member##_MASK)
-
-#define HINIC_EQ_CI_IDX_SHIFT 0
-#define HINIC_EQ_CI_WRAPPED_SHIFT 20
-#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24
-#define HINIC_EQ_CI_INT_ARMED_SHIFT 31
-
-#define HINIC_EQ_CI_IDX_MASK 0xFFFFF
-#define HINIC_EQ_CI_WRAPPED_MASK 0x1
-#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF
-#define HINIC_EQ_CI_INT_ARMED_MASK 0x1
-
-#define HINIC_EQ_CI_SET(val, member) \
- (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \
- HINIC_EQ_CI_##member##_SHIFT)
-
-#define HINIC_EQ_CI_CLEAR(val, member) \
- ((val) & (~(HINIC_EQ_CI_##member##_MASK \
- << HINIC_EQ_CI_##member##_SHIFT)))
-
-#define HINIC_MAX_AEQS 4
-#define HINIC_MAX_CEQS 32
-
-#define HINIC_AEQE_SIZE 64
-#define HINIC_CEQE_SIZE 4
-
-#define HINIC_AEQE_DESC_SIZE 4
-#define HINIC_AEQE_DATA_SIZE \
- (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
-
-#define HINIC_DEFAULT_AEQ_LEN 64
-#define HINIC_DEFAULT_CEQ_LEN 1024
-
-#define HINIC_EQ_PAGE_SIZE SZ_4K
-
-#define HINIC_CEQ_ID_CMDQ 0
-
-enum hinic_eq_type {
- HINIC_AEQ,
- HINIC_CEQ,
-};
-
-enum hinic_aeq_type {
- HINIC_MSG_FROM_MGMT_CPU = 2,
-
- HINIC_MAX_AEQ_EVENTS,
-};
-
-enum hinic_ceq_type {
- HINIC_CEQ_CMDQ = 3,
-
- HINIC_MAX_CEQ_EVENTS,
-};
-
-enum hinic_eqe_state {
- HINIC_EQE_ENABLED = BIT(0),
- HINIC_EQE_RUNNING = BIT(1),
-};
-
-struct hinic_aeq_elem {
- u8 data[HINIC_AEQE_DATA_SIZE];
- u32 desc;
-};
-
-struct hinic_eq_work {
- struct work_struct work;
- void *data;
-};
-
-struct hinic_eq {
- struct hinic_hwif *hwif;
-
- enum hinic_eq_type type;
- int q_id;
- u32 q_len;
- u32 page_size;
-
- u32 cons_idx;
- int wrapped;
-
- size_t elem_size;
- int num_pages;
- int num_elem_in_pg;
-
- struct msix_entry msix_entry;
-
- dma_addr_t *dma_addr;
- void **virt_addr;
-
- struct hinic_eq_work aeq_work;
-
- struct tasklet_struct ceq_tasklet;
-};
-
-struct hinic_hw_event_cb {
- void (*hwe_handler)(void *handle, void *data, u8 size);
- void *handle;
- unsigned long hwe_state;
-};
-
-struct hinic_aeqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq aeq[HINIC_MAX_AEQS];
- int num_aeqs;
-
- struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS];
-
- struct workqueue_struct *workq;
-};
-
-struct hinic_ceq_cb {
- void (*handler)(void *handle, u32 ceqe_data);
- void *handle;
- enum hinic_eqe_state ceqe_state;
-};
-
-struct hinic_ceqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq ceq[HINIC_MAX_CEQS];
- int num_ceqs;
-
- struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
-};
-
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size));
-
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event);
-
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*ceq_cb)(void *handle, u32 ceqe_data));
-
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event);
-
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_aeqs_free(struct hinic_aeqs *aeqs);
-
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_ceqs_free(struct hinic_ceqs *ceqs);
-
-#endif
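
Every register field in the header removed above is described by a _SHIFT/_MASK pair plus generic SET/CLEAR/GET macros that paste the field name with the ## operator. A short standalone illustration of that pattern, using a made-up DEMO_ register rather than the HINIC definitions:

#include <stdio.h>
#include <stdint.h>

/* Per-field shift/mask pairs, in the style of HINIC_EQ_CI_*. */
#define DEMO_IDX_SHIFT      0
#define DEMO_IDX_MASK       0xFFFFF
#define DEMO_WRAPPED_SHIFT  20
#define DEMO_WRAPPED_MASK   0x1

/* Generic accessors built by pasting the field name, as the removed header does. */
#define DEMO_SET(val, member) \
	(((uint32_t)(val) & DEMO_##member##_MASK) << DEMO_##member##_SHIFT)
#define DEMO_CLEAR(val, member) \
	((val) & ~(DEMO_##member##_MASK << DEMO_##member##_SHIFT))
#define DEMO_GET(val, member) \
	(((val) >> DEMO_##member##_SHIFT) & DEMO_##member##_MASK)

int main(void)
{
	uint32_t reg = 0;

	reg = DEMO_CLEAR(reg, IDX) | DEMO_SET(1234, IDX);
	reg = DEMO_CLEAR(reg, WRAPPED) | DEMO_SET(1, WRAPPED);

	printf("idx=%u wrapped=%u raw=0x%08x\n",
	       (unsigned int)DEMO_GET(reg, IDX),
	       (unsigned int)DEMO_GET(reg, WRAPPED),
	       (unsigned int)reg);
	return 0;
}
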
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
deleted file mode 100644
index 823a170..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-
-#define PCIE_ATTR_ENTRY 0
-
-#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
-
-/**
- * hinic_msix_attr_set - set message attribute for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer, u8 lli_credit_limit,
- u8 resend_timer)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
- HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
- HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
- * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER);
- addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_set_pf_action - set action on pf channel
- * @hwif: the HW interface of a pci function device
- * @action: action on pf channel
- **/
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
-{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
-
- attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
- attr5 |= HINIC_FA5_SET(action, PF_ACTION);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5);
-}
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, OUTBOUND_STATE);
-}
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE);
- attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, DB_STATE);
-}
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE);
- attr4 |= HINIC_FA4_SET(db_state, DB_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-/**
- * hwif_ready - test if the HW is ready for use
- * @hwif: the HW interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int hwif_ready(struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- u32 addr, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hwif_attr - set the attributes in the relevant members in hwif
- * @hwif: the HW interface of a pci function device
- * @attr0: the first attribute that was read from the hw
- * @attr1: the second attribute that was read from the hw
- **/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
-{
- hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
- hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
- hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
- hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
-
- hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
- hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
- hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
- hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
-}
-
-/**
- * read_hwif_attr - read the attributes and set members in hwif
- * @hwif: the HW interface of a pci function device
- **/
-static void read_hwif_attr(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- set_hwif_attr(hwif, attr0, attr1);
-}
-
-/**
- * set_ppf - try to set hwif as ppf and set the type of hwif in this case
- * @hwif: the HW interface of a pci function device
- **/
-static void set_ppf(struct hinic_hwif *hwif)
-{
- struct hinic_func_attr *attr = &hwif->attr;
- u32 addr, val, ppf_election;
-
- /* Read Modify Write */
- addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
-
- ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
-
- val |= ppf_election;
- hinic_hwif_write_reg(hwif, addr, val);
-
- /* check PPF */
- val = hinic_hwif_read_reg(hwif, addr);
-
- attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
- if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
- attr->func_type = HINIC_PPF;
-}
-
-/**
- * set_dma_attr - set the dma attributes in the HW
- * @hwif: the HW interface of a pci function device
- * @entry_idx: the entry index in the dma table
- * @st: PCIE TLP steering tag
- * @at: PCIE TLP AT field
- * @ph: PCIE TLP Processing Hint field
- * @no_snooping: PCIE TLP No snooping
- * @tph_en: PCIE TLP Processing Hint Enable
- **/
-static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
- u8 st, u8 at, u8 ph,
- enum hinic_pcie_nosnoop no_snooping,
- enum hinic_pcie_tph tph_en)
-{
- u32 addr, val, dma_attr_entry;
-
- /* Read Modify Write */
- addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_DMA_ATTR_CLEAR(val, ST) &
- HINIC_DMA_ATTR_CLEAR(val, AT) &
- HINIC_DMA_ATTR_CLEAR(val, PH) &
- HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
- HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
-
- dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
- HINIC_DMA_ATTR_SET(at, AT) |
- HINIC_DMA_ATTR_SET(ph, PH) |
- HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
- HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
-
- val |= dma_attr_entry;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * dma_attr_init - initialize the default dma attributes
- * @hwif: the HW interface of a pci function device
- **/
-static void dma_attr_init(struct hinic_hwif *hwif)
-{
- set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
- HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
- HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
-}
-
-/**
- * hinic_init_hwif - initialize the hw interface
- * @hwif: the HW interface of a pci function device
- * @pdev: the pci device for accessing PCI resources
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
-{
- int err;
-
- hwif->pdev = pdev;
-
- hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
- if (!hwif->cfg_regs_bar) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- return -ENOMEM;
- }
-
- err = hwif_ready(hwif);
- if (err) {
- dev_err(&pdev->dev, "HW interface is not ready\n");
- goto err_hwif_ready;
- }
-
- read_hwif_attr(hwif);
-
- if (HINIC_IS_PF(hwif))
- set_ppf(hwif);
-
- /* No transactions before DMA is initialized */
- dma_attr_init(hwif);
- return 0;
-
-err_hwif_ready:
- iounmap(hwif->cfg_regs_bar);
- return err;
-}
-
-/**
- * hinic_free_hwif - free the HW interface
- * @hwif: the HW interface of a pci function device
- **/
-void hinic_free_hwif(struct hinic_hwif *hwif)
-{
- iounmap(hwif->cfg_regs_bar);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
deleted file mode 100644
index 5b4760c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IF_H
-#define HINIC_HW_IF_H
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define HINIC_DMA_ATTR_ST_SHIFT 0
-#define HINIC_DMA_ATTR_AT_SHIFT 8
-#define HINIC_DMA_ATTR_PH_SHIFT 10
-#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
-#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
-
-#define HINIC_DMA_ATTR_ST_MASK 0xFF
-#define HINIC_DMA_ATTR_AT_MASK 0x3
-#define HINIC_DMA_ATTR_PH_MASK 0x3
-#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
-#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
-
-#define HINIC_DMA_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
- HINIC_DMA_ATTR_##member##_SHIFT)
-
-#define HINIC_DMA_ATTR_CLEAR(val, member) \
- ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
- << HINIC_DMA_ATTR_##member##_SHIFT)))
-
-#define HINIC_FA0_FUNC_IDX_SHIFT 0
-#define HINIC_FA0_PF_IDX_SHIFT 10
-#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
-/* reserved members - off 16 */
-#define HINIC_FA0_FUNC_TYPE_SHIFT 24
-
-#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
-#define HINIC_FA0_PF_IDX_MASK 0xF
-#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_FA0_FUNC_TYPE_MASK 0x1
-
-#define HINIC_FA0_GET(val, member) \
- (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
-
-#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
-/* reserved members - off 10 */
-#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
-/* reserved members - off 15 */
-#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
-/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
-
-#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
-#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
-#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
-
-#define HINIC_FA1_GET(val, member) \
- (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
-
-#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
-#define HINIC_FA4_DB_STATE_SHIFT 1
-
-#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1
-#define HINIC_FA4_DB_STATE_MASK 0x1
-
-#define HINIC_FA4_GET(val, member) \
- (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK)
-
-#define HINIC_FA4_SET(val, member) \
- ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT)
-
-#define HINIC_FA4_CLEAR(val, member) \
- ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT)))
-
-#define HINIC_FA5_PF_ACTION_SHIFT 0
-#define HINIC_FA5_PF_ACTION_MASK 0xFFFF
-
-#define HINIC_FA5_SET(val, member) \
- (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT)
-
-#define HINIC_FA5_CLEAR(val, member) \
- ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT)))
-
-#define HINIC_PPF_ELECTION_IDX_SHIFT 0
-#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
-
-#define HINIC_PPF_ELECTION_SET(val, member) \
- (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \
- HINIC_PPF_ELECTION_##member##_SHIFT)
-
-#define HINIC_PPF_ELECTION_GET(val, member) \
- (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
- HINIC_PPF_ELECTION_##member##_MASK)
-
-#define HINIC_PPF_ELECTION_CLEAR(val, member) \
- ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
- << HINIC_PPF_ELECTION_##member##_SHIFT)))
-
-#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
-#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
-#define HINIC_MSIX_LLI_TIMER_SHIFT 16
-#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
-#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF
-#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F
-#define HINIC_MSIX_RESEND_TIMER_MASK 0x7
-
-#define HINIC_MSIX_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_##member##_MASK) << \
- HINIC_MSIX_##member##_SHIFT)
-
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
-
-#define HINIC_MSIX_CNT_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \
- HINIC_MSIX_CNT_##member##_SHIFT)
-
-#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
-#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
-#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
-#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
-#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
-#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
-
-#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
-#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
-#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
-
-#define HINIC_PCI_CFG_REGS_BAR 0
-#define HINIC_PCI_DB_BAR 4
-
-#define HINIC_PCIE_ST_DISABLE 0
-#define HINIC_PCIE_AT_DISABLE 0
-#define HINIC_PCIE_PH_DISABLE 0
-
-#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */
-#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
-
-enum hinic_pcie_nosnoop {
- HINIC_PCIE_SNOOP = 0,
- HINIC_PCIE_NO_SNOOP = 1,
-};
-
-enum hinic_pcie_tph {
- HINIC_PCIE_TPH_DISABLE = 0,
- HINIC_PCIE_TPH_ENABLE = 1,
-};
-
-enum hinic_func_type {
- HINIC_PF = 0,
- HINIC_PPF = 2,
-};
-
-enum hinic_mod_type {
- HINIC_MOD_COMM = 0, /* HW communication module */
- HINIC_MOD_L2NIC = 1, /* L2NIC module */
- HINIC_MOD_CFGM = 7, /* Configuration module */
-
- HINIC_MOD_MAX = 15
-};
-
-enum hinic_node_id {
- HINIC_NODE_ID_MGMT = 21,
-};
-
-enum hinic_pf_action {
- HINIC_PF_MGMT_INIT = 0x0,
-
- HINIC_PF_MGMT_ACTIVE = 0x11,
-};
-
-enum hinic_outbound_state {
- HINIC_OUTBOUND_ENABLE = 0,
- HINIC_OUTBOUND_DISABLE = 1,
-};
-
-enum hinic_db_state {
- HINIC_DB_ENABLE = 0,
- HINIC_DB_DISABLE = 1,
-};
-
-struct hinic_func_attr {
- u16 func_idx;
- u8 pf_idx;
- u8 pci_intf_idx;
-
- enum hinic_func_type func_type;
-
- u8 ppf_idx;
-
- u16 num_irqs;
- u8 num_aeqs;
- u8 num_ceqs;
-
- u8 num_dma_attr;
-};
-
-struct hinic_hwif {
- struct pci_dev *pdev;
- void __iomem *cfg_regs_bar;
-
- struct hinic_func_attr attr;
-};
-
-static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
-{
- return be32_to_cpu(readl(hwif->cfg_regs_bar + reg));
-}
-
-static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
- u32 val)
-{
- writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg);
-}
-
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
-
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif);
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state);
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state);
-
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
-
-void hinic_free_hwif(struct hinic_hwif *hwif);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
deleted file mode 100644
index 8e58976..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/semaphore.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define CI_Q_ADDR_SIZE sizeof(u32)
-
-#define CI_ADDR(base_addr, q_id) ((base_addr) + \
- (q_id) * CI_Q_ADDR_SIZE)
-
-#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)
-
-#define DB_IDX(db, db_base) \
- (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
-
-enum io_cmd {
- IO_CMD_MODIFY_QUEUE_CTXT = 0,
-};
-
-static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
-{
- int i;
-
- for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
- free_db_area->db_idx[i] = i;
-
- free_db_area->alloc_pos = 0;
- free_db_area->return_pos = HINIC_DB_MAX_AREAS;
-
- free_db_area->num_free = HINIC_DB_MAX_AREAS;
-
- sema_init(&free_db_area->idx_lock, 1);
-}
-
-static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx;
-
- down(&free_db_area->idx_lock);
-
- free_db_area->num_free--;
-
- if (free_db_area->num_free < 0) {
- free_db_area->num_free++;
- up(&free_db_area->idx_lock);
- return ERR_PTR(-ENOMEM);
- }
-
- pos = free_db_area->alloc_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- idx = free_db_area->db_idx[pos];
-
- free_db_area->db_idx[pos] = -1;
-
- up(&free_db_area->idx_lock);
-
- return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
-}
-
-static void return_db_area(struct hinic_func_to_io *func_to_io,
- void __iomem *db_base)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx = DB_IDX(db_base, func_to_io->db_base);
-
- down(&free_db_area->idx_lock);
-
- pos = free_db_area->return_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- free_db_area->db_idx[pos] = idx;
-
- free_db_area->num_free++;
-
- up(&free_db_area->idx_lock);
-}
-
-static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_sqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_sq_ctxt_block *sq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_sq_ctxt *sq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- sq_ctxt_block = cmdq_buf.buf;
- sq_ctxt = sq_ctxt_block->sq_ctxt;
-
- hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
- num_sqs, func_to_io->max_qps);
- for (i = 0; i < num_sqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_rqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_rq_ctxt_block *rq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_rq_ctxt *rq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- rq_ctxt_block = cmdq_buf.buf;
- rq_ctxt = rq_ctxt_block->rq_ctxt;
-
- hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
- num_rqs, func_to_io->max_qps);
- for (i = 0; i < num_rqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-/**
- * write_qp_ctxts - write the qp ctxt to HW
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: first qp number
- * @num_qps: number of qps to write
- *
- * Return 0 - Success, negative - Failure
- **/
-static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_qps)
-{
- return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
- write_rq_ctxts(func_to_io, base_qpn, num_qps));
-}
-
-/**
- * init_qp - Initialize a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to initialize
- * @q_id: the id of the qp
- * @sq_msix_entry: msix entry for sq
- * @rq_msix_entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp, int q_id,
- struct msix_entry *sq_msix_entry,
- struct msix_entry *rq_msix_entry)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- void __iomem *db_base;
- int err;
-
- qp->q_id = q_id;
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
- return err;
- }
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
- HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
- goto err_rq_alloc;
- }
-
- db_base = get_db_area(func_to_io);
- if (IS_ERR(db_base)) {
- dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
- err = PTR_ERR(db_base);
- goto err_get_db;
- }
-
- func_to_io->sq_db[q_id] = db_base;
-
- err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
- sq_msix_entry,
- CI_ADDR(func_to_io->ci_addr_base, q_id),
- CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
- if (err) {
- dev_err(&pdev->dev, "Failed to init SQ\n");
- goto err_sq_init;
- }
-
- err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
- rq_msix_entry);
- if (err) {
- dev_err(&pdev->dev, "Failed to init RQ\n");
- goto err_rq_init;
- }
-
- return 0;
-
-err_rq_init:
- hinic_clean_sq(&qp->sq);
-
-err_sq_init:
- return_db_area(func_to_io, db_base);
-
-err_get_db:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
-
-err_rq_alloc:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
- return err;
-}
-
-/**
- * destroy_qp - Clean the resources of a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to clean
- **/
-static void destroy_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp)
-{
- int q_id = qp->q_id;
-
- hinic_clean_rq(&qp->rq);
- hinic_clean_sq(&qp->sq);
-
- return_db_area(func_to_io, func_to_io->sq_db[q_id]);
-
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
-}
-
-/**
- * hinic_io_create_qps - Create Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: base qp number
- * @num_qps: number of queue pairs to create
- * @sq_msix_entries: msix entries for the SQs
- * @rq_msix_entries: msix entries for the RQs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
- void *ci_addr_base;
- int i, j, err;
-
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
- if (!func_to_io->qps)
- return -ENOMEM;
-
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->sq_wq) {
- err = -ENOMEM;
- goto err_sq_wq;
- }
-
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->rq_wq) {
- err = -ENOMEM;
- goto err_rq_wq;
- }
-
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
- if (!func_to_io->sq_db) {
- err = -ENOMEM;
- goto err_sq_db;
- }
-
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
- if (!ci_addr_base) {
- dev_err(&pdev->dev, "Failed to allocate CI area\n");
- err = -ENOMEM;
- goto err_ci_base;
- }
-
- func_to_io->ci_addr_base = ci_addr_base;
-
- for (i = 0; i < num_qps; i++) {
- err = init_qp(func_to_io, &func_to_io->qps[i], i,
- &sq_msix_entries[i], &rq_msix_entries[i]);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QP %d\n", i);
- goto err_init_qp;
- }
- }
-
- err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
- if (err) {
- dev_err(&pdev->dev, "Failed to init QP ctxts\n");
- goto err_write_qp_ctxts;
- }
-
- return 0;
-
-err_write_qp_ctxts:
-err_init_qp:
- for (j = 0; j < i; j++)
- destroy_qp(func_to_io, &func_to_io->qps[j]);
-
- dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- func_to_io->ci_addr_base, func_to_io->ci_dma_base);
-
-err_ci_base:
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
-err_sq_db:
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
-
-err_rq_wq:
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
-err_sq_wq:
- devm_kfree(&pdev->dev, func_to_io->qps);
- return err;
-}
-
-/**
- * hinic_io_destroy_qps - Destroy the IO Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @num_qps: number of queue pairs to destroy
- **/
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t ci_table_size;
- int i;
-
- ci_table_size = CI_TABLE_SIZE(num_qps);
-
- for (i = 0; i < num_qps; i++)
- destroy_qp(func_to_io, &func_to_io->qps[i]);
-
- dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
- func_to_io->ci_dma_base);
-
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
- devm_kfree(&pdev->dev, func_to_io->qps);
-}
-
-/**
- * hinic_io_init - Initialize the IO components
- * @func_to_io: func to io channel that holds the IO components
- * @hwif: HW interface for accessing IO
- * @max_qps: maximum QPs in HW
- * @num_ceqs: number of completion event queues
- * @ceq_msix_entries: msix entries for ceqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq, type;
- void __iomem *db_area;
- int err;
-
- func_to_io->hwif = hwif;
- func_to_io->qps = NULL;
- func_to_io->max_qps = max_qps;
-
- err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
- HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init CEQs\n");
- return err;
- }
-
- err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
- goto err_wqs_alloc;
- }
-
- func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
- if (!func_to_io->db_base) {
- dev_err(&pdev->dev, "Failed to remap IO DB area\n");
- err = -ENOMEM;
- goto err_db_ioremap;
- }
-
- init_db_area_idx(&func_to_io->free_db_area);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
- db_area = get_db_area(func_to_io);
- if (IS_ERR(db_area)) {
- dev_err(&pdev->dev, "Failed to get cmdq db area\n");
- err = PTR_ERR(db_area);
- goto err_db_area;
- }
-
- func_to_io->cmdq_db_area[cmdq] = db_area;
- }
-
- err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
- func_to_io->cmdq_db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
- goto err_init_cmdqs;
- }
-
- return 0;
-
-err_init_cmdqs:
-err_db_area:
- for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
-
- iounmap(func_to_io->db_base);
-
-err_db_ioremap:
- hinic_wqs_free(&func_to_io->wqs);
-
-err_wqs_alloc:
- hinic_ceqs_free(&func_to_io->ceqs);
- return err;
-}
-
-/**
- * hinic_io_free - Free the IO components
- * @func_to_io: func to io channel that holds the IO components
- **/
-void hinic_io_free(struct hinic_func_to_io *func_to_io)
-{
- enum hinic_cmdq_type cmdq;
-
- hinic_free_cmdqs(&func_to_io->cmdqs);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
-
- iounmap(func_to_io->db_base);
- hinic_wqs_free(&func_to_io->wqs);
- hinic_ceqs_free(&func_to_io->ceqs);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
deleted file mode 100644
index adb6417..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IO_H
-#define HINIC_HW_IO_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/semaphore.h>
-#include <linux/sizes.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp.h"
-
-#define HINIC_DB_PAGE_SIZE SZ_4K
-#define HINIC_DB_SIZE SZ_4M
-
-#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
-
-enum hinic_db_type {
- HINIC_DB_CMDQ_TYPE,
- HINIC_DB_SQ_TYPE,
-};
-
-enum hinic_io_path {
- HINIC_CTRL_PATH,
- HINIC_DATA_PATH,
-};
-
-struct hinic_free_db_area {
- int db_idx[HINIC_DB_MAX_AREAS];
-
- int alloc_pos;
- int return_pos;
-
- int num_free;
-
- /* Lock for getting db area */
- struct semaphore idx_lock;
-};
-
-struct hinic_func_to_io {
- struct hinic_hwif *hwif;
-
- struct hinic_ceqs ceqs;
-
- struct hinic_wqs wqs;
-
- struct hinic_wq *sq_wq;
- struct hinic_wq *rq_wq;
-
- struct hinic_qp *qps;
- u16 max_qps;
-
- void __iomem **sq_db;
- void __iomem *db_base;
-
- void *ci_addr_base;
- dma_addr_t ci_dma_base;
-
- struct hinic_free_db_area free_db_area;
-
- void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
-
- struct hinic_cmdqs cmdqs;
-};
-
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries);
-
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io,
- int num_qps);
-
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries);
-
-void hinic_io_free(struct hinic_func_to_io *func_to_io);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
deleted file mode 100644
index 278dc13..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_api_cmd.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_dev.h"
-
-#define SYNC_MSG_ID_MASK 0x1FF
-
-#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
-
-#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
- ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
- SYNC_MSG_ID_MASK))
-
-#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN)
-
-#define MGMT_MSG_LEN_MIN 20
-#define MGMT_MSG_LEN_STEP 16
-#define MGMT_MSG_RSVD_FOR_DEV 8
-
-#define SEGMENT_LEN 48
-
-#define MAX_PF_MGMT_BUF_SIZE 2048
-
-/* Data should be SEG LEN size aligned */
-#define MAX_MSG_LEN 2016
-
-#define MSG_NOT_RESP 0xFFFF
-
-#define MGMT_MSG_TIMEOUT 1000
-
-#define mgmt_to_pfhwdev(pf_mgmt) \
- container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
-
-enum msg_segment_type {
- NOT_LAST_SEGMENT = 0,
- LAST_SEGMENT = 1,
-};
-
-enum mgmt_direction_type {
- MGMT_DIRECT_SEND = 0,
- MGMT_RESP = 1,
-};
-
-enum msg_ack_type {
- MSG_ACK = 0,
- MSG_NO_ACK = 1,
-};
-
-/**
- * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler will handle its messages
- * @handle: private data for the callback
- * @callback: the handler that will handle messages
- **/
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->cb = callback;
- mgmt_cb->handle = handle;
- mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
-}
-
-/**
- * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler handles its messages
- **/
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod)
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;
-
- while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
- schedule();
-
- mgmt_cb->cb = NULL;
-}
-
-/**
- * prepare_header - prepare the header of the message
- * @pf_to_mgmt: PF to MGMT channel
- * @msg_len: the length of the message
- * @mod: module in the chip that will get the message
- * @ack_type: ask for response
- * @direction: the direction of the message
- * @cmd: command of the message
- * @msg_id: message id
- *
- * Return the prepared header value
- **/
-static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u16 msg_len, enum hinic_mod_type mod,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 cmd, u16 msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-
- return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
- HINIC_MSG_HEADER_SET(mod, MODULE) |
- HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
- HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
- HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
- HINIC_MSG_HEADER_SET(0, SEQID) |
- HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
- HINIC_MSG_HEADER_SET(direction, DIRECTION) |
- HINIC_MSG_HEADER_SET(cmd, CMD) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
- HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
-}
-
-/**
- * prepare_mgmt_cmd - prepare the mgmt command
- * @mgmt_cmd: pointer to the command to prepare
- * @header: pointer to the header of the message
- * @msg: the data of the message
- * @msg_len: the length of the message
- **/
-static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
-{
- memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
-
- mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
- memcpy(mgmt_cmd, header, sizeof(*header));
-
- mgmt_cmd += sizeof(*header);
- memcpy(mgmt_cmd, msg, msg_len);
-}
-
-/**
- * mgmt_msg_len - calculate the total message length
- * @msg_data_len: the length of the message data
- *
- * Return the total message length
- **/
-static u16 mgmt_msg_len(u16 msg_data_len)
-{
- /* RSVD + HEADER_SIZE + DATA_LEN */
- u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;
-
- if (msg_len > MGMT_MSG_LEN_MIN)
- msg_len = MGMT_MSG_LEN_MIN +
- ALIGN((msg_len - MGMT_MSG_LEN_MIN),
- MGMT_MSG_LEN_STEP);
- else
- msg_len = MGMT_MSG_LEN_MIN;
-
- return msg_len;
-}
-
-/**
- * send_msg_to_mgmt - send message to mgmt by API CMD
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @data: the msg data
- * @data_len: the msg data length
- * @ack_type: ask for response
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *data, u16 data_len,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_api_cmd_chain *chain;
- u64 header;
- u16 msg_id;
-
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
-
- if (direction == MGMT_RESP) {
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, resp_msg_id);
- } else {
- SYNC_MSG_ID_INC(pf_to_mgmt);
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, msg_id);
- }
-
- prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);
-
- chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
- return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
- pf_to_mgmt->sync_msg_buf,
- mgmt_msg_len(data_len));
-}
-
-/**
- * msg_to_mgmt_sync - send sync message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: response length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- u8 *buf_out, u16 *out_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_recv_msg *recv_msg;
- struct completion *recv_done;
- u16 msg_id;
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
- recv_done = &recv_msg->recv_done;
-
- if (resp_msg_id == MSG_NOT_RESP)
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
- else
- msg_id = resp_msg_id;
-
- init_completion(recv_done);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_ACK, direction, resp_msg_id);
- if (err) {
- dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
- goto unlock_sync_msg;
- }
-
- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
- dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
- err = -ETIMEDOUT;
- goto unlock_sync_msg;
- }
-
- smp_rmb(); /* verify reading after completion */
-
- if (recv_msg->msg_id != msg_id) {
- dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
- err = -EFAULT;
- goto unlock_sync_msg;
- }
-
- if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) {
- memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
- *out_size = recv_msg->msg_len;
- }
-
-unlock_sync_msg:
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * msg_to_mgmt_async - send message to mgmt without response
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_NO_ACK, direction, resp_msg_id);
-
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * hinic_msg_to_mgmt - send message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: returned response length
- * @sync: sync msg or async msg
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- if (sync != HINIC_MGMT_MSG_SYNC) {
- dev_err(&pdev->dev, "Invalid MGMT msg type\n");
- return -EINVAL;
- }
-
- if (!MSG_SZ_IS_VALID(in_size)) {
- dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
- return -EINVAL;
- }
-
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
- buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
-}
-
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u8 *buf_out = recv_msg->buf_out;
- struct hinic_mgmt_cb *mgmt_cb;
- unsigned long cb_state;
- u16 out_size = 0;
-
- if (recv_msg->mod >= HINIC_MOD_MAX) {
- dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- recv_msg->mod);
- return;
- }
-
- mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
-
- cb_state = cmpxchg(&mgmt_cb->state,
- HINIC_MGMT_CB_ENABLED,
- HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
-
- if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
- mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
- recv_msg->msg, recv_msg->msg_len,
- buf_out, &out_size);
- else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
-
- if (!recv_msg->async_mgmt_to_pf)
- /* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
- buf_out, out_size, MGMT_RESP,
- recv_msg->msg_id);
-}
-
-/**
- * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- wmb(); /* verify writing all, before reading */
-
- complete(&recv_msg->recv_done);
-}
-
-/**
- * recv_mgmt_msg_handler - handler for a message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @header: the header of the message
- * @recv_msg: received message details
- **/
-static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u64 *header, struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int seq_id, seg_len;
- u8 *msg_body;
-
- seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
- seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);
-
- if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
- dev_err(&pdev->dev, "recv big mgmt msg\n");
- return;
- }
-
- msg_body = (u8 *)header + sizeof(*header);
- memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);
-
- if (!HINIC_MSG_HEADER_GET(*header, LAST))
- return;
-
- recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
- recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
- recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
- ASYNC_MGMT_TO_PF);
- recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
- recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);
-
- if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
- mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
- else
- mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
-}
-
-/**
- * mgmt_msg_aeqe_handler - handler for a mgmt message event
- * @handle: PF to MGMT channel
- * @data: the header of the message
- * @size: unused
- **/
-static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
-{
- struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
- struct hinic_recv_msg *recv_msg;
- u64 *header = (u64 *)data;
-
- recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
- MGMT_DIRECT_SEND ?
- &pf_to_mgmt->recv_msg_from_mgmt :
- &pf_to_mgmt->recv_resp_msg_from_mgmt;
-
- recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
-}
-
-/**
- * alloc_recv_msg - allocate receive message memory
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: pointer that will hold the allocated data
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->msg)
- return -ENOMEM;
-
- recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->buf_out)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate recv msg\n");
- return err;
- }
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_resp_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
- return err;
- }
-
- pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
- MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!pf_to_mgmt->sync_msg_buf)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- * @hwif: HW interface the PF to MGMT will use for accessing HW
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- pf_to_mgmt->hwif = hwif;
-
- sema_init(&pf_to_mgmt->sync_msg_lock, 1);
- pf_to_mgmt->sync_msg_id = 0;
-
- err = alloc_msg_buf(pf_to_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
- return err;
- }
-
- err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
- return err;
- }
-
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
- pf_to_mgmt,
- mgmt_msg_aeqe_handler);
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_free - free PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- **/
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
- hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
deleted file mode 100644
index cb23962..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/skbuff.h>
-#include <linux/io.h>
-#include <asm/barrier.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define SQ_DB_OFF SZ_2K
-
-/* The number of cache lines to prefetch until the threshold state */
-#define WQ_PREFETCH_MAX 2
-/* The number of cache lines to prefetch after the threshold state */
-#define WQ_PREFETCH_MIN 1
-/* Threshold state */
-#define WQ_PREFETCH_THRESHOLD 256
-
-/* sizes of the SQ/RQ ctxt */
-#define Q_CTXT_SIZE 48
-#define CTXT_RSVD 240
-
-#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
-
-#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
- (max_sqs + (q_id)) * Q_CTXT_SIZE)
-
-#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4)
-#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3)
-#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3)
-
-#define SQ_DB_PI_HI_SHIFT 8
-#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT)
-
-#define SQ_DB_PI_LOW_MASK 0xFF
-#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK)
-
-#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
-
-#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
-#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-
-#define TX_MAX_MSS_DEFAULT 0x3E00
-
-enum sq_wqe_type {
- SQ_NORMAL_WQE = 0,
-};
-
-enum rq_completion_fmt {
- RQ_COMPLETE_SGE = 1
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues)
-{
- u16 max_sqs = max_queues;
- u16 max_rqs = max_queues;
-
- qp_ctxt_hdr->num_queues = num_queues;
- qp_ctxt_hdr->queue_type = ctxt_type;
-
- if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
- qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
- else
- qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
-
- qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
-
- hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
-}
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = sq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
- GLOBAL_SQ_ID) |
- HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);
-
- sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
- HINIC_SQ_CTXT_CI_SET(1, WRAPPED);
-
- sq_ctxt->wq_hi_pfn_pi =
- HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
- HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
-
- sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->pref_cache =
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- sq_ctxt->pref_wrapped = 1;
-
- sq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
- HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);
-
- sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->wq_block_hi_pfn =
- HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
-}
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = rq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
- HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);
-
- rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
- HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
-
- rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
- HI_PFN) |
- HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
-
- rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pref_cache =
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- rq_ctxt->pref_wrapped = 1;
-
- rq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
- HINIC_RQ_CTXT_PREF_SET(ci_start, CI);
-
- rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
- rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
-
- rq_ctxt->wq_block_hi_pfn =
- HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
-}
-
-/**
- * alloc_sq_skb_arr - allocate sq array for saved skb
- * @sq: HW Send Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_sq_skb_arr(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
- sq->saved_skb = vzalloc(skb_arr_size);
- if (!sq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_sq_skb_arr - free sq array for saved skb
- * @sq: HW Send Queue
- **/
-static void free_sq_skb_arr(struct hinic_sq *sq)
-{
- vfree(sq->saved_skb);
-}
-
-/**
- * alloc_rq_skb_arr - allocate rq array for saved skb
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_skb_arr(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
- rq->saved_skb = vzalloc(skb_arr_size);
- if (!rq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_rq_skb_arr - free rq array for saved skb
- * @rq: HW Receive Queue
- **/
-static void free_rq_skb_arr(struct hinic_rq *rq)
-{
- vfree(rq->saved_skb);
-}
-
-/**
- * hinic_init_sq - Initialize HW Send Queue
- * @sq: HW Send Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the SQ
- * @entry: msix entry for sq
- * @ci_addr: address for reading the current HW consumer index
- * @ci_dma_addr: dma address for reading the current HW consumer index
- * @db_base: doorbell base address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry,
- void *ci_addr, dma_addr_t ci_dma_addr,
- void __iomem *db_base)
-{
- sq->hwif = hwif;
-
- sq->wq = wq;
-
- sq->irq = entry->vector;
- sq->msix_entry = entry->entry;
-
- sq->hw_ci_addr = ci_addr;
- sq->hw_ci_dma_addr = ci_dma_addr;
-
- sq->db_base = db_base + SQ_DB_OFF;
-
- return alloc_sq_skb_arr(sq);
-}
-
-/**
- * hinic_clean_sq - Clean HW Send Queue's Resources
- * @sq: Send Queue
- **/
-void hinic_clean_sq(struct hinic_sq *sq)
-{
- free_sq_skb_arr(sq);
-}
-
-/**
- * alloc_rq_cqe - allocate rq completion queue elements
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cqe_dma_size, cqe_size;
- struct hinic_wq *wq = rq->wq;
- int j, i;
-
- cqe_size = wq->q_depth * sizeof(*rq->cqe);
- rq->cqe = vzalloc(cqe_size);
- if (!rq->cqe)
- return -ENOMEM;
-
- cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
- rq->cqe_dma = vzalloc(cqe_dma_size);
- if (!rq->cqe_dma)
- goto err_cqe_dma_arr_alloc;
-
- for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
- if (!rq->cqe[i])
- goto err_cqe_alloc;
- }
-
- return 0;
-
-err_cqe_alloc:
- for (j = 0; j < i; j++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
- rq->cqe_dma[j]);
-
- vfree(rq->cqe_dma);
-
-err_cqe_dma_arr_alloc:
- vfree(rq->cqe);
- return -ENOMEM;
-}
-
-/**
- * free_rq_cqe - free rq completion queue elements
- * @rq: HW Receive Queue
- **/
-static void free_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_wq *wq = rq->wq;
- int i;
-
- for (i = 0; i < wq->q_depth; i++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
- rq->cqe_dma[i]);
-
- vfree(rq->cqe_dma);
- vfree(rq->cqe);
-}
-
-/**
- * hinic_init_rq - Initialize HW Receive Queue
- * @rq: HW Receive Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the RQ
- * @entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
- int err;
-
- rq->hwif = hwif;
-
- rq->wq = wq;
-
- rq->irq = entry->vector;
- rq->msix_entry = entry->entry;
-
- rq->buf_sz = HINIC_RX_BUF_SZ;
-
- err = alloc_rq_skb_arr(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
- return err;
- }
-
- err = alloc_rq_cqe(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
- goto err_alloc_rq_cqe;
- }
-
- /* HW requirements: Must be at least 32 bit */
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
- if (!rq->pi_virt_addr) {
- dev_err(&pdev->dev, "Failed to allocate PI address\n");
- err = -ENOMEM;
- goto err_pi_virt;
- }
-
- return 0;
-
-err_pi_virt:
- free_rq_cqe(rq);
-
-err_alloc_rq_cqe:
- free_rq_skb_arr(rq);
- return err;
-}
-
-/**
- * hinic_clean_rq - Clean HW Receive Queue's Resources
- * @rq: HW Receive Queue
- **/
-void hinic_clean_rq(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
-
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
- rq->pi_dma_addr);
-
- free_rq_cqe(rq);
- free_rq_skb_arr(rq);
-}
-
-/**
- * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
- * @sq: send queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-/**
- * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
- * @rq: recv queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
-{
- u32 ctrl_size, task_size, bufdesc_size;
-
- ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
- task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
- bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
- bufdesc_size = SIZE_8BYTES(bufdesc_size);
-
- ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
- HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
- HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- HINIC_SQ_CTRL_SET(ctrl_size, LEN);
-
- ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
- QUEUE_INFO_MSS);
-}
-
-static void sq_prepare_task(struct hinic_sq_task *task)
-{
- task->pkt_info0 =
- HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- INNER_L3TYPE) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
- VLAN_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
- task->pkt_info1 =
- HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
- task->pkt_info2 =
- HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
- HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
- TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- OUTER_L3TYPE);
-
- task->ufo_v6_identify = 0;
-
- task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
-
- task->zero_pad = 0;
-}
-
-/**
- * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
- * @sq: send queue
- * @prod_idx: pi value
- * @sq_wqe: wqe to prepare
- * @sges: sges for use by the wqe for send for buf addresses
- * @nr_sges: number of sges
- **/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
-
- sq_prepare_task(&sq_wqe->task);
-
- for (i = 0; i < nr_sges; i++)
- sq_wqe->buf_descs[i].sge = sges[i];
-}
-
-/**
- * sq_prepare_db - prepare doorbell to write
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @cos: cos of the doorbell
- *
- * Return db value
- **/
-static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
-
- /* Data should be written to HW in Big Endian Format */
- return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
- HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
- HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
- HINIC_SQ_DB_INFO_SET(cos, COS) |
- HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
-}
-
-/**
- * hinic_sq_write_db- write doorbell
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @wqe_size: wqe size
- * @cos: cos of the wqe
- **/
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos)
-{
- struct hinic_wq *wq = sq->wq;
-
- /* increment prod_idx to the next */
- prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- wmb(); /* Write all before the doorbell */
-
- writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
-}
-
-/**
- * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
- * @sq: sq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_write_wqe - write the wqe to the sq
- * @sq: send queue
- * @prod_idx: pi of the wqe
- * @sq_wqe: the wqe to write
- * @skb: skb to save
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe,
- struct sk_buff *skb, unsigned int wqe_size)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
-
- sq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(sq_wqe, wqe_size);
-
- hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
-}
-
-/**
- * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the
- * wqe only have one wqebb
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the wqe size ptr
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sq_wqe *sq_wqe;
- struct hinic_sq_ctrl *ctrl;
- unsigned int buf_sect_len;
- u32 ctrl_info;
-
- /* read the ctrl section for getting wqe size */
- hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- *skb = sq->saved_skb[*cons_idx];
-
- sq_wqe = &hw_wqe->sq_wqe;
- ctrl = &sq_wqe->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
- buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
-
- *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
- *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
- *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the size of the wqe
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
-
- hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
- *skb = sq->saved_skb[*cons_idx];
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_put_wqe - release the ci for new wqes
- * @sq: send queue
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
-{
- hinic_put_wqe(sq->wq, wqe_size);
-}
-
-/**
- * hinic_sq_get_sges - get sges from the wqe
- * @sq_wqe: wqe to get the sges from its buffer addresses
- * @sges: returned sges
- * @nr_sges: number sges to return
- **/
-void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
- sges[i] = sq_wqe->buf_descs[i].sge;
- hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
- }
-}
-
-/**
- * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
- * @rq: rq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_write_wqe - write the wqe to the rq
- * @rq: recv queue
- * @prod_idx: pi of the wqe
- * @rq_wqe: the wqe to write
- * @skb: skb to save
- **/
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
-
- rq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
-
- hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
-}
-
-/**
- * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_rq_cqe *cqe;
- int rx_done;
- u32 status;
-
- hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- cqe = rq->cqe[*cons_idx];
-
- status = be32_to_cpu(cqe->status);
-
- rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
- if (!rx_done)
- return NULL;
-
- *skb = rq->saved_skb[*cons_idx];
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index in the wq
- *
- * Return wqe in incremented ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx)
-{
- struct hinic_wq *wq = rq->wq;
- struct hinic_hw_wqe *hw_wqe;
- unsigned int num_wqebbs;
-
- wqe_size = ALIGN(wqe_size, wq->wqebb_size);
- num_wqebbs = wqe_size / wq->wqebb_size;
-
- *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
-
- *skb = rq->saved_skb[*cons_idx];
-
- hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_put_wqe - release the ci for new wqes
- * @rq: recv queue
- * @cons_idx: consumer index of the wqe
- * @wqe_size: the size of the wqe
- **/
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 status = be32_to_cpu(cqe->status);
-
- status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);
-
- /* Rx WQE size is 1 WQEBB, no wq shadow*/
- cqe->status = cpu_to_be32(status);
-
- wmb(); /* clear done flag */
-
- hinic_put_wqe(rq->wq, wqe_size);
-}
-
-/**
- * hinic_rq_get_sge - get sge from the wqe
- * @rq: recv queue
- * @rq_wqe: wqe to get the sge from its buf address
- * @cons_idx: consumer index
- * @sge: returned sge
- **/
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
- u16 cons_idx, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 len = be32_to_cpu(cqe->len);
-
- sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
- sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
- sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
-}
-
-/**
- * hinic_rq_prepare_wqe - prepare wqe before insert to the queue
- * @rq: recv queue
- * @prod_idx: pi value
- * @rq_wqe: the wqe
- * @sge: sge for use by the wqe for recv buf address
- **/
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
- struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
- struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
- struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
- dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
-
- ctrl->ctrl_info =
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
- COMPLETE_LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
- BUFDESC_SECT_LEN) |
- HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
-
- hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
-
- buf_desc->hi_addr = sge->hi_addr;
- buf_desc->lo_addr = sge->lo_addr;
-}
-
-/**
- * hinic_rq_update - update pi of the rq
- * @rq: recv queue
- * @prod_idx: pi value
- **/
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
-{
- *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
deleted file mode 100644
index 6c84f83..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_H
-#define HINIC_HW_QP_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-
-#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0
-#define HINIC_SQ_DB_INFO_QID_SHIFT 8
-#define HINIC_SQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_SQ_DB_INFO_COS_SHIFT 24
-#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27
-
-#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF
-#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF
-#define HINIC_SQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_SQ_DB_INFO_COS_MASK 0x7
-#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F
-
-#define HINIC_SQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \
- << HINIC_SQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_SQ_WQEBB_SIZE 64
-#define HINIC_RQ_WQEBB_SIZE 32
-
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
-
-#define HINIC_SQ_DEPTH SZ_4K
-#define HINIC_RQ_DEPTH SZ_4K
-
-/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
-#define HINIC_RX_BUF_SZ 2048
-#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
-
-#define HINIC_MIN_TX_WQE_SIZE(wq) \
- ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
-
-#define HINIC_MIN_TX_NUM_WQEBBS(sq) \
- (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
-
-enum hinic_rx_buf_sz_idx {
- HINIC_RX_BUF_SZ_32_IDX,
- HINIC_RX_BUF_SZ_64_IDX,
- HINIC_RX_BUF_SZ_96_IDX,
- HINIC_RX_BUF_SZ_128_IDX,
- HINIC_RX_BUF_SZ_192_IDX,
- HINIC_RX_BUF_SZ_256_IDX,
- HINIC_RX_BUF_SZ_384_IDX,
- HINIC_RX_BUF_SZ_512_IDX,
- HINIC_RX_BUF_SZ_768_IDX,
- HINIC_RX_BUF_SZ_1024_IDX,
- HINIC_RX_BUF_SZ_1536_IDX,
- HINIC_RX_BUF_SZ_2048_IDX,
- HINIC_RX_BUF_SZ_3072_IDX,
- HINIC_RX_BUF_SZ_4096_IDX,
- HINIC_RX_BUF_SZ_8192_IDX,
- HINIC_RX_BUF_SZ_16384_IDX,
-};
-
-struct hinic_sq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- void *hw_ci_addr;
- dma_addr_t hw_ci_dma_addr;
-
- void __iomem *db_base;
-
- struct sk_buff **saved_skb;
-};
-
-struct hinic_rq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- size_t buf_sz;
-
- struct sk_buff **saved_skb;
-
- struct hinic_rq_cqe **cqe;
- dma_addr_t *cqe_dma;
-
- u16 *pi_virt_addr;
- dma_addr_t pi_dma_addr;
-};
-
-struct hinic_qp {
- struct hinic_sq sq;
- struct hinic_rq rq;
-
- u16 q_id;
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues);
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid);
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid);
-
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
- dma_addr_t ci_dma_addr, void __iomem *db_base);
-
-void hinic_clean_sq(struct hinic_sq *sq);
-
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry);
-
-void hinic_clean_rq(struct hinic_rq *rq);
-
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
-
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
-
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos);
-
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct sk_buff *skb,
- unsigned int wqe_size);
-
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx);
-
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
-
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
-
-void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct sk_buff *skb);
-
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx);
-
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx);
-
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size);
-
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
- u16 cons_idx, struct hinic_sge *sge);
-
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
-
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
deleted file mode 100644
index 376abf0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_CTXT_H
-#define HINIC_HW_QP_CTXT_H
-
-#include <linux/types.h>
-
-#include "hinic_hw_cmdq.h"
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11
-#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23
-
-#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF
-#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1
-
-#define HINIC_SQ_CTXT_CI_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \
- << HINIC_SQ_CTXT_CI_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \
- << HINIC_SQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0
-#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22
-
-#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF
-#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF
-
-#define HINIC_RQ_CTXT_PI_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \
- HINIC_RQ_CTXT_PI_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \
- HINIC_RQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_sqs) * sizeof(struct hinic_sq_ctxt))
-
-#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_rqs) * sizeof(struct hinic_rq_ctxt))
-
-#define HINIC_WQ_PAGE_PFN_SHIFT 12
-#define HINIC_WQ_BLOCK_PFN_SHIFT 9
-
-#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT)
-#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \
- HINIC_WQ_BLOCK_PFN_SHIFT)
-
-#define HINIC_Q_CTXT_MAX \
- ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \
- / sizeof(struct hinic_sq_ctxt))
-
-enum hinic_qp_ctxt_type {
- HINIC_QP_CTXT_TYPE_SQ,
- HINIC_QP_CTXT_TYPE_RQ
-};
-
-struct hinic_qp_ctxt_header {
- u16 num_queues;
- u16 queue_type;
- u32 addr_offset;
-};
-
-struct hinic_sq_ctxt {
- u32 ceq_attr;
-
- u32 ci_wrapped;
-
- u32 wq_hi_pfn_pi;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 rsvd0;
- u32 rsvd1;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_rq_ctxt {
- u32 ceq_attr;
-
- u32 pi_intr_attr;
-
- u32 wq_hi_pfn_ci;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
-
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 pi_paddr_hi;
- u32 pi_paddr_lo;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_sq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-struct hinic_rq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
deleted file mode 100644
index 3e3181c08..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/semaphore.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-
-#define WQS_BLOCKS_PER_PAGE 4
-
-#define WQ_BLOCK_SIZE 4096
-#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
-
-#define WQS_MAX_NUM_BLOCKS 128
-#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
- sizeof((wqs)->free_blocks[0]))
-
-#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
-
-#define WQ_PAGE_ADDR_SIZE sizeof(u64)
-#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define CMDQ_BLOCK_SIZE 512
-#define CMDQ_PAGE_SIZE 4096
-
-#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define WQ_BASE_VADDR(wqs, wq) \
- ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_PADDR(wqs, wq) \
- ((wqs)->page_paddr[(wq)->page_idx] \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_ADDR(wqs, wq) \
- ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
- ((cmdq_pages)->page_paddr \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->shadow_page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
-#define WQ_PAGE_ADDR(wq, idx) \
- ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
-
-#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
-
-#define WQE_IN_RANGE(wqe, start, end) \
- (((unsigned long)(wqe) >= (unsigned long)(start)) && \
- ((unsigned long)(wqe) < (unsigned long)(end)))
-
-#define WQE_SHADOW_PAGE(wq, wqe) \
- (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
- / (wq)->max_wqe_size)
-
-/**
- * queue_alloc_page - allocate page for Queue
- * @hwif: HW interface for allocating DMA
- * @vaddr: virtual address will be returned in this address
- * @paddr: physical address will be returned in this address
- * @shadow_vaddr: VM area will be return here for holding WQ page addresses
- * @page_sz: page size of each WQ page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
- void ***shadow_vaddr, size_t page_sz)
-{
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
- return -ENOMEM;
- }
-
- *paddr = (u64)dma_addr;
-
- /* use vzalloc for big mem */
- *shadow_vaddr = vzalloc(page_sz);
- if (!*shadow_vaddr)
- goto err_shadow_vaddr;
-
- return 0;
-
-err_shadow_vaddr:
- dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
- return -ENOMEM;
-}
-
-/**
- * wqs_allocate_page - allocate page for WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be allocated
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
-{
- return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
- &wqs->page_paddr[page_idx],
- &wqs->shadow_page_vaddr[page_idx],
- WQS_PAGE_SIZE);
-}
-
-/**
- * wqs_free_page - free page of WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be freed
- **/
-static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
- wqs->page_vaddr[page_idx],
- (dma_addr_t)wqs->page_paddr[page_idx]);
- vfree(wqs->shadow_page_vaddr[page_idx]);
-}
-
-/**
- * cmdq_allocate_page - allocate page for cmdq
- * @cmdq_pages: the pages of the cmdq queue struct to hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
- &cmdq_pages->page_paddr,
- &cmdq_pages->shadow_page_vaddr,
- CMDQ_PAGE_SIZE);
-}
-
-/**
- * cmdq_free_page - free page from cmdq
- * @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_hwif *hwif = cmdq_pages->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
- cmdq_pages->page_vaddr,
- (dma_addr_t)cmdq_pages->page_paddr);
- vfree(cmdq_pages->shadow_page_vaddr);
-}
-
-static int alloc_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_paddr)
- return -ENOMEM;
-
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_vaddr)
- goto err_page_vaddr;
-
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->shadow_page_vaddr)
- goto err_page_shadow_vaddr;
-
- return 0;
-
-err_page_shadow_vaddr:
- devm_kfree(&pdev->dev, wqs->page_vaddr);
-
-err_page_vaddr:
- devm_kfree(&pdev->dev, wqs->page_paddr);
- return -ENOMEM;
-}
-
-static void free_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_paddr);
-}
-
-static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
- int *block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- wqs->num_free_blks--;
-
- if (wqs->num_free_blks < 0) {
- wqs->num_free_blks++;
- up(&wqs->alloc_blocks_lock);
- return -ENOMEM;
- }
-
- pos = wqs->alloc_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- *page_idx = wqs->free_blocks[pos].page_idx;
- *block_idx = wqs->free_blocks[pos].block_idx;
-
- wqs->free_blocks[pos].page_idx = -1;
- wqs->free_blocks[pos].block_idx = -1;
-
- up(&wqs->alloc_blocks_lock);
- return 0;
-}
-
-static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
- int block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- pos = wqs->return_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = block_idx;
-
- wqs->num_free_blks++;
-
- up(&wqs->alloc_blocks_lock);
-}
-
-static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
-{
- int page_idx, blk_idx, pos = 0;
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = blk_idx;
- pos++;
- }
- }
-
- wqs->alloc_blk_pos = 0;
- wqs->return_blk_pos = pos;
- wqs->num_free_blks = pos;
-
- sema_init(&wqs->alloc_blocks_lock, 1);
-}
-
-/**
- * hinic_wqs_alloc - allocate Work Queues set
- * @wqs: Work Queue Set
- * @max_wqs: maximum wqs to allocate
- * @hwif: HW interface for use for the allocation
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
- struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, page_idx;
-
- max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
- if (max_wqs > WQS_MAX_NUM_BLOCKS) {
- dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
- return -EINVAL;
- }
-
- wqs->hwif = hwif;
- wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
-
- if (alloc_page_arrays(wqs)) {
- dev_err(&pdev->dev,
- "Failed to allocate mem for page addresses\n");
- return -ENOMEM;
- }
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- err = wqs_allocate_page(wqs, page_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed wq page allocation\n");
- goto err_wq_allocate_page;
- }
- }
-
- wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
- GFP_KERNEL);
- if (!wqs->free_blocks) {
- err = -ENOMEM;
- goto err_alloc_blocks;
- }
-
- init_wqs_blocks_arr(wqs);
- return 0;
-
-err_alloc_blocks:
-err_wq_allocate_page:
- for (i = 0; i < page_idx; i++)
- wqs_free_page(wqs, i);
-
- free_page_arrays(wqs);
- return err;
-}
-
-/**
- * hinic_wqs_free - free Work Queues set
- * @wqs: Work Queue Set
- **/
-void hinic_wqs_free(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int page_idx;
-
- devm_kfree(&pdev->dev, wqs->free_blocks);
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
- wqs_free_page(wqs, page_idx);
-
- free_page_arrays(wqs);
-}
-
-/**
- * alloc_wqes_shadow - allocate WQE shadows for WQ
- * @wq: WQ to allocate shadows for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_wqe)
- return -ENOMEM;
-
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_idx)
- goto err_shadow_idx;
-
- return 0;
-
-err_shadow_idx:
- devm_kfree(&pdev->dev, wq->shadow_wqe);
- return -ENOMEM;
-}
-
-/**
- * free_wqes_shadow - free WQE shadows of WQ
- * @wq: WQ to free shadows from
- **/
-static void free_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wq->shadow_idx);
- devm_kfree(&pdev->dev, wq->shadow_wqe);
-}
-
-/**
- * free_wq_pages - free pages of WQ
- * @hwif: HW interface for releasing dma addresses
- * @wq: WQ to free pages from
- * @num_q_pages: number pages to free
- **/
-static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int num_q_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i;
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
- dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
- dma_addr);
- }
-
- free_wqes_shadow(wq);
-}
-
-/**
- * alloc_wq_pages - alloc pages for WQ
- * @hwif: HW interface for allocating dma addresses
- * @wq: WQ to allocate pages for
- * @max_pages: maximum pages allowed
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int max_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, err, num_q_pages;
-
- num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
- if (num_q_pages > max_pages) {
- dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
- return -EINVAL;
- }
-
- if (num_q_pages & (num_q_pages - 1)) {
- dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
- return -EINVAL;
- }
-
- wq->num_q_pages = num_q_pages;
-
- err = alloc_wqes_shadow(wq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
- return err;
- }
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate wq page\n");
- goto err_alloc_wq_pages;
- }
-
- /* HW uses Big Endian Format */
- *paddr = cpu_to_be64(dma_addr);
- }
-
- return 0;
-
-err_alloc_wq_pages:
- free_wq_pages(wq, hwif, i);
- return -ENOMEM;
-}
-
-/**
- * hinic_wq_allocate - Allocate the WQ resources from the WQS
- * @wqs: WQ set from which to allocate the WQ resources
- * @wq: WQ to allocate resources for it from the WQ set
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int err;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- wq->hwif = hwif;
-
- err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to get free wqs next block\n");
- return err;
- }
-
- wq->wqebb_size = wqebb_size;
- wq->wq_page_size = wq_page_size;
- wq->q_depth = q_depth;
- wq->max_wqe_size = max_wqe_size;
- wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
- wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
- wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
-
- err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wq pages\n");
- goto err_alloc_wq_pages;
- }
-
- atomic_set(&wq->cons_idx, 0);
- atomic_set(&wq->prod_idx, 0);
- atomic_set(&wq->delta, q_depth);
- wq->mask = q_depth - 1;
-
- return 0;
-
-err_alloc_wq_pages:
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
- return err;
-}
-
-/**
- * hinic_wq_free - Free the WQ resources to the WQS
- * @wqs: WQ set to free the WQ resources to it
- * @wq: WQ to free its resources to the WQ set resources
- **/
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
-{
- free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
-
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
-}
-
-/**
- * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
- * @cmdq_pages: will hold the pages of the cmdq
- * @wq: returned wqs
- * @hwif: HW interface
- * @cmdq_blocks: number of cmdq blocks/wq to allocate
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size)
-{
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int i, j, err = -ENOMEM;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- cmdq_pages->hwif = hwif;
-
- err = cmdq_allocate_page(cmdq_pages);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
- return err;
- }
-
- for (i = 0; i < cmdq_blocks; i++) {
- wq[i].hwif = hwif;
- wq[i].page_idx = 0;
- wq[i].block_idx = i;
-
- wq[i].wqebb_size = wqebb_size;
- wq[i].wq_page_size = wq_page_size;
- wq[i].q_depth = q_depth;
- wq[i].max_wqe_size = max_wqe_size;
- wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
- wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
- wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
-
- err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
- CMDQ_WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
- goto err_cmdq_block;
- }
-
- atomic_set(&wq[i].cons_idx, 0);
- atomic_set(&wq[i].prod_idx, 0);
- atomic_set(&wq[i].delta, q_depth);
- wq[i].mask = q_depth - 1;
- }
-
- return 0;
-
-err_cmdq_block:
- for (j = 0; j < i; j++)
- free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
- return err;
-}
-
-/**
- * hinic_wqs_cmdq_free - Free wqs from cmdqs
- * @cmdq_pages: hold the pages of the cmdq
- * @wq: wqs to free
- * @cmdq_blocks: number of wqs to free
- **/
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks)
-{
- int i;
-
- for (i = 0; i < cmdq_blocks; i++)
- free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
-}
-
-static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
-
- shadow_addr += wq->wqebb_size;
- }
-}
-
-static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
- shadow_addr += wq->wqebb_size;
- }
-}
-
-/**
- * hinic_get_wqe - get wqe ptr in the current pi and update the pi
- * @wq: wq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx)
-{
- int curr_pg, end_pg, num_wqebbs;
- u16 curr_prod_idx, end_prod_idx;
-
- *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
-
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
- atomic_add(num_wqebbs, &wq->delta);
- return ERR_PTR(-EBUSY);
- }
-
- end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
-
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
- curr_prod_idx = end_prod_idx - num_wqebbs;
- curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
-
- /* end prod index points to the next wqebb, therefore minus 1 */
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
- end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
-
- *prod_idx = curr_prod_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
-
- wq->shadow_idx[curr_pg] = *prod_idx;
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
-}
-
-/**
- * hinic_put_wqe - return the wqe place to use for a new wqe
- * @wq: wq to return wqe
- * @wqe_size: wqe size
- **/
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- atomic_add(num_wqebbs, &wq->cons_idx);
-
- atomic_add(num_wqebbs, &wq->delta);
-}
-
-/**
- * hinic_read_wqe - read wqe ptr in the current ci
- * @wq: wq to get read from
- * @wqe_size: wqe size
- * @cons_idx: returned ci
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- u16 curr_cons_idx, end_cons_idx;
- int curr_pg, end_pg;
-
- if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
- return ERR_PTR(-EBUSY);
-
- curr_cons_idx = atomic_read(&wq->cons_idx);
-
- curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
- end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
- end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
-
- *cons_idx = curr_cons_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
-}
-
-/**
- * hinic_read_wqe_direct - read wqe directly from ci position
- * @wq: wq
- * @cons_idx: ci position
- *
- * Return wqe
- **/
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
-{
- return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
-}
-
-/**
- * wqe_shadow - check if a wqe is shadow
- * @wq: wq of the wqe
- * @wqe: the wqe for shadow checking
- *
- * Return true - shadow, false - Not shadow
- **/
-static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
-{
- size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
-
- return WQE_IN_RANGE(wqe, wq->shadow_wqe,
- &wq->shadow_wqe[wqe_shadow_size]);
-}
-
-/**
- * hinic_write_wqe - write the wqe to the wq
- * @wq: wq to write wqe to
- * @wqe: wqe to write
- * @wqe_size: wqe size
- **/
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size)
-{
- int curr_pg, num_wqebbs;
- void *shadow_addr;
- u16 prod_idx;
-
- if (wqe_shadow(wq, wqe)) {
- curr_pg = WQE_SHADOW_PAGE(wq, wqe);
-
- prod_idx = wq->shadow_idx[curr_pg];
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
deleted file mode 100644
index 9c030a0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQ_H
-#define HINIC_HW_WQ_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/atomic.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-
-struct hinic_free_block {
- int page_idx;
- int block_idx;
-};
-
-struct hinic_wq {
- struct hinic_hwif *hwif;
-
- int page_idx;
- int block_idx;
-
- u16 wqebb_size;
- u16 wq_page_size;
- u16 q_depth;
- u16 max_wqe_size;
- u16 num_wqebbs_per_page;
-
- /* The addresses are 64 bit in the HW */
- u64 block_paddr;
- void **shadow_block_vaddr;
- u64 *block_vaddr;
-
- int num_q_pages;
- u8 *shadow_wqe;
- u16 *shadow_idx;
-
- atomic_t cons_idx;
- atomic_t prod_idx;
- atomic_t delta;
- u16 mask;
-};
-
-struct hinic_wqs {
- struct hinic_hwif *hwif;
- int num_pages;
-
- /* The addresses are 64 bit in the HW */
- u64 *page_paddr;
- u64 **page_vaddr;
- void ***shadow_page_vaddr;
-
- struct hinic_free_block *free_blocks;
- int alloc_blk_pos;
- int return_blk_pos;
- int num_free_blks;
-
- /* Lock for getting a free block from the WQ set */
- struct semaphore alloc_blocks_lock;
-};
-
-struct hinic_cmdq_pages {
- /* The addresses are 64 bit in the HW */
- u64 page_paddr;
- u64 *page_vaddr;
- void **shadow_page_vaddr;
-
- struct hinic_hwif *hwif;
-};
-
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size);
-
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks);
-
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
- struct hinic_hwif *hwif);
-
-void hinic_wqs_free(struct hinic_wqs *wqs);
-
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size);
-
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
-
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx);
-
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
-
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx);
-
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
-
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
deleted file mode 100644
index bc73485..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQE_H
-#define HINIC_HW_WQE_H
-
-#include "hinic_common.h"
-
-#define HINIC_CMDQ_CTRL_PI_SHIFT 0
-#define HINIC_CMDQ_CTRL_CMD_SHIFT 16
-#define HINIC_CMDQ_CTRL_MOD_SHIFT 24
-#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
-
-#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF
-#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF
-#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F
-#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1
-
-#define HINIC_CMDQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \
- << HINIC_CMDQ_CTRL_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTRL_GET(val, member) \
- (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \
- & HINIC_CMDQ_CTRL_##member##_MASK)
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \
- << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT)
-
-#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \
- (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
- & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
-
-#define HINIC_SQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
- << HINIC_SQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_CTRL_GET(val, member) \
- (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
- & HINIC_SQ_CTRL_##member##_MASK)
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF
-
-#define HINIC_SQ_TASK_INFO0_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \
- HINIC_SQ_TASK_INFO0_##member##_SHIFT)
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF
-
-#define HINIC_SQ_TASK_INFO1_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
- HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \
- HINIC_SQ_TASK_INFO2_##member##_SHIFT)
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1
-
-#define HINIC_SQ_TASK_INFO4_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \
- HINIC_SQ_TASK_INFO4_##member##_SHIFT)
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
-
-#define HINIC_RQ_CQE_STATUS_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
- HINIC_RQ_CQE_STATUS_##member##_MASK)
-
-#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \
- ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \
- HINIC_RQ_CQE_STATUS_##member##_SHIFT)))
-
-#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16
-
-#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF
-
-#define HINIC_RQ_CQE_SGE_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \
- HINIC_RQ_CQE_SGE_##member##_MASK)
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
-#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27
-#define HINIC_RQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1
-#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3
-#define HINIC_RQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_RQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \
- HINIC_RQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_WQE_SIZE(nr_sges) \
- (sizeof(struct hinic_sq_ctrl) + \
- sizeof(struct hinic_sq_task) + \
- (nr_sges) * sizeof(struct hinic_sq_bufdesc))
-
-#define HINIC_SCMD_DATA_LEN 16
-
-#define HINIC_MAX_SQ_BUFDESCS 17
-
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
-
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
-enum hinic_outer_l3type {
- HINIC_OUTER_L3TYPE_UNKNOWN = 0,
- HINIC_OUTER_L3TYPE_IPV6 = 1,
- HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2,
- HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
-};
-
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
-enum hinic_l2type {
- HINIC_L2TYPE_ETH = 0,
-};
-
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
-struct hinic_cmdq_header {
- u32 header_info;
- u32 saved_data;
-};
-
-struct hinic_status {
- u32 status_info;
-};
-
-struct hinic_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_sge_resp {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_cmdq_completion {
- /* HW Format */
- union {
- struct hinic_sge_resp sge_resp;
- u64 direct_resp;
- };
-};
-
-struct hinic_scmd_bufdesc {
- u32 buf_len;
- u32 rsvd;
- u8 data[HINIC_SCMD_DATA_LEN];
-};
-
-struct hinic_lcmd_bufdesc {
- struct hinic_sge sge;
- u32 rsvd1;
- u64 rsvd2;
- u64 rsvd3;
-};
-
-struct hinic_cmdq_wqe_scmd {
- struct hinic_cmdq_header header;
- u64 rsvd;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_scmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_wqe_lcmd {
- struct hinic_cmdq_header header;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_lcmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_direct_wqe {
- struct hinic_cmdq_wqe_scmd wqe_scmd;
-};
-
-struct hinic_cmdq_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_direct_wqe direct_wqe;
- struct hinic_cmdq_wqe_lcmd wqe_lcmd;
- };
-};
-
-struct hinic_sq_ctrl {
- u32 ctrl_info;
- u32 queue_info;
-};
-
-struct hinic_sq_task {
- u32 pkt_info0;
- u32 pkt_info1;
- u32 pkt_info2;
- u32 ufo_v6_identify;
- u32 pkt_info4;
- u32 zero_pad;
-};
-
-struct hinic_sq_bufdesc {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_sq_wqe {
- struct hinic_sq_ctrl ctrl;
- struct hinic_sq_task task;
- struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
-};
-
-struct hinic_rq_cqe {
- u32 status;
- u32 len;
-
- u32 rsvd2;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 rsvd7;
-};
-
-struct hinic_rq_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_rq_cqe_sect {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_rq_bufdesc {
- u32 hi_addr;
- u32 lo_addr;
-};
-
-struct hinic_rq_wqe {
- struct hinic_rq_ctrl ctrl;
- u32 rsvd;
- struct hinic_rq_cqe_sect cqe_sect;
- struct hinic_rq_bufdesc buf_desc;
-};
-
-struct hinic_hw_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_wqe cmdq_wqe;
- struct hinic_sq_wqe sq_wqe;
- struct hinic_rq_wqe rq_wqe;
- };
-};
-
-#endif
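Most of hinic_hw_wqe.h above consists of *_SHIFT/*_MASK constant pairs plus SET/GET helper macros that pack several hardware fields into a single 32-bit word. Below is a small standalone illustration of that pattern; the macro definitions repeat the HINIC_SQ_CTRL ones from the removed header, while the field values in main() are made up for the example.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Same pattern as the HINIC_SQ_CTRL_* definitions above. */
#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT    0
#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT        16
#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT         22
#define HINIC_SQ_CTRL_LEN_SHIFT                 29

#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK     0xFF
#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK         0x1F
#define HINIC_SQ_CTRL_DATA_FORMAT_MASK          0x1
#define HINIC_SQ_CTRL_LEN_MASK                  0x3

#define HINIC_SQ_CTRL_SET(val, member) \
	(((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
	 << HINIC_SQ_CTRL_##member##_SHIFT)

#define HINIC_SQ_CTRL_GET(val, member) \
	(((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
	 & HINIC_SQ_CTRL_##member##_MASK)

int main(void)
{
	/* Pack three fields into one control dword, then decode one back. */
	u32 ctrl_info = HINIC_SQ_CTRL_SET(2, BUFDESC_SECT_LEN) |
			HINIC_SQ_CTRL_SET(4, TASKSECT_LEN) |
			HINIC_SQ_CTRL_SET(1, LEN);

	printf("tasksect len = %u\n",
	       (unsigned int)HINIC_SQ_CTRL_GET(ctrl_info, TASKSECT_LEN));
	return 0;
}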
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
deleted file mode 100644
index 4d4e3f0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
-#include "hinic_dev.h"
-
-#define HINIC_MIN_MTU_SIZE 256
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-
-enum mac_op {
- MAC_DEL,
- MAC_SET,
-};
-
-/**
- * change_mac - change(add or delete) mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- * @op: add or delete the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id, enum mac_op op)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_port_cmd cmd;
- u16 out_size;
- int err;
-
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
- if (op == MAC_SET)
- cmd = HINIC_PORT_CMD_SET_MAC;
- else
- cmd = HINIC_PORT_CMD_DEL_MAC;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mac_cmd.vlan_id = vlan_id;
- memcpy(port_mac_cmd.mac, addr, ETH_ALEN);
-
- err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
- sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_mac - add mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_mac(struct hinic_dev *nic_dev,
- const u8 *addr, u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_SET);
-}
-
-/**
- * hinic_port_del_mac - remove mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number that is connected to the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_DEL);
-}
-
-/**
- * hinic_port_get_mac - get the mac address of the nic device
- * @nic_dev: nic device
- * @addr: returned mac address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
- &port_mac_cmd, sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to get mac, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- memcpy(addr, port_mac_cmd.mac, ETH_ALEN);
- return 0;
-}
-
-/**
- * hinic_port_set_mtu - set mtu
- * @nic_dev: nic device
- * @new_mtu: new mtu
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mtu_cmd port_mtu_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, max_frame;
- u16 out_size;
-
- if (new_mtu < HINIC_MIN_MTU_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
- return -EINVAL;
- }
-
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size");
- return -EINVAL;
- }
-
- port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mtu_cmd.mtu = new_mtu;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
- &port_mtu_cmd, sizeof(port_mtu_cmd),
- &port_mtu_cmd, &out_size);
- if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) {
- dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n",
- port_mtu_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_vlan - add vlan to the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to add
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_del_vlan - delete vlan from the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to delete
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_set_rx_mode - set rx mode in the nic device
- * @nic_dev: nic device
- * @rx_mode: the rx mode to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_rx_mode_cmd rx_mode_cmd;
-
- rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- rx_mode_cmd.rx_mode = rx_mode;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
- &rx_mode_cmd, sizeof(rx_mode_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_link_state - get the link state
- * @nic_dev: nic device
- * @link_state: the returned link state
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_port_link_cmd link_cmd;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
- &link_cmd, sizeof(link_cmd),
- &link_cmd, &out_size);
- if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) {
- dev_err(&pdev->dev, "Failed to get link state, ret = %d\n",
- link_cmd.status);
- return -EINVAL;
- }
-
- *link_state = link_cmd.state;
- return 0;
-}
-
-/**
- * hinic_port_set_state - set port state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_state_cmd port_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- port_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
- &port_state, sizeof(port_state),
- &port_state, &out_size);
- if (err || (out_size != sizeof(port_state)) || port_state.status) {
- dev_err(&pdev->dev, "Failed to set port state, ret = %d\n",
- port_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_set_func_state - set func device state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state)
-{
- struct hinic_port_func_state_cmd func_state;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- func_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
- &func_state, sizeof(func_state),
- &func_state, &out_size);
- if (err || (out_size != sizeof(func_state)) || func_state.status) {
- dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n",
- func_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_get_cap - get port capabilities
- * @nic_dev: nic device
- * @port_cap: returned port capabilities
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
- port_cap, sizeof(*port_cap),
- port_cap, &out_size);
- if (err || (out_size != sizeof(*port_cap)) || port_cap->status) {
- dev_err(&pdev->dev,
- "Failed to get port capabilities, ret = %d\n",
- port_cap->status);
- return -EINVAL;
- }
-
- return 0;
-}
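Every wrapper in hinic_port.c above follows the same round trip: fill a command structure that carries the function index, hand it to hinic_port_msg_cmd() with the same buffer reused for the response, and treat anything other than a full-sized reply with a zero status field as a failure. The snippet below is only a condensed sketch of that request/validate pattern; demo_port_cmd and demo_msg_cmd are hypothetical stand-ins with the transport stubbed out so the code compiles on its own, not the driver's management-channel implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Hypothetical command following the status/version/rsvd0 header layout
 * shared by the hinic_port_*_cmd structures.
 */
struct demo_port_cmd {
	u8  status;
	u8  version;
	u8  rsvd0[6];

	u16 func_idx;
	u16 rsvd1;
	u32 value;
};

/* Stand-in for hinic_port_msg_cmd(): here it simply echoes the request. */
static int demo_msg_cmd(void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
{
	memmove(buf_out, buf_in, in_size);
	*out_size = in_size;
	return 0;
}

static int demo_port_set_value(u16 func_idx, u32 value)
{
	struct demo_port_cmd cmd = { 0 };
	u16 out_size;
	int err;

	cmd.func_idx = func_idx;
	cmd.value = value;

	err = demo_msg_cmd(&cmd, sizeof(cmd), &cmd, &out_size);
	/* A short reply or a non-zero status field is treated as failure. */
	if (err || out_size != sizeof(cmd) || cmd.status)
		return -1;

	return 0;
}

int main(void)
{
	printf("set value: %d\n", demo_port_set_value(3, 1500));
	return 0;
}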
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
deleted file mode 100644
index 9404365..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_PORT_H
-#define HINIC_PORT_H
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/bitops.h>
-
-#include "hinic_dev.h"
-
-enum hinic_rx_mode {
- HINIC_RX_MODE_UC = BIT(0),
- HINIC_RX_MODE_MC = BIT(1),
- HINIC_RX_MODE_BC = BIT(2),
- HINIC_RX_MODE_MC_ALL = BIT(3),
- HINIC_RX_MODE_PROMISC = BIT(4),
-};
-
-enum hinic_port_link_state {
- HINIC_LINK_STATE_DOWN,
- HINIC_LINK_STATE_UP,
-};
-
-enum hinic_port_state {
- HINIC_PORT_DISABLE = 0,
- HINIC_PORT_ENABLE = 3,
-};
-
-enum hinic_func_port_state {
- HINIC_FUNC_PORT_DISABLE = 0,
- HINIC_FUNC_PORT_ENABLE = 2,
-};
-
-enum hinic_autoneg_cap {
- HINIC_AUTONEG_UNSUPPORTED,
- HINIC_AUTONEG_SUPPORTED,
-};
-
-enum hinic_autoneg_state {
- HINIC_AUTONEG_DISABLED,
- HINIC_AUTONEG_ACTIVE,
-};
-
-enum hinic_duplex {
- HINIC_DUPLEX_HALF,
- HINIC_DUPLEX_FULL,
-};
-
-enum hinic_speed {
- HINIC_SPEED_10MB_LINK = 0,
- HINIC_SPEED_100MB_LINK,
- HINIC_SPEED_1000MB_LINK,
- HINIC_SPEED_10GB_LINK,
- HINIC_SPEED_25GB_LINK,
- HINIC_SPEED_40GB_LINK,
- HINIC_SPEED_100GB_LINK,
-
- HINIC_SPEED_UNKNOWN = 0xFF,
-};
-
-struct hinic_port_mac_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
- u16 rsvd1;
- unsigned char mac[ETH_ALEN];
-};
-
-struct hinic_port_mtu_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u32 mtu;
-};
-
-struct hinic_port_vlan_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
-};
-
-struct hinic_port_rx_mode_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd;
- u32 rx_mode;
-};
-
-struct hinic_port_link_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
-};
-
-struct hinic_port_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 state;
- u8 rsvd1[3];
-};
-
-struct hinic_port_link_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 rsvd1;
- u8 link;
- u8 rsvd2;
-};
-
-struct hinic_port_func_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 state;
- u8 rsvd2[3];
-};
-
-struct hinic_port_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 port_type;
- u8 autoneg_cap;
- u8 autoneg_state;
- u8 duplex;
- u8 speed;
- u8 rsvd2[3];
-};
-
-int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr);
-
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu);
-
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode);
-
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state);
-
-int hinic_port_set_state(struct hinic_dev *nic_dev,
- enum hinic_port_state state);
-
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state);
-
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap);
-
-#endif
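The enum hinic_rx_mode values in the header above are bit flags, so the u32 handed to hinic_port_set_rx_mode() is simply an OR of the modes the caller wants enabled. A minimal illustration of composing and testing such a mask follows; the DEMO_RX_MODE_* macros mirror the BIT() assignments above, and the rest of the code is only a sketch.

#include <stdint.h>
#include <stdio.h>

/* Same bit positions as enum hinic_rx_mode above. */
#define DEMO_RX_MODE_UC       (1u << 0)
#define DEMO_RX_MODE_MC       (1u << 1)
#define DEMO_RX_MODE_BC       (1u << 2)
#define DEMO_RX_MODE_MC_ALL   (1u << 3)
#define DEMO_RX_MODE_PROMISC  (1u << 4)

int main(void)
{
	/* A typical non-promiscuous setting: unicast, multicast, broadcast. */
	uint32_t rx_mode = DEMO_RX_MODE_UC | DEMO_RX_MODE_MC | DEMO_RX_MODE_BC;

	if (rx_mode & DEMO_RX_MODE_PROMISC)
		printf("promiscuous mode requested\n");
	else
		printf("rx_mode = 0x%x\n", (unsigned int)rx_mode);

	return 0;
}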
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
deleted file mode 100644
index b837dab..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
+++ /dev/null
@@ -1,2728 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0*/
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_H__
-#define __SML_TABLE_H__
-
-#include "hinic_sml_table_pub.h"
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-#define TBL_ID_CTR_DFX_S32_SM_NODE 11
-#define TBL_ID_CTR_DFX_S32_SM_INST 20
-
-#define TBL_ID_CTR_DFX_PAIR_SM_NODE 10
-#define TBL_ID_CTR_DFX_PAIR_SM_INST 24
-
-#define TBL_ID_CTR_DFX_S64_SM_NODE 11
-#define TBL_ID_CTR_DFX_S64_SM_INST 21
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)) && \
- (!defined(__FPGA__)))
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 21
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 22
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 23
-
-#define TBL_ID_FUNC_CFG_SM_NODE 11
-#define TBL_ID_FUNC_CFG_SM_INST 1
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 11
-#define TBL_ID_TRUNK_FWD_SM_INST 2
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 11
-#define TBL_ID_VLAN_FILTER_SM_INST 3
-
-#define TBL_ID_ELB_SM_NODE 11
-#define TBL_ID_ELB_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 11
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 5
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 11
-#define TBL_ID_RSS_CONTEXT_SM_INST 6
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 11
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 7
-
-#define TBL_ID_VTEP_IP_SM_NODE 11
-#define TBL_ID_VTEP_IP_SM_INST 8
-
-#define TBL_ID_NAT_SM_NODE 11
-#define TBL_ID_NAT_SM_INST 9
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 10
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 11
-#define TBL_ID_MISC_LRO_AGING_SM_INST 11
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 12
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 11
-#define TBL_ID_MISC_CQE_AGING_SM_INST 13
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 11
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 14
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 15
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 16
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 17
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 41
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 43
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 44
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 45
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 46
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 47
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 51
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 11
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 11
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#else
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 13
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 14
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 15
-
-#define TBL_ID_FUNC_CFG_SM_NODE 10
-#define TBL_ID_FUNC_CFG_SM_INST 16
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 10
-#define TBL_ID_TRUNK_FWD_SM_INST 17
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 10
-#define TBL_ID_VLAN_FILTER_SM_INST 18
-
-#define TBL_ID_ELB_SM_NODE 10
-#define TBL_ID_ELB_SM_INST 19
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 20
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 10
-#define TBL_ID_RSS_CONTEXT_SM_INST 21
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 10
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 22
-
-#define TBL_ID_VTEP_IP_SM_NODE 10
-#define TBL_ID_VTEP_IP_SM_INST 23
-
-#define TBL_ID_NAT_SM_NODE 10
-#define TBL_ID_NAT_SM_INST 24
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 25
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 10
-#define TBL_ID_MISC_LRO_AGING_SM_INST 26
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 27
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 10
-#define TBL_ID_MISC_CQE_AGING_SM_INST 28
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 10
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 29
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 40
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 41
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 43
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 44
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 45
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 46
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 47
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 51
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 52
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 53
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 10
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 10
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#endif
-
-#define TBL_ID_MISC_RSS_HASH_SM_NODE TBL_ID_MISC_RSS_HASH0_SM_NODE
-#define TBL_ID_MISC_RSS_HASH_SM_INST TBL_ID_MISC_RSS_HASH0_SM_INST
-
-/*rx cqe checksum err*/
-#define NIC_RX_CSUM_IP_CSUM_ERR BIT(0)
-#define NIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
-#define NIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
-#define NIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
-#define NIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
-#define NIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
-#define NIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
-#define NIC_RX_CSUM_HW_BYPASS_ERR BIT(7)
-
-typedef struct tag_log_ctrl {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mod_name:4;
- u32 log_level:4;
- u32 rsvd:8;
- u32 line_num:16;
-#else
- u32 line_num:16;
- u32 rsvd:8;
- u32 log_level:4;
- u32 mod_name:4;
-#endif
-} log_ctrl;
-
-/**
- * 1. Bank GPA addresses are HOST-based: every host has 4 bank GPAs,
- *    32B per host, 4*32B in total.
- * 2. Two global entries, index 5 and index 6, are allocated to store
- *    the bank GPAs (note that indexing starts at 0).
- *    The top 32B of index 5 store the bank GPAs of host 0 and the
- *    remaining 32B store those of host 1; the top 32B of index 6 store
- *    the bank GPAs of host 2 and the remaining 32B store those of host 3.
- *    The bank GPAs of each host are laid out in the format below.
- */
-typedef struct tag_sml_global_bank_gpa {
- u32 bank0_gpa_h32;
- u32 bank0_gpa_l32;
-
- u32 bank1_gpa_h32;
- u32 bank1_gpa_l32;
-
- u32 bank2_gpa_h32;
- u32 bank2_gpa_l32;
-
- u32 bank3_gpa_h32;
- u32 bank3_gpa_l32;
-} global_bank_gpa_s;
-
-/**
- * Struct name: sml_global_table_s
- * @brief: global_table structure
- * Description: global configuration table
- */
-typedef struct tag_sml_global_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 port_mode:1; /*portmode:0-eth;1-fic */
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* unicast/multicastmode:0-drop;
- * 1-broadcastinvlandomain
- */
- u32 un_mc_mode:1;
- /* maclearnenable:1-enable */
- u32 mac_learn_en:1;
- u32 qcn_en:1;
- u32 esl_run_flag:1;
-			/* 1-special protocol pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 vf_mask:4;
- u32 dif_ser_type:2;
- u32 rsvd0:1;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd0:1;
- u32 dif_ser_type:2;
- u32 vf_mask:4;
-			/*1-special protocol pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 esl_run_flag:1;
- u32 qcn_en:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- /*unicast/multicastmode:0-drop;1-broadcastinvlandomain*/
- u32 un_mc_mode:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- u32 port_mode:1; /*portmode:0-eth;1-fic */
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
- u32 xrc_pl_dec:1;
- u32 sq_cqn:20;
- u32 qpc_stg:1;
- u32 qpc_state_err:1;
- u32 qpc_wb_flag:1;
-#else
- u32 qpc_wb_flag:1;
- u32 qpc_state_err:1;
- u32 qpc_stg:1;
- u32 sq_cqn:20;
- u32 xrc_pl_dec:1;
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop_cause_id:16;
- u32 pkt_len:16;
-#else
- u32 pkt_len:16;
- u32 drop_cause_id:16;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- u8 fcoe_vf_table[12];
-
- union {
- struct {
- /* [31:30]Pipeline number mode. */
- u32 cfg_mode_pn:2;
- /* [29:28]initial default fq mode for traffic
- * from rx side
- */
- u32 cfg_mode_init_def_fq:2;
- /* [27:16]base fqid for initial default fqs
- * (for packest from rx side only).
- */
- u32 cfg_base_init_def_fq:12;
- /* [15:15]push doorbell as new packet to tile
- * via command path enable.
- */
- u32 cfg_psh_msg_en:1;
- /* [14:14]1,enable asc for scanning
- * active fq.0,disable.
- */
- u32 enable_asc:1;
- /* [13:13]1,enable pro for commands process.0,disable.*/
- u32 enable_pro:1;
- /* [12:12]1,ngsf mode.0,ethernet mode. */
- u32 cfg_ngsf_mod:1;
- /* [11:11]Stateful process enable. */
- u32 enable_stf:1;
- /* [10:9]initial default fq mode for
- * traffic from tx side.
- */
- u32 cfg_mode_init_def_fq_tx:2;
- /* [8:0]maximum allocable oeid configuration. */
- u32 cfg_max_oeid:9;
- } bs;
- u32 value;
- } fq_mode;
-
- u32 rsvd2[8];
-} sml_global_table_s;
-
-/**
- * Struct name: sml_fic_config_table_s
- * @brief: global_table structure
- * Description: global configuration table
- */
-typedef struct tag_sml_fic_config_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- u32 rsvd:12;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd:12;
- u32 mac_learn_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd2[14];
-} sml_fic_config_table_s;
-
-/**
- * Struct name: sml_ucode_version_info_table_s
- * @brief: microcode version information structure
- * Description: global configuration table entry data structure of index 1
- */
-typedef struct tag_sml_ucode_version_info_table {
- u32 ucode_version[4];
- u32 ucode_compile_time[5];
- u32 rsvd[7];
-} sml_ucode_version_info_table_s;
-
-/**
- * Struct name: sml_funcfg_tbl_s
- * @brief: Function Configuration Table
- * Description: Function Configuration attribute table
- */
-typedef struct tag_sml_funcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* function valid: 0-invalid; 1-valid */
- u32 valid:1;
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- /* lli enable: 0-disable; 1-enable */
- u32 lli_en:1;
- /* rss enable: 0-disable; 1-enable */
- u32 rss_en:1;
- /* rx vlan offload enable: 0-disable; 1-enable */
- u32 rxvlan_offload_en:1;
- /* tso local coalesce enable: 0-disable; 1-enable */
- u32 tso_local_coalesce:1;
- u32 rsvd1:1;
- u32 rsvd2:1;
- /* qos rx car enable: 0-disable; 1-enable */
- u32 qos_rx_car_en:1;
- /* mac filter enable: 0-disable; 1-enable */
- u32 mac_filter_en:1;
- /* ipmac filter enable: 0-disable; 1-enable */
- u32 ipmac_filter_en:1;
- /* ethtype filter enable: 0-disable; 1-enable */
- u32 ethtype_filter_en:1;
- /* mc bc limit enable: 0-disable; 1-enable */
- u32 mc_bc_limit_en:1;
- /* acl tx enable: 0-disable; 1-enable */
- u32 acl_tx_en:1;
- /* acl rx enable: 0-disable; 1-enable */
- u32 acl_rx_en:1;
- /* ovs function enable: 0-disable; 1-enable */
- u32 ovs_func_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- /* fic car enable: 0-disable; 1-enable */
- u32 fic_car_en:1;
- u32 tso_en:1;
- u32 nic_rx_mode:5; /* nic_rx_mode:
- * 0b00001: unicast mode
- * 0b00010: multicast mode
- * 0b00100: broadcast mode
- * 0b01000: all multicast mode
-					 * 0b10000: promisc mode
- */
- u32 rsvd4:3;
- u32 def_pri:3; /* default priority */
- /* host id: [0~3]. support up to 4 Host. */
- u32 host_id:2;
-#else
- u32 host_id:2;
- u32 def_pri:3;
- u32 rsvd4:3;
- u32 nic_rx_mode:5;
- u32 tso_en:1;
- u32 fic_car_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_func_en:1;
- u32 acl_rx_en:1;
- u32 acl_tx_en:1;
- u32 mc_bc_limit_en:1;
- u32 ethtype_filter_en:1;
- u32 ipmac_filter_en:1;
- u32 mac_filter_en:1;
- u32 qos_rx_car_en:1;
- u32 rsvd2:1;
- u32 rsvd1:1;
- u32 tso_local_coalesce:1;
- u32 rxvlan_offload_en:1;
- u32 rss_en:1;
- u32 lli_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mtu:16; /* mtu value: [64-15500] */
- u32 rsvd1:1;
- /* vlan mode: 0-all; 1-access; 2-trunk;
-			 * 3-hybrid(unsupported); 4-qinq port;
- */
- u32 vlan_mode:3;
- u32 vlan_id:12; /* vlan id: [0~4095] */
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd1:1;
- u32 mtu:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1; /* lli mode */
- /* er forward trunk type: 0-ethernet type, 1-fic type */
- u32 er_fwd_trunk_type:1;
- /* er forward trunk mode:
- * 0-standby; 1-smac; 2-dmac; 3-smacdmac; 4-sip; 5-dip;
- * 6-sipdip; 7-5tuples; 8-lacp
- */
- u32 er_fwd_trunk_mode:4;
-			/* edge relay mode: 0-VEB; 1-VEPA(unsupported);
-			 * 2-Multi-Channel(unsupported)
- */
- u32 er_mode:2;
- /* edge relay id: [0~15]. support up to 16 er. */
- u32 er_id:4;
- /* er forward type: 2-port; 3-fic;
-			 * 4-trunk; other-unsupported
- */
- u32 er_fwd_type:4;
- /* er forward id:
- * fwd_type=2: forward ethernet port id
- * fwd_type=3: forward fic id(tb+tp)
- * fwd_type=4: forward trunk id
- */
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 pfc_en:1;
- u32 rsvd1:7;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 rsvd2:2;
- u32 roce_en:1;
- u32 iwarp_en:1;
- u32 fcoe_en:1;
- u32 toe_en:1;
- u32 rsvd3:8;
- u32 ethtype_group_id:8;
-#else
- u32 ethtype_group_id:8;
- u32 rsvd3:8;
- u32 toe_en:1;
- u32 fcoe_en:1;
- u32 iwarp_en:1;
- u32 roce_en:1;
- u32 rsvd2:2;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 rsvd1:7;
- u32 pfc_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 vni:24;
-#else
- u32 vni:24;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 rq_thd:13;
- u32 host_car_id:11; /* host car id */
-#else
- u32 host_car_id:11;
- u32 rq_thd:13;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw6;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:5;
- u32 fic_uc_car_id:11; /* fic unicast car id */
- u32 rsvd2:5;
- u32 fic_mc_car_id:11; /* fic multicast car id */
-#else
- u32 fic_mc_car_id:11;
- u32 rsvd2:5;
- u32 fic_uc_car_id:11;
- u32 rsvd1:5;
-#endif
- } fic_bs;
-
- u32 value;
- } dw7;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* safe group identifier valid: 0-invalid; 1-valid */
- u32 sg_id_valid:1;
- u32 sg_id:10; /* safe group identifier */
- u32 rsvd9:1;
- /* rq priority enable: 0-disable; 1-enable */
- u32 rq_pri_en:1;
- /* rq priority num: 0-1pri; 1-2pri; 2-4pri; 3-8pri */
- u32 rq_pri_num:3;
- /* one wqe buffer size, default is 2K bytes */
- u32 rx_wqe_buffer_size:16;
-#else
- u32 rx_wqe_buffer_size:16;
- u32 rq_pri_num:3;
- u32 rq_pri_en:1;
- u32 rsvd9:1;
- u32 sg_id:10;
- u32 sg_id_valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw8;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* IPv4 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv4_en:1;
- /* IPv6 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv6_en:1;
- /* LRO pkt max wqe buffer number */
- u32 lro_max_wqe_num:6;
- /* Each group occupies 3bits,
- * 8 group share allocation 24bits,
- * group 0 corresponds to the low 3bits
- */
- u32 vlan_pri_map_group:24;
-#else
- u32 vlan_pri_map_group:24;
- u32 lro_max_wqe_num:6;
- u32 lro_ipv6_en:1;
- u32 lro_ipv4_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw9;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_group_id:4;
- u32 lli_frame_size:12;
- u32 smac_h16:16;
-#else
- u32 smac_h16:16;
- u32 lli_frame_size:12;
- u32 rss_group_id:4;
-#endif
- } bs;
-
- u32 value;
- } dw10;
-
- u32 smac_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 oqid:16;
- u32 vf_map_pf_id:4;
- /*lro change; 0:changing 1:change done */
- u32 lro_change_flag:1;
- u32 rsvd11:1;
- u32 base_qid:10;
-#else
- u32 base_qid:10;
- u32 rsvd11:1;
- u32 lro_change_flag:1;
- u32 vf_map_pf_id:4;
- u32 oqid:16;
-#endif
- } bs;
-
- u32 value;
- } dw12;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:2;
- u32 cfg_rq_depth:6;
- u32 cfg_q_num:6;
- u32 fc_port_id:4;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 fc_port_id:4;
- u32 cfg_q_num:6;
- u32 cfg_rq_depth:6;
- u32 rsvd1:2;
-#endif
- } bs;
-
- u32 value;
- } dw13;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
-
- } dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:2;
- u32 bond3_hash_policy:3;
- u32 bond3_mode:3;
- u32 rsvd2:2;
- u32 bond2_hash_policy:3;
- u32 bond2_mode:3;
- u32 rsvd1:2;
- u32 bond1_hash_policy:3;
- u32 bond1_mode:3;
- u32 rsvd0:2;
- u32 bond0_hash_policy:3;
- u32 bond0_mode:3;
-#else
- u32 bond0_mode:3;
- u32 bond0_hash_policy:3;
- u32 rsvd0:2;
- u32 bond1_mode:3;
- u32 bond1_hash_policy:3;
- u32 rsvd1:2;
- u32 bond2_mode:3;
- u32 bond2_hash_policy:3;
- u32 rsvd2:2;
- u32 bond3_mode:3;
- u32 bond3_hash_policy:3;
- u32 rsvd3:2;
-#endif
- } bs;
-
- u32 value;
-
- } dw15;
-} sml_funcfg_tbl_s;
-
-/**
- * Struct name: sml_portcfg_tbl_s
- * @brief: Port Configuration Table
- * Description: Port Configuration attribute table
- */
-typedef struct tag_sml_portcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1; /* valid:0-invalid; 1-valid */
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- u32 trunk_en:1; /* trunk enable: 0-disable; 1-enable */
- /* broadcast suppression enable: 0-disable; 1-enable */
- u32 bc_sups_en:1;
- /* unknown multicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_mc_sups_en:1;
- /* unknown unicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_uc_sups_en:1;
- u32 ovs_mirror_tx_en:1;
- /* ovs port enable: 0-disable; 1-enable */
- u32 ovs_port_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 qcn_en:1; /* qcn enable: 0-disable; 1-enable */
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 def_pri:3; /* default priority */
- u32 rsvd3:2;
-			/* edge relay mode: 0-VEB; 1-VEPA(unsupported);
-			 * 2-Multi-Channel(unsupported)
- */
- u32 er_mode:2;
- /* edge relay identifier: [0~15]. support up to 16 er */
- u32 er_id:4;
- u32 trunk_id:8; /* trunk identifier: [0~255] */
-#else
- u32 trunk_id:8;
- u32 er_id:4;
- u32 er_mode:2;
- u32 rsvd3:2;
- u32 def_pri:3;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 ucapture_en:1;
- u32 qcn_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 ovs_port_en:1;
- u32 ovs_mirror_tx_en:1;
- u32 un_uc_sups_en:1;
- u32 un_mc_sups_en:1;
- u32 bc_sups_en:1;
- u32 trunk_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd2:2;
- u32 mtu:14;
- u32 rsvd3:1;
- u32 vlan_mode:3;
- u32 vlan_id:12;
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd3:1;
- u32 mtu:14;
- u32 rsvd2:2;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* q7_cos : ... : q0_cos = 4bits : ... : 4bits */
- u32 ovs_queue_cos;
-#else
- u32 ovs_queue_cos;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:10;
- u32 un_mc_car_id:11;
- u32 un_uc_car_id:11;
-#else
- u32 un_uc_car_id:11;
- u32 un_mc_car_id:11;
- u32 rsvd1:10;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd6:5;
- u32 bc_car_id:11;
- u32 pf_promiscuous_bitmap:16;
-#else
- u32 pf_promiscuous_bitmap:16;
- u32 bc_car_id:11;
- u32 rsvd6:5;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 fc_map;
-
- } fcoe_bs;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 start_queue:8;
- u32 queue_size:8;
- u32 mirror_func_id:16;
-#else
- u32 mirror_func_id:16;
- u32 queue_size:8;
- u32 start_queue:8;
-#endif
- } ovs_mirror_bs;
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 vlan;
- u16 dmac_h16;
-#else
- u16 dmac_h16;
- u16 vlan;
-#endif
- } fcoe_bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 dmac_l32;
-
- } fcoe_bs;
- u32 value;
- } dw7;
-
-} sml_portcfg_tbl_s;
-
-/**
- * Struct name: sml_taggedlist_tbl_s
- * @brief: Tagged List Table
- * Description: VLAN filtering Trunk/Hybrid type tagged list table
- */
-typedef struct tag_sml_taggedlist_tbl {
- u32 bitmap[TBL_ID_TAGGEDLIST_BITMAP32_NUM];
-} sml_taggedlist_tbl_s;
-
-/**
- * Struct name: sml_untaggedlist_tbl_s
- * @brief: Untagged List Table
- * Description: VLAN filtering Hybrid type Untagged list table
- */
-typedef struct tag_sml_untaggedlist_tbl {
- u32 bitmap[TBL_ID_UNTAGGEDLIST_BITMAP32_NUM];
-} sml_untaggedlist_tbl_s;
-
-/**
- * Struct name: sml_trunkfwd_tbl_s
- * @brief: Trunk Forward Table
- * Description: port aggregation Eth-Trunk forwarding table
- */
-typedef struct tag_sml_trunkfwd_tbl {
- u16 fwd_id[TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM]; /* dw0-dw15 */
-} sml_trunkfwd_tbl_s;
-
-/**
- * Struct name: sml_mac_tbl_head_u
- * @brief: Mac table request/response head
- * Description: MAC table, Hash API header
- */
-typedef union tag_sml_mac_tbl_head {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- u32 instance_id:6;
- u32 opid:5;
- u32 A:1;
- u32 S:1;
- u32 rsvd:14;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:14;
- u32 S:1;
- u32 A:1;
- u32 opid:5;
- u32 instance_id:6;
- u32 src:5;
-#endif
- } req_bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 code:2;
- u32 subcode:2;
- u32 node_index:28;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 node_index:28;
- u32 subcode:2;
- u32 code:2;
-#endif
- } rsp_bs;
-
- u32 value;
-} sml_mac_tbl_head_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_key_u
- * @brief: Mac Table Key
- * Description: MAC table key
- */
-typedef union tag_sml_mac_tbl_8_4_key {
- struct {
- u32 val0;
- u32 val1;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac_h16:16;
-
- u32 mac_m16:16;
- u32 mac_l16:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac_h16:16;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
- } bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac0:8;
- u32 mac1:8;
-
- u32 mac2:8;
- u32 mac3:8;
- u32 mac4:8;
- u32 mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac1:8;
- u32 mac0:8;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac5:8;
- u32 mac4:8;
- u32 mac3:8;
- u32 mac2:8;
-#endif
- } mac_bs;
-} sml_mac_tbl_8_4_key_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_item_u
- * @brief: Mac Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_mac_tbl_8_4_item {
- u32 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd:10;
- u32 host_id:2;
- u32 fwd_type:4;
- u32 fwd_id:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 host_id:2;
- u32 rsvd:10;
-#endif
- } bs;
-} sml_mac_tbl_8_4_item_u;
-
-/**
- * Struct name: sml_mac_tbl_key_item_s
- * @brief: Mac Table( 8 + 4 )
- * Description: MAC table Key + Item
- */
-typedef struct tag_sml_mac_tbl_8_4 {
- sml_mac_tbl_head_u head;
- sml_mac_tbl_8_4_key_u key;
- sml_mac_tbl_8_4_item_u item;
-} sml_mac_tbl_8_4_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_key_s
- * @brief: Vtep Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_key {
- u32 vtep_remote_ip;
- u32 rsvd;
-} sml_vtep_tbl_8_20_key_s;
-
-/**
- * Struct name: dmac_smac_u
- * @brief: Dmac & Smac for VxLAN encapsulation
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_dmac_smac {
- u16 mac_addr[6];
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 d_mac0:8;
- u16 d_mac1:8;
- u16 d_mac2:8;
- u16 d_mac3:8;
-
- u16 d_mac4:8;
- u16 d_mac5:8;
- u16 s_mac0:8;
- u16 s_mac1:8;
-
- u16 s_mac2:8;
- u16 s_mac3:8;
- u16 s_mac4:8;
- u16 s_mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 d_mac1:8;
- u16 d_mac0:8;
- u16 d_mac3:8;
- u16 d_mac2:8;
-
- u16 d_mac5:8;
- u16 d_mac4:8;
- u16 s_mac1:8;
- u16 s_mac0:8;
-
- u16 s_mac3:8;
- u16 s_mac2:8;
- u16 s_mac5:8;
- u16 s_mac4:8;
-#endif
- } bs;
-} dmac_smac_u;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_item_u
- * @brief: Vtep Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_item {
- dmac_smac_u dmac_smac;
- u32 source_ip;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 rsvd:12;
- u32 vlan:16; /* The PRI*/
-#else
- u32 vlan:16; /* The PRI*/
- u32 rsvd:12;
- u32 er_id:4;
-#endif
- } bs;
-
- u32 value;
- } misc;
-} sml_vtep_tbl_8_20_item_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_s
- * @brief: Vtep Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vtep_tbl_8_20_key_s key;
- sml_vtep_tbl_8_20_item_s item;
-} sml_vtep_tbl_8_20_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_key_s
- * @brief: Vtep Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_key {
- u32 udp_dest_port;
- u32 rsvd;
-} sml_vxlan_udp_portcfg_4_8_key_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_item_u
- * @brief: Vtep Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_item {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 odp_port:12;
- u32 dp_id:2;
- u32 resvd:20;
-#else
- u32 resvd:20;
- u32 dp_id:2;
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vxlan_udp_portcfg_4_8_item_s;
-
-/**
- * Struct name: sml_vxlan_udp_portcfg_4_8_s
- * @brief: Vxlan Dest Udp Port Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vxlan_udp_portcfg_4_8_key_s key;
- sml_vxlan_udp_portcfg_4_8_item_s item;
-} sml_vxlan_udp_portcfg_4_8_s;
-
-/**
- * Struct name: sml_vtep_er_info_s
- * @brief: Vtep Er Info Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_er_info {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- /* er_id as LT index but also used as entries,
- * facilitating service
- */
- u32 er_id:4;
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /* ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vtep_er_info_s;
-
-/**
- * Struct name: sml_logic_port_cfg_tbl_s
- * @brief: Logic Port Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sm_logic_port_cfg {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
- u32 dp_id:2; /* datapath id */
- u32 er_id:4;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 resvd:13;
-#else
- u32 resvd:13;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 er_id:4;
- u32 dp_id:2; /* datapath id */
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:1;
- u32 er_fwd_trunk_type:1;
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2;
- u32 er_id:4;
- u32 er_fwd_type:4;
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 rsvd4:1;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-} sml_logic_port_cfg_tbl_s;
-
-/* vport stats counter */
-typedef struct tag_vport_stats_ctr {
- u16 rx_packets; /* total packets received */
- u16 tx_packets; /* total packets transmitted */
- u16 rx_bytes; /* total bytes received */
- u16 tx_bytes; /* total bytes transmitted */
- u16 rx_errors; /* bad packets received */
- u16 tx_errors; /* packet transmit problems */
- u16 rx_dropped; /* no space in linux buffers */
- u16 tx_dropped; /* no space available in linux */
-} vport_stats_ctr_s;
-
-/**
- * Struct name: vport_s
- * @brief: Datapath Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_vport {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* dw0 */
- u32 valid:1;
- u32 learn_en:1;
- u32 type:4;
- u32 dp_id:2;
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- u32 rsvd:8;
-
- /* dw1 */
- u32 srctagl:12; /* the function used by parent context */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
-
- /* dw2 */
- u32 odp_port:12; /* on datapath port id */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
-#else
- /* dw0 */
- u32 rsvd:8;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 dp_id:2;
- u32 type:4;
- u32 learn_en:1;
- u32 valid:1;
-
- /* dw1 */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
- u32 srctagl:12; /* the function used by parent context */
-
- /* dw2 */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
- u32 odp_port:12; /* on datapath port id */
-#endif
-
- /* dw3 is er information and it is valid only
- * when mapping_type=1(logic port)
- */
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- u32 er_id:4; /* ERID */
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /*ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- /* dw4~dw7 */
- vport_stats_ctr_s stats; /* vport stats counters */
-
-} vport_s;
-
-/**
- * Struct name: sml_elb_tbl_elem_u
- * @brief: ELB Table Elem
- * Description: ELB leaf table members
- */
-typedef union tag_sml_elb_tbl_elem {
- struct {
- u32 fwd_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 fwd_type:4;
- u32 fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index_next:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index_next:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_elb_tbl_elem_u;
-
-/**
- * Struct name: sml_elb_tbl_s
- * @brief ELB Table
- * Description: ELB leaf table Entry
- */
-typedef struct tag_sml_elb_tbl {
- sml_elb_tbl_elem_u elem[TBL_ID_ELB_ENTRY_ELEM_NUM];
-} sml_elb_tbl_s;
-
-/**
- * Struct name: sml_vlan_tbl_elem_u
- * @brief: VLAN Table Elem
- * Description: VLAN broadcast table members
- */
-typedef union tag_sml_vlan_tbl_elem {
- u16 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 learn_en:1;
- u16 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 elb_index:15;
- u16 learn_en:1;
-#endif
- } bs;
-} sml_vlan_tbl_elem_u;
-
-/**
- * Struct name: sml_vlan_tbl_s
- * @brief: VLAN Table
- * Entry Description: VLAN broadcast table
- */
-typedef struct tag_sml_vlan_tbl {
- sml_vlan_tbl_elem_u elem[TBL_ID_VLAN_ENTRY_ELEM_NUM];
-} sml_vlan_tbl_s;
-
-/**
- * Struct name: sml_multicast_tbl_array_u
- * @brief: Multicast Table Elem
- * Description: multicast table members
- */
-typedef union tag_sml_multicast_tbl_elem {
- struct {
- u32 route_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 route_fwd_type:4;
- u32 route_fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 route_fwd_id:16;
- u32 route_fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_multicast_tbl_elem_u;
-
-/* Struct name: sml_multicast_tbl_s
- * @brief: Multicast Table
- * Entry Description: multicast table
- */
-typedef struct tag_sml_multicast_tbl {
- sml_multicast_tbl_elem_u elem[TBL_ID_MULTICAST_ENTRY_ELEM_NUM];
-} sml_multicast_tbl_s;
-
-/* Struct name: sml_observe_port_s
- * @brief: Observe Port Table
- * Description: observing port entries defined
- */
-typedef struct tag_sml_observe_port {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 rsvd0:11;
- u32 dst_type:4;
- u32 dst_id:16;
-#else
- u32 dst_id:16;
- u32 dst_type:4;
- u32 rsvd0:11;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:4;
- u32 vlan_id:12;
- u32 rsvd2:2;
- u32 cut_len:14;
-#else
- u32 cut_len:14;
- u32 rsvd2:2;
- u32 vlan_id:12;
- u32 rsvd1:4;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd_pad[2];
-} sml_observe_port_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_key_s
- * @brief ipmac filter table key
- * Description: ipmac filter key define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 func_id:16;
- u32 mac_h16:16;
-#else
- u32 mac_h16:16;
- u32 func_id:16;
-#endif
-
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mac_m16:16;
- u32 mac_l16:16;
-#else
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
-
- u32 ip;
- u32 rsvd;
-} sml_ipmac_tbl_16_12_key_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_item_s
- * @brief ipmac filter table item
- * Description: ipmac filter item define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_item {
- u32 rsvd[3];
-} sml_ipmac_tbl_16_12_item_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_key_s
- * @brief: ethtype filter table key
- * Description: ethtype filter key define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 group_id:16;
- u32 ethtype:16;
-#else
- u32 ethtype:16;
- u32 group_id:16;
-#endif
-
- u32 rsvd;
-} sml_ethtype_tbl_8_4_key_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_item_s
- * @brief ethtype filter table item
- * Description: ethtype filter item define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_item {
- u32 rsvd;
-} sml_ethtype_tbl_8_4_item_s;
-
-/* Direction of packets the ACL records for DFX */
-typedef enum {
- ACL_PKT_TX = 0,
- ACL_PKT_RX = 1,
-} sml_acl_pkt_dir_e;
-
-/* ACL policy table item*/
-typedef struct tag_sml_acl_policy_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop:1;
- u32 car_en:1;
- u32 car_id:12;
- u32 counter_type:2;
- u32 counter_id:16;
-#else
- u32 counter_id:16;
- u32 counter_type:2;
- u32 car_id:12;
- u32 car_en:1;
- u32 drop:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:7;
- u32 mirrior_en:1;
- u32 observer_port:10;
- u32 change_dscp:1;
- u32 new_dscp:6;
- u32 change_pkt_pri:1;
- u32 new_pkt_pri:3;
- u32 redirect_en:3;
-#else
- u32 redirect_en:3;
- u32 new_pkt_pri:3;
- u32 change_pkt_pri:1;
- u32 new_dscp:6;
- u32 change_dscp:1;
- u32 observer_port:10;
- u32 mirrior_en:1;
- u32 rsvd1:7;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 redirect_data;
- u32 rsvd2;
-} sml_acl_policy_tbl_s;
-
-typedef struct tag_sml_acl_ipv4_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Alignment padding; match_key_type and
- * the later fields form the KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /* dw1&dw2 */
- u32 sipv4;
- u32 dipv4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd:14;
- u32 padding:16;
-#else
- u32 padding:16;
- u32 rsvd:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-} sml_acl_ipv4_key_s;
-
-typedef struct tag_sml_acl_ipv6_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Alignment padding; match_key_type and
- * the later fields form the KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /*dw1~dw4 */
- u32 sipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd1:14;
- u32 tid2:2;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 tid2:2;
- u32 rsvd1:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-
- /*dw6~dw9 */
- u32 dipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid3:2;
- u32 rsvd3:14;
- u32 tid4:2;
- u32 rsvd4:14;
-#else
- u32 rsvd4:14;
- u32 tid4:2;
- u32 rsvd3:14;
- u32 tid3:2;
-#endif
- } bs;
- u32 value;
- } dw10;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw11;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw12;
-
- u32 dw13;
- u32 dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid5:2;
- u32 rsvd5:14;
- u32 tid6:2;
- u32 rsvd6:14;
-#else
- u32 rsvd6:14;
- u32 tid6:2;
- u32 rsvd5:14;
- u32 tid5:2;
-#endif
- } bs;
- u32 value;
- } dw15;
-
- u32 dw16;
- u32 dw17;
- u32 dw18;
- u32 dw19;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid7:2;
- u32 rsvd7:30;
-#else
- u32 rsvd7:30;
- u32 tid7:2;
-#endif
- } bs;
- u32 value;
- } dw20;
-} sml_acl_ipv6_key_s;
-
-/**
- * Struct name: sml_voq_map_table_s
- * @brief: voq_map_table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_voq_map_table {
- u16 voq_base[8];
-} sml_voq_map_table_s;
-
-/**
- * Struct name: sml_rss_context_u
- * @brief: rss_context
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_context {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 udp_ipv4:1;
- u32 udp_ipv6:1;
- u32 ipv4:1;
- u32 tcp_ipv4:1;
- u32 ipv6:1;
- u32 tcp_ipv6:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6_ext:1;
- u32 valid:1;
- u32 rsvd1:13;
- u32 def_qpn:10;
-#else
- u32 def_qpn:10;
- u32 rsvd1:13;
- u32 valid:1;
- u32 tcp_ipv6_ext:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6:1;
- u32 ipv6:1;
- u32 tcp_ipv4:1;
- u32 ipv4:1;
- u32 udp_ipv6:1;
- u32 udp_ipv4:1;
-#endif
- } bs;
-
- u32 value;
-} sml_rss_context_u;
-
-typedef struct tag_sml_rss_context_tbl {
- sml_rss_context_u element[TBL_ID_RSS_CONTEXT_NUM];
-} sml_rss_context_tbl_s;
-
-/**
- * Struct name: sml_rss_hash_u
- * @brief: rss_hash
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_hash {
- u8 rq_index[256];
-} sml_rss_hash_u;
-
-typedef struct tag_sml_rss_hash_tbl {
- sml_rss_hash_u element[TBL_ID_RSS_HASH_NUM];
-} sml_rss_hash_tbl_s;
-
-/**
- * Struct name: sml_lli_5tuple_key_s
- * @brief: lli_5tuple_key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_key {
- union {
- struct {
-/** Define the struct bits */
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- /* The tile needs to fill the Dest */
- u32 rt:1;
- u32 key_size:2;
- /* determines which action the engine will take */
- u32 profile_id:3;
- /* indicates that the requestor expects
- * to receive response data
- */
- u32 op_id:5;
- u32 a:1;
- u32 rsvd:12;
- u32 vld:1;
- u32 xy:1;
- u32 at:1;
-#else
- u32 at:1;
- u32 xy:1;
- u32 vld:1;
- /* indicates that the requestor expects to
- * receive response data
- */
- u32 rsvd:12;
- /* determines which action the engine will take */
- u32 a:1;
- u32 op_id:5;
- u32 profile_id:3;
- u32 key_size:2;
- u32 rt:1;
- u32 src:5;
-#endif
- } bs;
-
-/* Define an unsigned member */
- u32 value;
- } dw0;
- union {
- struct {
- u32 rsvd:1;
- /* The tile needs to fill the Dest */
- u32 address:15;
-
- u32 table_type:5;
- u32 ip_type:1;
- u32 func_id:10;
- } bs;
-
- u32 value;
- } misc;
-
- u32 src_ip[4];
- u32 dst_ip[4];
-
- u16 src_port;
- u16 dst_port;
-
- u8 protocol;
- u8 tcp_flag;
- u8 fcoe_rctl;
- u8 fcoe_type;
- u16 eth_type;
-} sml_lli_5tuple_key_s;
-
-/**
- * Struct name: sml_lli_5tuple_rsp_s
- * @brief: lli_5tuple_rsp
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_rsp {
- union {
- struct {
- u32 state:4;
- u32 rsvd:28;
- } bs;
-
- u32 value;
- } dw0;
-
- u32 dw1;
-
- union {
- struct {
- u32 frame_size:16;
- u32 lli_en:8;
- u32 rsvd:8;
- } bs;
-
- u32 value;
- } dw2;
-
- u32 dw3;
-} sml_lli_5tuple_rsp_s;
-
-/**
- * Struct name: l2nic_rx_cqe_s.
- * @brief: l2nic_rx_cqe_s data structure.
- * Description:
- */
-typedef struct tag_l2nic_rx_cqe {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rx_done:1;
- u32 bp_en:1;
- u32 rsvd1:6;
- u32 lro_num:8;
- u32 checksum_err:16;
-#else
- u32 checksum_err:16;
- u32 lro_num:8;
- u32 rsvd1:6;
- u32 bp_en:1;
- u32 rx_done:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 length:16;
- u32 vlan:16;
-#else
- u32 vlan:16;
- u32 length:16;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_type:8;
- u32 rsvd0:2;
- u32 vlan_offload_en:1;
- u32 umbcast:2;
- u32 rsvd1:7;
- u32 pkt_types:12;
-#else
- u32 pkt_types:12;
- u32 rsvd1:7;
- u32 umbcast:2;
- u32 vlan_offload_en:1;
- u32 rsvd0:2;
- u32 rss_type:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
- u32 rss_hash_value;
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 if_1588:1;
- u32 if_tx_ts:1;
- u32 if_rx_ts:1;
- u32 rsvd:1;
- u32 msg_1588_type:4;
- u32 msg_1588_offset:8;
- u32 tx_ts_seq:16;
-#else
- u32 tx_ts_seq:16;
- u32 msg_1588_offset:8;
- u32 msg_1588_type:4;
- u32 rsvd:1;
- u32 if_rx_ts:1;
- u32 if_tx_ts:1;
- u32 if_1588:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 msg_1588_ts;
- } bs;
-
- struct {
- u32 rsvd0:12;
- /* for ovs. traffic type: 0-default l2nic pkt,
- * 1-fallback traffic, 2-miss upcall traffic,
- * 2-command
- */
- u32 traffic_type:4;
- /* for ovs. traffic from: vf_id,
- * only support traffic_type=0(default l2nic)
- * or 2(miss upcall)
- */
- u32 traffic_from:16;
- } ovs_bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
- u32 lro_ts;
- } bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 rsvd0;
- } bs;
-
- u32 localtag; /* for ovs */
-
- u32 value;
- } dw7;
-} l2nic_rx_cqe_s;
-
-typedef union tag_sml_global_queue_tbl_elem {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src_tag_l:16;
- u32 local_qid:8;
- u32 rsvd:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:8;
- u32 local_qid:8;
- u32 src_tag_l:16;
-#endif
- } bs;
-
- u32 value;
-} sml_global_queue_tbl_elem_u;
-
-typedef struct tag_sml_global_queue_tbl {
- sml_global_queue_tbl_elem_u element[TBL_ID_GLOBAL_QUEUE_NUM];
-} sml_global_queue_tbl_s;
-
-typedef struct tag_sml_dfx_log_tbl {
- u32 wr_init_pc_h32; /* Initial value of write_pc*/
- u32 wr_init_pc_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 state:8;
- u32 func_en:1;
- u32 srctag:12;
- u32 max_num:11; /* Data block highest value*/
-#else
- u32 max_num:11;
- u32 srctag:12;
- u32 func_en:1;
- u32 state:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- u32 ci_index;
-} sml_dfx_log_tbl_s;
-
-typedef struct tag_sml_glb_capture_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 max_num:15;
- u32 rsvd:16;
-#else
- u32 rsvd:16;
- u32 max_num:15;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- u32 discard_addr_h32;
- u32 discard_addr_l32;
-
- u32 rsvd0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 mode:5;
- u32 direct:2;
- u32 offset:8;
- u32 cos:3;
- u32 max_num:13;
-#else
- u32 max_num:13;
- u32 cos:3;
- u32 offset:8;
- u32 direct:2;
- u32 mode:5;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- u32 data_vlan;
-
- u32 condition_addr_h32;
- u32 condition_addr_l32;
-
-} sml_glb_capture_tbl_s;
-
-typedef struct tag_sml_cqe_addr_tbl {
- u32 cqe_first_addr_h32;
- u32 cqe_first_addr_l32;
- u32 cqe_last_addr_h32;
- u32 cqe_last_addr_l32;
-
-} sml_cqe_addr_tbl_s;
-
-/**
- * Struct name: sml_ucode_exec_info_tbl_s
- * @brief: ucode exception info Table
- * Description: microcode exception information table
- */
-typedef struct tag_ucode_exec_info_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 wptr_cpb_ack_str:4;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 pr_ret_vld:1;
- u32 oeid_pd_pkt:1;
- u32 rptr_cmd:4;
- u32 wptr_cmd:4;
- u32 src_tag_l:12;
-#else
- u32 src_tag_l:12;
- u32 wptr_cmd:4;
- u32 rptr_cmd:4;
- u32 oeid_pd_pkt:1;
- u32 pr_ret_vld:1;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 wptr_cpb_ack_str:4;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 fq:16;
- u32 exception_type:4;
- u32 rptr_cpb_ack_str:4;
- u32 header_oeid:8;
-#else
- u32 header_oeid:8;
- u32 rptr_cpb_ack_str:4;
- u32 exception_type:4;
- u32 fq:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 oeid_pd_data_l32;
- u32 oeid_pd_data_m32;
-} sml_ucode_exec_info_s;
-
-typedef struct rq_iq_mapping_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rqid:16;
- u32 iqid:8;
- u32 rsvd:8;
-#else
- u32 rsvd:8;
- u32 iqid:8;
- u32 rqid:16;
-#endif
- } bs;
- u32 value;
- } dw[4];
-} sml_rq_iq_mapping_tbl_s;
-
-/* nic_ucode_rq_ctx table define
- */
-typedef struct nic_ucode_rq_ctx {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 max_count:10;
- u32 cqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 wqe_tmpl:6;
- u32 psge_valid:1;
- u32 rsvd1:1;
- u32 owner:1;
- u32 ceq_en:1;
-#else
- u32 ceq_en:1;
- u32 owner:1;
- u32 rsvd1:1;
- u32 psge_valid:1;
- u32 wqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 cqe_tmpl:6;
- u32 max_count:10;
-#endif
- } bs;
- u32 dw0;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Interrupt number the L2NIC engine uses to notify SW
- * when it raises an interrupt instead of a CEQ
- */
- u32 int_num:10;
- u32 ceq_count:10;
- /* producer index */
- u32 pi:12;
-#else
- /* producer index */
- u32 pi:12;
- u32 ceq_count:10;
- /* Interrupt number the L2NIC engine uses to notify SW
- * when it raises an interrupt instead of a CEQ
- */
- u32 int_num:10;
-#endif
- } bs0;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* CEQ arm; the L2NIC engine clears it after sending a CEQ,
- * and the driver should set it via CMDQ after receiving all packets.
- */
- u32 ceq_arm:1;
- u32 eq_id:5;
- u32 rsvd2:4;
- u32 ceq_count:10;
- /* producer index */
- u32 pi:12;
-#else
- /* producer index */
- u32 pi:12;
- u32 ceq_count:10;
- u32 rsvd2:4;
- u32 eq_id:5;
- /* CEQ arm; the L2NIC engine clears it after sending a CEQ,
- * and the driver should set it via CMDQ after receiving all packets.
- */
- u32 ceq_arm:1;
-#endif
- } bs1;
- u32 dw1;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* consumer index */
- u32 ci:12;
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
-#else
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
- /* consumer index */
- u32 ci:12;
-#endif
- } bs2;
- u32 dw2;
- };
-
- /* WQE page address of current CI point to, low part */
- u32 ci_wqe_page_addr_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_min:7;
- u32 prefetch_max:11;
- u32 prefetch_cache_threshold:14;
-#else
- u32 prefetch_cache_threshold:14;
- u32 prefetch_max:11;
- u32 prefetch_min:7;
-#endif
- } bs3;
- u32 dw3;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:31;
- /* ownership of WQE */
- u32 prefetch_owner:1;
-#else
- /* ownership of WQE */
- u32 prefetch_owner:1;
- u32 rsvd3:31;
-#endif
- } bs4;
- u32 dw4;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_ci:12;
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
-#else
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
- u32 prefetch_ci:12;
-#endif
- } bs5;
- u32 dw5;
- };
-
- /* low part */
- u32 prefetch_ci_wqe_page_addr_lo;
- /* host mem GPA, high part */
- u32 pi_gpa_hi;
- /* host mem GPA, low part */
- u32 pi_gpa_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:9;
- u32 ci_cla_tbl_addr_hi:23;
-#else
- u32 ci_cla_tbl_addr_hi:23;
- u32 rsvd4:9;
-#endif
- } bs6;
- u32 dw6;
- };
-
- u32 ci_cla_tbl_addr_lo;
-
-} nic_ucode_rq_ctx_s;
-
-#define LRO_TSO_SPACE_SIZE (240) /* (15 * 16) */
-#define RQ_CTX_SIZE (48)
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_H__ */
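
For reference, the following minimal standalone sketch mirrors the
byte-order-guarded bitfield convention used throughout the header above
(e.g. sml_vlan_tbl_elem_u): each entry exposes the same storage both as
named bitfields ("bs") and as a raw word ("value"). The demo names are
hypothetical, and it assumes the GCC/Clang predefined
__BYTE_ORDER__/__ORDER_BIG_ENDIAN__ macros in place of the header's own
endian macros; it is a user-space illustration, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout of sml_vlan_tbl_elem_u from the header above. */
typedef union demo_vlan_tbl_elem {
	uint16_t value;			/* raw 16-bit view of the element */
	struct {
#if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
		uint16_t learn_en:1;	/* MAC learning enable */
		uint16_t elb_index:15;	/* index into the ELB leaf table */
#else
		uint16_t elb_index:15;
		uint16_t learn_en:1;
#endif
	} bs;
} demo_vlan_tbl_elem_u;

int main(void)
{
	demo_vlan_tbl_elem_u elem = { .value = 0 };

	elem.bs.learn_en = 1;
	elem.bs.elb_index = 42;

	/* Field-wise and raw views of the same 16-bit element. */
	printf("learn_en=%u elb_index=%u raw=0x%04x\n",
	       (unsigned int)elem.bs.learn_en,
	       (unsigned int)elem.bs.elb_index,
	       (unsigned int)elem.value);
	return 0;
}
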
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
deleted file mode 100644
index 39d0516c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0*/
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_PUB_H__
-#define __SML_TABLE_PUB_H__
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-/* Non-FPGA (ESL/EMU/EDA) specification */
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* ER specification*/
-#define L2_ER_SPEC (16)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (512)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (4096)
-#define TBL_ID_MULTICAST_SPEC (1024)
-#define TBL_ID_TRUNK_SPEC (256)
-#define TBL_ID_ELB_SPEC (18432)
-#define TBL_ID_TAGGEDLIST_SPEC (80)
-#define TBL_ID_UNTAGGEDLIST_SPEC (16)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (4096)
-
-#else /* FPGA scenario specifications */
-
-/* ER specification*/
-#define L2_ER_SPEC (4)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (64)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (256)
-#define TBL_ID_MULTICAST_SPEC (32)
-#define TBL_ID_TRUNK_SPEC (16)
-#define TBL_ID_ELB_SPEC (1152)
-#define TBL_ID_TAGGEDLIST_SPEC (20)
-#define TBL_ID_UNTAGGEDLIST_SPEC (4)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (1024)
-#endif
-
-/**
- * Number of entries elements defined
- */
-#define TBL_ID_ELB_ENTRY_ELEM_NUM 2
-#define TBL_ID_VLAN_ENTRY_ELEM_NUM 8
-#define TBL_ID_MULTICAST_ENTRY_ELEM_NUM 2
-#define TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM 32
-#define TBL_ID_TAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_UNTAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_GLOBAL_QUEUE_NUM 4
-#define TBL_ID_RSS_CONTEXT_NUM 4
-#define TBL_ID_RSS_HASH_NUM 4
-
-/**
- * NIC receiving mode defined
- */
-#define NIC_RX_MODE_UC 0x01 /* 0b00001 */
-#define NIC_RX_MODE_MC 0x02 /* 0b00010 */
-#define NIC_RX_MODE_BC 0x04 /* 0b00100 */
-#define NIC_RX_MODE_MC_ALL 0x08 /* 0b01000 */
-#define NIC_RX_MODE_PROMISC 0x10 /* 0b10000 */
-
-/**
- * Maximum number of HCAR
- */
-#define QOS_MAX_HCAR_NUM (12)
-
-/**
- * VLAN Table, Multicast Table, ELB Table Definitions
- * The Table index and sub id index
- */
-#define VSW_DEFAULT_VLAN0 (0)
-#define INVALID_ELB_INDEX (0)
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* Supports ESL/EMU/EDA 16 ER * 4K VLAN; one entry stores 8 VLANs */
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0xF) << 9) | (((vlan_id) & 0xFFF) >> 3))
-#else
-/* FPGA supports only 4 ER * 1K VLAN; one entry stores 8 VLANs */
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0x3) << 7) | (((vlan_id) & 0x3FF) >> 3))
-#endif
-#define GET_VLAN_ENTRY_SUBID(vlan_id) ((vlan_id) & 0x7)
-
-#define GET_MULTICAST_TABLE_INDEX(mc_id) ((mc_id) >> 1)
-#define GET_MULTICAST_ENTRY_SUBID(mc_id) ((mc_id) & 0x1)
-
-#define GET_ELB_TABLE_INDEX(elb_id) ((elb_id) >> 1)
-#define GET_ELB_ENTRY_SUBID(elb_id) ((elb_id) & 0x1)
-
-/**
- * taggedlist_table and untaggedlist_table access offset calculation
- */
-#define GET_TAGLIST_TABLE_INDEX(list_id, vlan_id) \
- (((list_id) << 5) | (((vlan_id) & 0xFFF) >> 7))
-#define GET_TAGLIST_TABLE_BITMAP_IDX(vlan_id) (((vlan_id) >> 5) & 0x3)
-#define GET_TAGLIST_TABLE_VLAN_BIT(vlan_id) \
- (0x1UL << ((vlan_id) & 0x1F))
-
-#define TRUNK_FWDID_NOPORT 0xFFFF
-
-/**
- * MAC type definition
- */
-typedef enum {
- MAC_TYPE_UC = 0,
- MAC_TYPE_BC,
- MAC_TYPE_MC,
- MAC_TYPE_RSV,
-} mac_type_e;
-
-/**
- * Ethernet port definition
- */
-typedef enum {
- MAG_ETH_PORT0 = 0,
- MAG_ETH_PORT1,
- MAG_ETH_PORT2,
- MAG_ETH_PORT3,
- MAG_ETH_PORT4,
- MAG_ETH_PORT5,
- MAG_ETH_PORT6,
- MAG_ETH_PORT7,
- MAG_ETH_PORT8,
- MAG_ETH_PORT9,
-} mag_eth_port_e;
-
-/**
- * vlan filter type defined
- */
-typedef enum {
- VSW_VLAN_MODE_ALL = 0,
- VSW_VLAN_MODE_ACCESS,
- VSW_VLAN_MODE_TRUNK,
- VSW_VLAN_MODE_HYBRID,
- VSW_VLAN_MODE_QINQ,
- VSW_VLAN_MODE_MAX,
-} vsw_vlan_mode_e;
-
-/**
- * MAC table query forwarding port type definition
- */
-typedef enum {
- VSW_FWD_TYPE_FUNCTION = 0, /* forward type function */
- VSW_FWD_TYPE_VMDQ, /* forward type function-queue(vmdq) */
- VSW_FWD_TYPE_PORT, /* forward type port */
- VSW_FWD_TYPE_FIC, /* forward type fic */
- VSW_FWD_TYPE_TRUNK, /* forward type trunk */
- VSW_FWD_TYPE_DP, /* forward type DP */
- VSW_FWD_TYPE_MC, /* forward type multicast */
-
- /* START: is not used and has to be removed */
- VSW_FWD_TYPE_BC, /* forward type broadcast */
- VSW_FWD_TYPE_PF, /* forward type pf */
- /* END: is not used and has to be removed */
-
- VSW_FWD_TYPE_NULL, /* forward type null */
-} vsw_fwd_type_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- VSW_ETRK_MODE_STANDBY,
- VSW_ETRK_MODE_SMAC,
- VSW_ETRK_MODE_DMAC,
- VSW_ETRK_MODE_SMACDMAC,
- VSW_ETRK_MODE_SIP,
- VSW_ETRK_MODE_DIP,
- VSW_ETRK_MODE_SIPDIP,
- VSW_ETRK_MODE_5TUPLES,
- VSW_ETRK_MODE_LACP,
- VSW_ETRK_MODE_MAX,
-} vsw_etrk_mode_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- TRUNK_MODE_STANDBY,
- TRUNK_MODE_SMAC,
- TRUNK_MODE_DMAC,
- TRUNK_MODE_SMACDMAC,
- TRUNK_MODE_SIP,
- TRUNK_MODE_DIP,
- TRUNK_MODE_SIPDIP,
- TRUNK_MODE_5TUPLES,
- TRUNK_MODE_SIPV6,
- TRUNK_MODE_DIPV6,
- TRUNK_MODE_SIPDIPV6,
- TRUNK_MODE_5TUPLESV6,
- TRUNK_MODE_LACP,
-} trunk_mode_s;
-
-/* ACL key type */
-enum {
- ACL_KEY_IPV4 = 0,
- ACL_KEY_IPV6
-};
-
-/* ACL filter action */
-enum {
- ACL_ACTION_PERMIT = 0,
- ACL_ACTION_DENY
-};
-
-/* ACL action button*/
-enum {
- ACL_ACTION_OFF = 0,
- ACL_ACTION_ON,
-};
-
-/* ACL statistic action*/
-enum {
- ACL_ACTION_NO_COUNTER = 0,
- ACL_ACTION_COUNT_PKT,
- ACL_ACTION_COUNT_PKT_LEN,
-};
-
-/* ACL redirect action*/
-enum {
- ACL_ACTION_FORWAR_UP = 1,
- ACL_ACTION_FORWAR_PORT,
- ACL_ACTION_FORWAR_NEXT_HOP,
- ACL_ACTION_FORWAR_OTHER,
-};
-
-enum {
- CEQ_TIMER_STOP = 0,
- CEQ_TIMER_START,
-};
-
-enum {
- CEQ_API_DISPATCH = 0,
- CEQ_API_NOT_DISPATCH,
-};
-
-enum {
- CEQ_MODE = 1,
- INT_MODE,
-};
-
-enum {
- ER_MODE_VEB,
- ER_MODE_VEPA,
- ER_MODE_MULTI,
- ER_MODE_NULL,
-};
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_PUB_H__ */
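
As a concrete illustration of the VLAN-table addressing defined above, this
short standalone sketch reuses the non-FPGA GET_VLAN_TABLE_INDEX and
GET_VLAN_ENTRY_SUBID macros verbatim: er_id selects a 512-entry block, the
upper bits of vlan_id pick the entry (eight VLANs per entry), and the low
three bits pick the element inside it. The surrounding program is a
hypothetical user-space demo.

#include <stdio.h>

/* Copied from the non-FPGA branch of hinic_sml_table_pub.h above. */
#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
	((((er_id) & 0xF) << 9) | (((vlan_id) & 0xFFF) >> 3))
#define GET_VLAN_ENTRY_SUBID(vlan_id) ((vlan_id) & 0x7)

int main(void)
{
	unsigned int er_id = 3, vlan_id = 100;

	/* vlan 100 on ER 3 -> entry (3 << 9) | (100 >> 3) = 1548, sub-id 4 */
	printf("er %u, vlan %u -> entry %u, sub-id %u\n",
	       er_id, vlan_id,
	       GET_VLAN_TABLE_INDEX(er_id, vlan_id),
	       GET_VLAN_ENTRY_SUBID(vlan_id));
	return 0;
}
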
--
1.8.3