Kernel
hulk inclusion
category: kabi
bugzilla: NA
CVE: CVE-2020-10741, CVE-2020-12826
---------------------------
Commit d1e7fd6462ca ("signal: Extend exec_id to 64bits") fixes
CVE-2020-10741 and CVE-2020-12826, but it introduces a kABI change
in struct task_struct. Fix the kABI breakage by instead adding two new
64-bit variables, parent_exec_id_u64 and self_exec_id_u64.
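For reference, a minimal sketch of the kABI-reservation pattern used here
(illustrative only; KABI_RESERVE() is assumed to expand to a reserved
64-bit placeholder slot, which is tree-specific):

struct example_task_struct {
	u32 parent_exec_id;           /* layout the existing kABI checksum saw */
	u32 self_exec_id;
	/* ... */
#ifndef __GENKSYMS__
	u64 parent_exec_id_u64;       /* new counters used by the fixed code paths */
	u64 self_exec_id_u64;
#else
	KABI_RESERVE(1)
	KABI_RESERVE(2)
#endif
};

Because genksyms runs with __GENKSYMS__ defined, it only ever sees the
KABI_RESERVE() placeholders, so the symbol CRCs stay unchanged while the
compiled kernel gets the 64-bit counters.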
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/exec.c | 1 +
include/linux/sched.h | 9 +++++++--
kernel/fork.c | 2 ++
kernel/signal.c | 2 +-
4 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 15d9974..19c0700 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1380,6 +1380,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread
group */
WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
+ WRITE_ONCE(current->self_exec_id_u64, current->self_exec_id_u64 + 1);
flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1d15ab4..302fa00 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -892,8 +892,8 @@ struct task_struct {
struct seccomp seccomp;
/* Thread group tracking: */
- u64 parent_exec_id;
- u64 self_exec_id;
+ u32 parent_exec_id;
+ u32 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
@@ -1212,8 +1212,13 @@ struct task_struct {
*/
randomized_struct_fields_end
+#ifndef __GENKSYMS__
+ u64 parent_exec_id_u64;
+ u64 self_exec_id_u64;
+#else
KABI_RESERVE(1)
KABI_RESERVE(2)
+#endif
KABI_RESERVE(3)
KABI_RESERVE(4)
KABI_RESERVE(5)
diff --git a/kernel/fork.c b/kernel/fork.c
index 2839961..951aa6f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2055,9 +2055,11 @@ static __latent_entropy struct task_struct *copy_process(
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
+ p->parent_exec_id_u64 = current->parent_exec_id_u64;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
+ p->parent_exec_id_u64 = current->self_exec_id_u64;
}
klp_copy_process(p);
diff --git a/kernel/signal.c b/kernel/signal.c
index 60ea2ee..a58af7d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1827,7 +1827,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
- if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
+ if (tsk->parent_exec_id_u64 != READ_ONCE(tsk->parent->self_exec_id_u64))
sig = SIGCHLD;
}
--
1.8.3
[PATCH 01/14] net: hns3: solve the unlock 2 times when rocee init fault
by Yang Yingliang 20 May '20
From: Hao Shen <shenhao21(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------------------------------------
When the RoCE client initialization fails, rtnl_unlock() is executed twice.
Signed-off-by: Hao Shen <shenhao21(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0b7e159..3e0afc4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3865,7 +3865,7 @@ static int hclge_resume(struct hnae3_ae_dev *ae_dev)
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
if (ret)
- goto err_reset_lock;
+ return ret;
rtnl_lock();
--
1.8.3
13 May '20
From: Miaohe Lin <linmiaohe(a)huawei.com>
mainline inclusion
from mainline-v5.6-rc4
commit d80b64ff297e40c2b6f7d7abc1b3eba70d22a068
category: bugfix
bugzilla: 13690
CVE: CVE-2020-12768
-------------------------------------------------
When kmalloc memory for sd->sev_vmcbs failed, we forget to free the page
held by sd->save_area. Also get rid of the var r as '-ENOMEM' is actually
the only possible outcome here.
Reviewed-by: Liran Alon <liran.alon(a)oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets(a)redhat.com>
Signed-off-by: Miaohe Lin <linmiaohe(a)huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/x86/kvm/svm.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index df22744..226db3dc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -998,33 +998,32 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
- int r;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
- r = -ENOMEM;
sd->save_area = alloc_page(GFP_KERNEL);
if (!sd->save_area)
- goto err_1;
+ goto free_cpu_data;
if (svm_sev_enabled()) {
- r = -ENOMEM;
sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
sizeof(void *),
GFP_KERNEL);
if (!sd->sev_vmcbs)
- goto err_1;
+ goto free_save_area;
}
per_cpu(svm_data, cpu) = sd;
return 0;
-err_1:
+free_save_area:
+ __free_page(sd->save_area);
+free_cpu_data:
kfree(sd);
- return r;
+ return -ENOMEM;
}
--
1.8.3
13 May '20
hulk inclusion
category: config
bugzilla: NA
CVE: NA
-------------------------------------------------
The CONFIG_ARM64_ERRATUM_1542419 option was introduced by commit 05460849c3b5 ("arm64: errata: Hide CTR_EL0.DIC on systems affected by Neoverse-N1 #1542419").
Disable CONFIG_ARM64_ERRATUM_1542419 by default.
Reviewed-by: Xuefeng Wang <wxf.wang(a)hisilicon.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/configs/euleros_defconfig | 1 +
arch/arm64/configs/hulk_defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/configs/storage_ci_defconfig | 1 +
arch/arm64/configs/syzkaller_defconfig | 1 +
5 files changed, 5 insertions(+)
diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig
index 04456f3..360e692 100644
--- a/arch/arm64/configs/euleros_defconfig
+++ b/arch/arm64/configs/euleros_defconfig
@@ -387,6 +387,7 @@ CONFIG_ARM64_ERRATUM_845719=y
CONFIG_ARM64_ERRATUM_843419=y
CONFIG_ARM64_ERRATUM_1024718=y
# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
CONFIG_CAVIUM_ERRATUM_22375=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index 6ee2472..bb020a1 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -386,6 +386,7 @@ CONFIG_ARM64_ERRATUM_845719=y
# CONFIG_ARM64_ERRATUM_843419 is not set
# CONFIG_ARM64_ERRATUM_1024718 is not set
# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
# CONFIG_CAVIUM_ERRATUM_22375 is not set
# CONFIG_CAVIUM_ERRATUM_23144 is not set
# CONFIG_CAVIUM_ERRATUM_23154 is not set
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 9e6f560..b943729 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -389,6 +389,7 @@ CONFIG_ARM64_ERRATUM_845719=y
CONFIG_ARM64_ERRATUM_843419=y
CONFIG_ARM64_ERRATUM_1024718=y
# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
CONFIG_CAVIUM_ERRATUM_22375=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
diff --git a/arch/arm64/configs/storage_ci_defconfig b/arch/arm64/configs/storage_ci_defconfig
index 2ad533d..b7f23ca 100644
--- a/arch/arm64/configs/storage_ci_defconfig
+++ b/arch/arm64/configs/storage_ci_defconfig
@@ -373,6 +373,7 @@ CONFIG_ARM64_ERRATUM_845719=y
CONFIG_ARM64_ERRATUM_843419=y
CONFIG_ARM64_ERRATUM_1024718=y
# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
CONFIG_CAVIUM_ERRATUM_22375=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
diff --git a/arch/arm64/configs/syzkaller_defconfig b/arch/arm64/configs/syzkaller_defconfig
index 509713a..eaf5e32 100644
--- a/arch/arm64/configs/syzkaller_defconfig
+++ b/arch/arm64/configs/syzkaller_defconfig
@@ -385,6 +385,7 @@ CONFIG_ARM64_ERRATUM_845719=y
# CONFIG_ARM64_ERRATUM_843419 is not set
# CONFIG_ARM64_ERRATUM_1024718 is not set
# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
# CONFIG_CAVIUM_ERRATUM_22375 is not set
# CONFIG_CAVIUM_ERRATUM_23144 is not set
# CONFIG_CAVIUM_ERRATUM_23154 is not set
--
1.8.3
[PATCH 01/33] vhost: vsock: kick send_pkt worker once device is started
by Yang Yingliang 13 May '20
From: Jia He <justin.he(a)arm.com>
commit 0b841030625cde5f784dd62aec72d6a766faae70 upstream.
Ning Bo reported an abnormal 2-second gap when booting Kata container [1].
The unconditional timeout was caused by VSOCK_DEFAULT_CONNECT_TIMEOUT of
connecting from the client side. The vhost vsock client tries to connect
an initializing virtio vsock server.
The abnormal flow looks like:
host-userspace           vhost vsock                       guest vsock
==============           ===========                       ===========
connect()     -------->  vhost_transport_send_pkt_work()   initializing
   |                        vq->private_data==NULL
   |                        will not be queued
   V
schedule_timeout(2s)
                         vhost_vsock_start()  <---------   device ready
                            set vq->private_data
wait for 2s and failed
connect() again             vq->private_data!=NULL         recv connecting pkt
Details:
1. Host userspace sends a connect pkt, at that time, guest vsock is under
initializing, hence the vhost_vsock_start has not been called. So
vq->private_data==NULL, and the pkt is not been queued to send to guest
2. Then it sleeps for 2s
3. After guest vsock finishes initializing, vq->private_data is set
4. When host userspace wakes up after 2s, send connecting pkt again,
everything is fine.
As suggested by Stefano Garzarella, fix this by additionally kicking the
send_pkt worker in vhost_vsock_start once the virtio device is started,
so that the pending pkt is sent again.
After this patch, kata-runtime (with vsock enabled) boot time is reduced
from 3s to 1s on a ThunderX2 arm64 server.
[1] https://github.com/kata-containers/runtime/issues/1917
Reported-by: Ning Bo <n.b(a)live.com>
Suggested-by: Stefano Garzarella <sgarzare(a)redhat.com>
Signed-off-by: Jia He <justin.he(a)arm.com>
Link: https://lore.kernel.org/r/20200501043840.186557-1-justin.he@arm.com
Signed-off-by: Michael S. Tsirkin <mst(a)redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/vhost/vsock.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5f5c5de..bac1365 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -499,6 +499,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
mutex_unlock(&vq->mutex);
}
+ /* Some packets may have been queued before the device was started,
+ * let's kick the send worker to send them.
+ */
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
mutex_unlock(&vsock->dev.mutex);
return 0;
--
1.8.3
12 May '20
From: Ville Syrjälä <ville.syrjala(a)linux.intel.com>
commit 6292b8efe32e6be408af364132f09572aed14382 upstream.
The DispID DTD pixel clock is documented as:
"00 00 00 h → FF FF FF h | Pixel clock ÷ 10,000 0.01 → 167,772.16 Mega Pixels per Sec"
Which seems to imply that we need to add one to the raw value.
Reality seems to agree as there are tiled displays in the wild
which currently show a 10kHz difference in the pixel clock
between the tiles (one tile gets its mode from the base EDID,
the other from the DispID block).
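As a worked example (illustrative only, not part of the patch), decoding the
three raw bytes with and without the "+ 1":

static unsigned int displayid_pixel_clock_khz(const unsigned char pc[3])
{
	unsigned int raw = pc[0] | (pc[1] << 8) | (pc[2] << 16);

	return (raw + 1) * 10;   /* field is in units of 10 kHz */
}

For raw = 14999 this gives (14999 + 1) * 10 kHz = 150,000 kHz; dropping the
"+ 1" gives 149,990 kHz, i.e. exactly the 10 kHz mismatch observed between
tiles.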
Cc: stable(a)vger.kernel.org
References: https://gitlab.freedesktop.org/drm/intel/-/issues/27
Signed-off-by: Ville Syrjälä <ville.syrjala(a)linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200423151743.18767-1-ville.…
Reviewed-by: Manasi Navare <manasi.d.navare(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/gpu/drm/drm_edid.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f5926bf5..d5dcee7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4706,7 +4706,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16));
+ (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
--
1.8.3
12 May '20
From: Clement Leger <cleger(a)kalray.eu>
commit 00a0eec59ddbb1ce966b19097d8a8d2f777e726a upstream.
Index of rvring is computed using pointer arithmetic. However, since
rvring->rvdev->vring is the base of the vring array, the computation of
the rvring index should be reversed. It previously led to writing at
negative indices in the resource table.
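A tiny sketch of the pointer-arithmetic direction (illustrative only,
generic C rather than the driver code):

#include <stddef.h>

struct item { int v; };

static ptrdiff_t index_of(const struct item *base, const struct item *elem)
{
	return elem - base;   /* &base[3] - base == 3; base - &base[3] == -3 */
}

With the operands reversed, as in the old code, the resource table was
indexed with the negated value.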
Signed-off-by: Clement Leger <cleger(a)kalray.eu>
Link: https://lore.kernel.org/r/20191004073736.8327-1-cleger@kalray.eu
Signed-off-by: Bjorn Andersson <bjorn.andersson(a)linaro.org>
Cc: Doug Anderson <dianders(a)chromium.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/remoteproc/remoteproc_core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index abbef17..d5ff272 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -289,7 +289,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
{
int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
struct rproc *rproc = rvring->rvdev->rproc;
- int idx = rvring->rvdev->vring - rvring;
+ int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
--
1.8.3
12 May '20
From: Dmitry Monakhov <dmonakhov(a)gmail.com>
[ Upstream commit 4068664e3cd2312610ceac05b74c4cf1853b8325 ]
Extents are cached in read_extent_tree_block(); as a result, extents
are not cached for inodes with depth == 0 when we try to find the
extent using ext4_find_extent(). The result of the lookup is cached
in ext4_map_blocks() but is only a subset of the extent on disk. As a
result, the contents of extents status cache can get very badly
fragmented for certain workloads, such as a random 4k read workload.
File size of /mnt/test is 33554432 (8192 blocks of 4096 bytes)
ext: logical_offset: physical_offset: length: expected: flags:
0: 0.. 8191: 40960.. 49151: 8192: last,eof
$ perf record -e 'ext4:ext4_es_*' /root/bin/fio --name=t --direct=0 --rw=randread --bs=4k --filesize=32M --size=32M --filename=/mnt/test
$ perf script | grep ext4_es_insert_extent | head -n 10
fio 131 [000] 13.975421: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [494/1) mapped 41454 status W
fio 131 [000] 13.975939: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [6064/1) mapped 47024 status W
fio 131 [000] 13.976467: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [6907/1) mapped 47867 status W
fio 131 [000] 13.976937: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [3850/1) mapped 44810 status W
fio 131 [000] 13.977440: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [3292/1) mapped 44252 status W
fio 131 [000] 13.977931: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [6882/1) mapped 47842 status W
fio 131 [000] 13.978376: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [3117/1) mapped 44077 status W
fio 131 [000] 13.978957: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [2896/1) mapped 43856 status W
fio 131 [000] 13.979474: ext4:ext4_es_insert_extent: dev 253,0 ino 12 es [7479/1) mapped 48439 status W
Fix this by caching the extents for inodes with depth == 0 in
ext4_find_extent().
[ Renamed ext4_es_cache_extents() to ext4_cache_extents() since this
newly added function is not in extents_cache.c, and to avoid
potential visual confusion with ext4_es_cache_extent(). -TYT ]
Signed-off-by: Dmitry Monakhov <dmonakhov(a)gmail.com>
Link: https://lore.kernel.org/r/20191106122502.19986-1-dmonakhov@gmail.com
Signed-off-by: Theodore Ts'o <tytso(a)mit.edu>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
fs/ext4/extents.c | 47 +++++++++++++++++++++++++++--------------------
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 90862ee..0f17c54 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -510,6 +510,30 @@ int ext4_ext_check_inode(struct inode *inode)
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
+static void ext4_cache_extents(struct inode *inode,
+ struct ext4_extent_header *eh)
+{
+ struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+ ext4_lblk_t prev = 0;
+ int i;
+
+ for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+ unsigned int status = EXTENT_STATUS_WRITTEN;
+ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+ int len = ext4_ext_get_actual_len(ex);
+
+ if (prev && (prev != lblk))
+ ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
+ EXTENT_STATUS_HOLE);
+
+ if (ext4_ext_is_unwritten(ex))
+ status = EXTENT_STATUS_UNWRITTEN;
+ ext4_es_cache_extent(inode, lblk, len,
+ ext4_ext_pblock(ex), status);
+ prev = lblk + len;
+ }
+}
+
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
@@ -544,26 +568,7 @@ int ext4_ext_check_inode(struct inode *inode)
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
- struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
- ext4_lblk_t prev = 0;
- int i;
-
- for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
- unsigned int status = EXTENT_STATUS_WRITTEN;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- int len = ext4_ext_get_actual_len(ex);
-
- if (prev && (prev != lblk))
- ext4_es_cache_extent(inode, prev,
- lblk - prev, ~0,
- EXTENT_STATUS_HOLE);
-
- if (ext4_ext_is_unwritten(ex))
- status = EXTENT_STATUS_UNWRITTEN;
- ext4_es_cache_extent(inode, lblk, len,
- ext4_ext_pblock(ex), status);
- prev = lblk + len;
- }
+ ext4_cache_extents(inode, eh);
}
return bh;
errout:
@@ -911,6 +916,8 @@ struct ext4_ext_path *
path[0].p_bh = NULL;
i = depth;
+ if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
+ ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
--
1.8.3
11 May '20
From: fengsheng <fengsheng5(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
1. add interface: sysctl_pmbus_write_common
2. add interface: sysctl_pmbus_read_common
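A minimal usage sketch of the new read interface (illustrative only; the
PMBus command byte, slave address and chip id below are assumptions made
for the example, not values from the patch):

static int example_pmbus_read(void)
{
	u8 cmd = 0x8b;                 /* assumed register, e.g. READ_VOUT */
	u8 data[2] = {0};
	struct pmbus_read_op op = {
		.slave_addr = 0x40,    /* assumed 7-bit slave address */
		.cmd_len    = sizeof(cmd),
		.data_len   = sizeof(data),
		.cmd        = &cmd,
		.data       = data,
	};
	int ret;

	ret = sysctl_pmbus_read_common(0, &op);
	if (ret != SYSCTL_ERR_OK)
		return ret;

	/* data[] now holds the raw bytes read from the device */
	return 0;
}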
Signed-off-by: fengsheng <fengsheng5(a)huawei.com>
Reviewed-by: wuyang <wuyang7(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/soc/hisilicon/sysctl/sysctl_drv.c | 2 +-
drivers/soc/hisilicon/sysctl/sysctl_pmbus.c | 113 ++++++++++++++++++++++------
drivers/soc/hisilicon/sysctl/sysctl_pmbus.h | 16 ++++
3 files changed, 108 insertions(+), 23 deletions(-)
diff --git a/drivers/soc/hisilicon/sysctl/sysctl_drv.c b/drivers/soc/hisilicon/sysctl/sysctl_drv.c
index 3899fac..c4cb157 100644
--- a/drivers/soc/hisilicon/sysctl/sysctl_drv.c
+++ b/drivers/soc/hisilicon/sysctl/sysctl_drv.c
@@ -48,7 +48,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG
-#define SYSCTL_DRIVER_VERSION "1.9.39.1"
+#define SYSCTL_DRIVER_VERSION "1.9.60.0"
unsigned int g_sysctrl_debug;
diff --git a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c
index 74d06a8..92eeba6 100644
--- a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c
+++ b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c
@@ -190,6 +190,9 @@ int InitPmbus(u8 chip_id)
his_sysctrl_reg_wr(base, I2C_SS_SCL_HCNT_OFFSET, I2C_SS_SCLHCNT);
/* ulSclLow > 1.5us */
his_sysctrl_reg_wr(base, I2C_SS_SCL_LCNT_OFFSET, I2C_SS_SCLLCNT);
+ /* set sda_hold_fs 1us > 250ns */
+ his_sysctrl_reg_wr(base, I2C_SDA_HOLD_OFFSET, I2C_SS_SDA_HOLD_FS);
+
his_sysctrl_reg_wr(base, I2C_ENABLE_OFFSET, 0x1);
debug_sysctrl_print("Initialize Pmbus end\n");
@@ -233,35 +236,36 @@ int sysctl_pmbus_cfg(u8 chip_id, u8 addr, u8 page, u32 slave_addr)
return 0;
}
-int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 buf)
+int sysctl_pmbus_write_common(u8 chip_id, u32 slave_addr, u32 data_len, u8 *buf)
{
u32 i = 0;
u32 temp = 0;
u32 loop = 0x1000;
- u32 temp_data = addr;
+ u32 temp_data;
void __iomem *base = NULL;
if ((chip_id >= CHIP_ID_NUM_MAX) ||
- (data_len > DATA_NUM_MAX) ||
- (slave_addr >= SLAVE_ADDR_MAX)) {
+ (slave_addr >= SLAVE_ADDR_MAX) ||
+ (!data_len) || (data_len > PMBUS_WRITE_LEN_MAX) ||
+ (!buf)) {
pr_err("[sysctl pmbus] write param err,chipid=0x%x,data_len=0x%x,slave_addr=0x%x!\n",
chip_id, data_len, slave_addr);
return SYSCTL_ERR_PARAM;
}
+ /* clear all interrupt */
base = g_sysctl_pmbus_base[chip_id];
-
his_sysctrl_reg_wr(base, I2C_INTR_RAW_OFFSET, 0x3ffff);
+ /* send: slave_addr[7bit] + write[1bit] */
his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x2 << 0x8) | slave_addr);
- if (data_len != 0) {
- his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, addr);
- for (i = 0; i < data_len - 1; i++)
- his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, 0xff & (buf >> (i * 0x8)));
+ /* write data */
+ for (i = 0; i < data_len - 1; i++)
+ his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, buf[i]);
- temp_data = (0xff & (buf >> (i * 0x8)));
- }
+ /* last data should send stop */
+ temp_data = buf[i];
his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x4 << 0x8) | temp_data);
/* poll until send done */
@@ -273,14 +277,15 @@ int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 bu
/* send data failed */
if (temp & I2C_TX_ABRT) {
his_sysctrl_reg_rd(base, I2C_TX_ABRT_SRC_REG, &temp);
- pr_err("[sysctl pmbus]write data fail, chip_id:0x%x,slave_addr:0x%x, addr:0x%x!\r\n",
- chip_id, slave_addr, addr);
+ pr_err("[sysctl pmbus]write data fail, chip_id:0x%x,slave_addr:0x%x\r\n",
+ chip_id, slave_addr);
his_sysctrl_reg_rd(base, I2C_CLR_TX_ABRT_REG, &temp);
return SYSCTL_ERR_FAILED;
}
his_sysctrl_reg_rd(base, I2C_STATUS_REG, &temp);
+ /* send done */
if (temp & I2C_TX_FIFO_EMPTY) {
his_sysctrl_reg_rd(base, I2C_TX_FIFO_DATA_NUM_REG, &temp);
if (temp == 0)
@@ -289,8 +294,8 @@ int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 bu
loop--;
if (loop == 0) {
- pr_err("[sysctl pmbus]write data retry fail, chip_id:0x%x,slave_addr:0x%x, addr:0x%x!\r\n",
- chip_id, slave_addr, addr);
+ pr_err("[sysctl pmbus]write data retry fail, chip_id:0x%x,slave_addr:0x%x\r\n",
+ chip_id, slave_addr);
return SYSCTL_ERR_FAILED;
}
}
@@ -298,7 +303,25 @@ int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 bu
return SYSCTL_ERR_OK;
}
-static int sysctl_pmbus_read_pre(void __iomem *base, u8 addr, u32 slave_addr, u32 data_len)
+int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 buf)
+{
+#define TMP_LEN_MAX 5
+ u8 i;
+ u8 tmp[TMP_LEN_MAX] = {0};
+
+ if (data_len > DATA_NUM_MAX) {
+ pr_err("[sysctl pmbus] write param err,data_len=0x%x!\n", data_len);
+ return SYSCTL_ERR_PARAM;
+ }
+
+ tmp[0] = addr;
+ for (i = 0; i < data_len; i++)
+ tmp[i + 1] = (buf >> (i * 0x8)) & 0xff;
+
+ return sysctl_pmbus_write_common(chip_id, slave_addr, data_len + sizeof(addr), &tmp[0]);
+}
+
+static int sysctl_pmbus_read_pre(void __iomem *base, u32 cmd_len, u8 *cmd, u32 slave_addr, u32 data_len)
{
u32 i = 0;
u32 fifo_num = 0;
@@ -309,22 +332,29 @@ static int sysctl_pmbus_read_pre(void __iomem *base, u8 addr, u32 slave_addr, u3
return SYSCTL_ERR_PARAM;
}
+ /* clear all interrupt */
his_sysctrl_reg_wr(base, I2C_INTR_RAW_OFFSET, 0x3ffff);
+ /* clear rx fifo */
his_sysctrl_reg_rd(base, I2C_RXFLR_OFFSET, &fifo_num);
- debug_sysctrl_print("[sysctl_pmbus_read_byte]read pmbus , read empty rx fifo num:%d\r\n", fifo_num);
for (i = 0; i < fifo_num; i++)
his_sysctrl_reg_rd(base, I2C_DATA_CMD_OFFSET, &temp_byte);
- his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x2 << 0x8) | slave_addr);
- his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, addr);
- his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x3 << 0x8) | slave_addr);
+ /* send cmd */
+ if (cmd_len) {
+ his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x2 << 0x8) | slave_addr);
+ for (i = 0; i < cmd_len; i++)
+ his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, cmd[i]);
+ }
+ /* read data */
+ his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, (0x3 << 0x8) | slave_addr);
i = data_len;
while ((i - 1) > 0) {
his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, 0x100);
i--;
}
+ /* last data should send stop */
his_sysctrl_reg_wr(base, I2C_DATA_CMD_OFFSET, 0x500);
return 0;
@@ -366,6 +396,43 @@ static int sysctl_pmbus_wait_data(void __iomem *base, u32 data_len)
return SYSCTL_ERR_OK;
}
+int sysctl_pmbus_read_common(u8 chip_id, struct pmbus_read_op *op)
+{
+ u32 ret;
+ u32 i = 0;
+ u32 temp_byte = 0;
+ void __iomem *base = NULL;
+
+ if ((chip_id >= CHIP_ID_NUM_MAX) || (!op)) {
+ pr_err("[sysctl pmbus]read param err,chipid=0x%x!\n", chip_id);
+ return SYSCTL_ERR_PARAM;
+ }
+
+ if ((op->slave_addr >= SLAVE_ADDR_MAX) ||
+ (!op->data_len) || ((op->cmd_len + op->data_len) > PMBUS_READ_LEN_MAX) ||
+ (!op->data) || ((op->cmd_len) && (!op->cmd))) {
+ pr_err("[sysctl pmbus]read param err,data_len=0x%x,cmd_len=0x%x,slave_addr=0x%x\n",
+ op->data_len, op->cmd_len, op->slave_addr);
+ return SYSCTL_ERR_PARAM;
+ }
+
+ base = g_sysctl_pmbus_base[chip_id];
+ ret = sysctl_pmbus_read_pre(base, op->cmd_len, op->cmd, op->slave_addr, op->data_len);
+ if (ret != SYSCTL_ERR_OK)
+ return ret;
+
+ ret = sysctl_pmbus_wait_data(base, op->data_len);
+ if (ret != SYSCTL_ERR_OK)
+ return ret;
+
+ for (i = 0; i < op->data_len; i++) {
+ his_sysctrl_reg_rd(base, I2C_DATA_CMD_OFFSET, &temp_byte);
+ op->data[i] = temp_byte & 0xff;
+ }
+
+ return SYSCTL_ERR_OK;
+}
+
int sysctl_pmbus_read(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 *buf)
{
u32 ret;
@@ -385,7 +452,7 @@ int sysctl_pmbus_read(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 *bu
base = g_sysctl_pmbus_base[chip_id];
- ret = sysctl_pmbus_read_pre(base, addr, slave_addr, data_len);
+ ret = sysctl_pmbus_read_pre(base, sizeof(addr), &addr, slave_addr, data_len);
if (ret != SYSCTL_ERR_OK)
return ret;
@@ -566,7 +633,7 @@ static int sysctl_cpu_convert_vol_to_vid(u32 vid_table, u32 value, u32 *vid)
return SYSCTL_ERR_OK;
}
-int sysctl_cpu_voltage_adjust (u8 chip_id, u8 loop, u32 slave_addr, u32 value)
+int sysctl_cpu_voltage_adjust(u8 chip_id, u8 loop, u32 slave_addr, u32 value)
{
u32 ret;
u32 vid;
@@ -647,5 +714,7 @@ void hip_sysctl_pmbus_exit(void)
EXPORT_SYMBOL(sysctl_cpu_voltage_adjust);
EXPORT_SYMBOL(sysctl_pmbus_write);
EXPORT_SYMBOL(sysctl_pmbus_read);
+EXPORT_SYMBOL(sysctl_pmbus_write_common);
+EXPORT_SYMBOL(sysctl_pmbus_read_common);
EXPORT_SYMBOL(InitPmbus);
EXPORT_SYMBOL(DeInitPmbus);
diff --git a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.h b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.h
index e1a4742..ce1d83d 100644
--- a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.h
+++ b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.h
@@ -20,6 +20,13 @@
#define PAGE_NUM_MAX (0x6f)
#define DATA_NUM_MAX (0x4)
+#define I2C_FIFO_DEPTH (256)
+
+/* slave_addr use 1 fifo */
+#define PMBUS_READ_LEN_MAX (I2C_FIFO_DEPTH - 1)
+/* slave_addr use 2 fifo */
+#define PMBUS_WRITE_LEN_MAX (I2C_FIFO_DEPTH - 2)
+
#define I2C_TX_ABRT (0x040)
#define I2C_TX_ABRT_SRC_REG (0x0880)
#define I2C_CLR_TX_ABRT_REG (0x0854)
@@ -29,6 +36,7 @@
#define I2C_SS_SCLHCNT 0x3db
#define I2C_SS_SCLLCNT 0x3e6
+#define I2C_SS_SDA_HOLD_FS 0xfa
/* AVS_REG_GEN */
#define AVS_WR_OPEN_OFFSET 0x0004
@@ -77,6 +85,14 @@
#define STATUS_RPT_OFFSET 0x0AA4
#define STATUS_ERR_RPT_OFFSET 0x0AA8
+struct pmbus_read_op {
+ u32 slave_addr;
+ u32 cmd_len;
+ u32 data_len;
+ u8 *cmd;
+ u8 *data;
+};
+
/* Define the union pmbus_vout_mode */
typedef union {
/* Define the struct bits */
--
1.8.3
11 May '20
From: Luke Nelson <lukenels(a)cs.washington.edu>
commit 4178417cc5359c329790a4a8f4a6604612338cca upstream.
This patch fixes an incorrect check in how immediate memory offsets are
computed for BPF_DW on arm.
For BPF_LDX/ST/STX + BPF_DW, the 32-bit arm JIT breaks down an 8-byte
access into two separate 4-byte accesses using off+0 and off+4. If off
fits in imm12, the JIT emits a ldr/str instruction with the immediate
and avoids the use of a temporary register. While the current check off
<= 0xfff ensures that the first immediate off+0 doesn't overflow imm12,
it's not sufficient for the second immediate off+4, which may cause the
second access of BPF_DW to read/write the wrong address.
This patch fixes the problem by changing the check to
off <= 0xfff - 4 for BPF_DW, ensuring off+4 will never overflow.
A side effect of simplifying the check is that it now allows using
negative immediate offsets in ldr/str. This means that small negative
offsets can also avoid the use of a temporary register.
This patch introduces no new failures in test_verifier or test_bpf.c.
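A boundary example, sketched in plain C (illustrative only, not the JIT
code itself):

static int dw_off_fits_imm12(short off)
{
	const short off_max = 0xfff - 4;   /* both off and off + 4 must fit */

	return -off_max <= off && off <= off_max;
}

With the old test, off = 0xffc passed (0xffc <= 0xfff), yet the second word
of the BPF_DW access sits at off + 4 = 0x1000, which no longer fits in
imm12; the new check rejects it, since 0xffc > 0xfff - 4.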
Fixes: c5eae692571d6 ("ARM: net: bpf: improve 64-bit store implementation")
Fixes: ec19e02b343db ("ARM: net: bpf: fix LDX instructions")
Co-developed-by: Xi Wang <xi.wang(a)gmail.com>
Signed-off-by: Xi Wang <xi.wang(a)gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels(a)gmail.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Link: https://lore.kernel.org/bpf/20200409221752.28448-1-luke.r.nels@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/arm/net/bpf_jit_32.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 25b3ee8..d06293a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -993,21 +993,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
+static bool is_ldst_imm(s16 off, const u8 size)
+{
+ s16 off_max = 0;
+
+ switch (size) {
+ case BPF_B:
+ case BPF_W:
+ off_max = 0xfff;
+ break;
+ case BPF_H:
+ off_max = 0xff;
+ break;
+ case BPF_DW:
+ /* Need to make sure off+4 does not overflow. */
+ off_max = 0xfff - 4;
+ break;
+ }
+ return -off_max <= off && off <= off_max;
+}
+
/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
- s32 off, struct jit_ctx *ctx, const u8 sz){
+ s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
- s32 off_max;
s8 rd;
rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
- if (sz == BPF_H)
- off_max = 0xff;
- else
- off_max = 0xfff;
-
- if (off < 0 || off > off_max) {
+ if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
rd = tmp[0];
@@ -1036,18 +1050,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
- s32 off, struct jit_ctx *ctx, const u8 sz){
+ s16 off, struct jit_ctx *ctx, const u8 sz){
const s8 *tmp = bpf2a32[TMP_REG_1];
const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
s8 rm = src;
- s32 off_max;
-
- if (sz == BPF_H)
- off_max = 0xff;
- else
- off_max = 0xfff;
- if (off < 0 || off > off_max) {
+ if (!is_ldst_imm(off, sz)) {
emit_a32_mov_i(tmp[0], off, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
--
1.8.3
openEuler Kernel SIG: Hello!
The kernel community (including Linus) suspects that kfifo has a potential bug; see
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/l…
At the end of last year I submitted an analysis report to the community: I used litmus tests to verify that the problem is possible, went through all the kfifo-related functions and macros, and made suggestions. I have heard nothing back; I do not know whether the report was lost or simply not taken seriously because no bug has been hit in practice.
ARM is a typical weakly-ordered architecture, so I thought I would forward this to you for reference. Please ignore it if you are not interested :-)
Regards,
laokz
From: Chiqijun <chiqijun(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: 4472
-----------------------------------------------------------------------
The current driver does not support IPIP tunnel packet parsing. When
checksum offload is enabled on the device, the driver gets the wrong
inner headers, causing device errors.
Signed-off-by: Chiqijun <chiqijun(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_tx.c | 31 ++++++++++++++++++++++------
1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 05291a5..c8492ae 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -373,26 +373,45 @@ static int hinic_tx_csum(struct hinic_sq_task *task, u32 *queue_info,
if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ l4_proto = ip.v4->protocol;
} else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
+
l3_type = IPV6_PKT;
#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD
tunnel_type = TUNNEL_UDP_CSUM;
#endif
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
} else {
l3_type = UNKNOWN_L3TYPE;
+ l4_proto = IPPROTO_RAW;
}
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
+ if (l4_proto == IPPROTO_UDP || l4_proto == IPPROTO_GRE) {
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ } else {
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
- hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
--
1.8.3
09 May '20
From: Alan Stern <stern(a)rowland.harvard.edu>
mainline inclusion
from mainline-v5.7-rc3
commit 056ad39ee9253873522f6469c3364964a322912b
category: bugfix
bugzilla: 13690
CVE: CVE-2020-12464
-------------------------------------------------
FuzzUSB (a variant of syzkaller) found a free-while-still-in-use bug
in the USB scatter-gather library:
BUG: KASAN: use-after-free in atomic_read
include/asm-generic/atomic-instrumented.h:26 [inline]
BUG: KASAN: use-after-free in usb_hcd_unlink_urb+0x5f/0x170
drivers/usb/core/hcd.c:1607
Read of size 4 at addr ffff888065379610 by task kworker/u4:1/27
CPU: 1 PID: 27 Comm: kworker/u4:1 Not tainted 5.5.11 #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.10.2-1ubuntu1 04/01/2014
Workqueue: scsi_tmf_2 scmd_eh_abort_handler
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x153/0x1cb mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:639
check_memory_region_inline mm/kasan/generic.c:185 [inline]
check_memory_region+0x152/0x1b0 mm/kasan/generic.c:192
__kasan_check_read+0x11/0x20 mm/kasan/common.c:95
atomic_read include/asm-generic/atomic-instrumented.h:26 [inline]
usb_hcd_unlink_urb+0x5f/0x170 drivers/usb/core/hcd.c:1607
usb_unlink_urb+0x72/0xb0 drivers/usb/core/urb.c:657
usb_sg_cancel+0x14e/0x290 drivers/usb/core/message.c:602
usb_stor_stop_transport+0x5e/0xa0 drivers/usb/storage/transport.c:937
This bug occurs when cancellation of the S-G transfer races with
transfer completion. When that happens, usb_sg_cancel() may continue
to access the transfer's URBs after usb_sg_wait() has freed them.
The bug is caused by the fact that usb_sg_cancel() does not take any
sort of reference to the transfer, and so there is nothing to prevent
the URBs from being deallocated while the routine is trying to use
them. The fix is to take such a reference by incrementing the
transfer's io->count field while the cancellation is in progress and
decrementing it afterward. The transfer's URBs are not deallocated
until io->complete is triggered, which happens when io->count reaches
zero.
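Condensed, the resulting flow of usb_sg_cancel() after this patch looks
roughly like the sketch below (paraphrased from the diff, not verbatim
kernel code):

	spin_lock_irqsave(&io->lock, flags);
	if (io->status || io->count == 0) {     /* already done or cancelling */
		spin_unlock_irqrestore(&io->lock, flags);
		return;
	}
	io->status = -ECONNRESET;
	io->count++;                            /* keep the request alive */
	spin_unlock_irqrestore(&io->lock, flags);

	/* ... unlink the remaining URBs ... */

	spin_lock_irqsave(&io->lock, flags);
	io->count--;                            /* drop our reference */
	if (!io->count)
		complete(&io->complete);
	spin_unlock_irqrestore(&io->lock, flags);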
Signed-off-by: Alan Stern <stern(a)rowland.harvard.edu>
Reported-and-tested-by: Kyungtae Kim <kt0755(a)gmail.com>
CC: <stable(a)vger.kernel.org>
Link: https://lore.kernel.org/r/Pine.LNX.4.44L0.2003281615140.14837-100000@netrid…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/usb/core/message.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0d3fd20..fcf84bf 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
int i, retval;
spin_lock_irqsave(&io->lock, flags);
- if (io->status) {
+ if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
+ io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
@@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
+
+ spin_lock_irqsave(&io->lock, flags);
+ io->count--;
+ if (!io->count)
+ complete(&io->complete);
+ spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
--
1.8.3
From: Shengzui You <youshengzui(a)huawei.com>
driver inclusion
category: other
bugzilla: NA
CVE: NA
---------------------------------
This patch updates the hns3 driver version to 1.9.37.9.
Signed-off-by: Shengzui You <youshengzui(a)huawei.com>
Reviewed-by: Weiwei Deng <dengweiwei(a)huawei.com>
Reviewed-by: Zhaohui Zhong <zhongzhaohui(a)huawei.com>
Reviewed-by: Junxin Chen <chenjunxin1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +-
5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 5253374..14b3991 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.9.37.8"
+#define HNAE3_MOD_VERSION "1.9.37.9"
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
index 34ff097..b89b110 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
@@ -4,7 +4,7 @@
#ifndef __HNS3_CAE_VERSION_H__
#define __HNS3_CAE_VERSION_H__
-#define HNS3_CAE_MOD_VERSION "1.9.37.8"
+#define HNS3_CAE_MOD_VERSION "1.9.37.9"
#define CMT_ID_LEN 8
#define RESV_LEN 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 4567c2c..62f34e9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.37.8"
+#define HNS3_MOD_VERSION "1.9.37.9"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 80270e4..15af034 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,7 +12,7 @@
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.37.8"
+#define HCLGE_MOD_VERSION "1.9.37.9"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index bd24595..f89ee1b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,7 +10,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.9.37.8"
+#define HCLGEVF_MOD_VERSION "1.9.37.9"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
--
1.8.3
From: Ye Bin <yebin10(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 34271
CVE: NA
--------------------------------
This reverts commit fd72360bc94ad304136beb56e8ff2ec089113bb8.
test steps:
...
rmmod hisi_sas_v3_hw
lsmod
fdisk -l
insmod hisi_sas_v3_hw.ko
lsmod
fdisk -l
....
We get follow error when we test by above test steps.
[ 3660.259153] [ffff00000116f000] pgd=00002027ffffe003, pud=00002027ffffd003,
pmd=00002027cdf28003, pte=0000000000000000
[ 3660.269719] Internal error: Oops: 96000007 [#1] PREEMPT SMP
[ 3660.275266] Modules linked in: hisi_sas_v3_hw(+) hisi_sas_main hns_roce_hw_v2(O)
hns_roce(O) rpcrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi
ib_ipoib ib_umad realtek hns3(O) hclge(O) hnae3(O) crc32_ce crct10dif_ce hisi_hpre
hisi_zip hisi_qm uacce hisi_trng_v2 rng_core sfc lbc ip_tables x_tables libsas
scsi_transport_sas [last unloaded: hisi_sas_main]
[ 3660.308227] Process smartd (pid: 19570, stack limit = 0x000000001103634d)
[ 3660.314985] CPU: 31 PID: 19570 Comm: smartd Kdump: loaded Tainted: G O 4.19.36-g32894fc #1
[ 3660.324504] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDD,
BIOS 2280-V2 CS V3.B220.02 03/27/2020
[ 3660.334110] pstate: 60400009 (nZCv daif +PAN -UAO)
[ 3660.338882] pc : scsi_device_put+0x18/0x38
[ 3660.342961] lr : scsi_disk_put+0x3c/0x58
[ 3660.346865] sp : ffff0000158a3cb0
[ 3660.350164] x29: ffff0000158a3cb0 x28: ffff8027b8111000
[ 3660.355451] x27: 00000000080a005d x26: 0000000000000000
[ 3660.360738] x25: ffff8027c6310398 x24: ffff8027cd2ec410
[ 3660.366025] x23: ffff000009811000 x22: ffff80276d274750
[ 3660.371312] x21: ffff8027abdd5000 x20: ffff8027b8110800
[ 3660.376599] x19: ffff8027abdd5000 x18: 0000000000000000
[ 3660.381886] x17: 0000000000000000 x16: 0000000000000000
[ 3660.387172] x15: 0000000000000000 x14: 0000000000000000
[ 3660.392459] x13: ffff000009996cd0 x12: ffffffffffffffff
[ 3660.397746] x11: ffff000009996cc8 x10: 0000000000000000
[ 3660.403033] x9 : 0000000000000000 x8 : 0000000040000000
[ 3660.408320] x7 : ffff0000098116c8 x6 : 0000000000000000
[ 3660.413607] x5 : ffff00000820ebbc x4 : ffff7e009eb8fb20
[ 3660.418894] x3 : 0000000080400009 x2 : ffff8027ae3ec600
[ 3660.424180] x1 : 71b6030ca20bb300 x0 : ffff00000116f000
[ 3660.429467] Call trace:
[ 3660.431904] scsi_device_put+0x18/0x38
[ 3660.435636] scsi_disk_put+0x3c/0x58
[ 3660.439195] sd_release+0x50/0xc0
[ 3660.442496] __blkdev_put+0x20c/0x220
[ 3660.446141] blkdev_put+0x4c/0x110
[ 3660.449527] blkdev_close+0x1c/0x28
[ 3660.453000] __fput+0x88/0x1b8
[ 3660.456042] ____fput+0xc/0x18
[ 3660.459085] task_work_run+0x94/0xb0
[ 3660.462646] do_notify_resume+0x17c/0x180
[ 3660.466637] work_pending+0x8/0x10
[ 3660.470022] Code: f9000bf3 aa0003f3 f9400000 f9404c00 (f9400000)
[ 3660.476089] ---[ end trace ca1d0144f9241f71 ]---
void scsi_device_put(struct scsi_device *sdev)
{
module_put(sdev->host->hostt->module); ---> error code
put_device(&sdev->sdev_gendev);
}
An exception occurs when accessing "sdev->host->hostt", because
"sdev->host->hostt" points into the module address space, which has
already been removed. module_delete first checks the module reference
count and then calls the module exit function, so in the window after
the reference count check passes and before the module exit function
runs, scsi_device_get() can still succeed.
Commit "scsi: fix failing unload of a LLDD module" is what allows
scsi_device_get() to succeed while the module is being removed, so
revert it; "scsi: fixup kernel warning during rmmod()" already fixed
the error it originally addressed.
Signed-off-by: Ye Bin <yebin10(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/scsi.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7d472c2..fc1356d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -544,6 +544,9 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
* Description: Gets a reference to the scsi_device and increments the use count
* of the underlying LLDD module. You must hold host_lock of the
* parent Scsi_Host or already have a reference when calling this.
+ *
+ * This will fail if a device is deleted or cancelled, or when the LLD module
+ * is in the process of being unloaded.
*/
int scsi_device_get(struct scsi_device *sdev)
{
@@ -551,12 +554,12 @@ int scsi_device_get(struct scsi_device *sdev)
goto fail;
if (!get_device(&sdev->sdev_gendev))
goto fail;
- /* We can fail try_module_get if we're doing SCSI operations
- * from module exit (like cache flush)
- */
- __module_get(sdev->host->hostt->module);
+ if (!try_module_get(sdev->host->hostt->module))
+ goto fail_put_device;
return 0;
+fail_put_device:
+ put_device(&sdev->sdev_gendev);
fail:
return -ENXIO;
}
--
1.8.3
[PATCH] s390/mm: fix page table upgrade vs 2ndary address mode accesses
by Yang Yingliang 09 May '20
From: Christian Borntraeger <borntraeger(a)de.ibm.com>
mainline inclusion
from mainline-v5.7-rc4
commit 316ec154810960052d4586b634156c54d0778f74
category: bugfix
bugzilla: 13690
CVE: CVE-2020-11884
-------------------------------------------------
A page table upgrade in a kernel section that uses secondary address
mode will mess up the kernel instructions as follows:
Consider the following scenario: two threads are sharing memory.
On CPU1 thread 1 does e.g. strnlen_user(). That gets to
old_fs = enable_sacf_uaccess();
len = strnlen_user_srst(src, size);
and
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
" slgr %0,%0\n"
" sacf 256\n"
"0: srst %3,%2\n"
in strnlen_user_srst(). At that point we are in secondary space mode,
control register 1 points to kernel page table and instruction fetching
happens via c1, rather than usual c13. Interrupts are not disabled, for
obvious reasons.
On CPU2 thread 2 does MAP_FIXED mmap(), forcing the upgrade of page table
from 3-level to e.g. 4-level one. We'd allocated new top-level table,
set it up and now we hit this:
notify = 1;
spin_unlock_bh(&mm->page_table_lock);
}
if (notify)
on_each_cpu(__crst_table_upgrade, mm, 0);
OK, we need to actually change over to use of new page table and we
need that to happen in all threads that are currently running. Which
happens to include the thread 1. IPI is delivered and we have
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
if (current->active_mm == mm)
set_user_asce(mm);
__tlb_flush_local();
}
run on CPU1. That does
static inline void set_user_asce(struct mm_struct *mm)
{
S390_lowcore.user_asce = mm->context.asce;
OK, user page table address updated...
__ctl_load(S390_lowcore.user_asce, 1, 1);
... and control register 1 set to it.
clear_cpu_flag(CIF_ASCE_PRIMARY);
}
IPI is run in home space mode, so it's fine - insns are fetched
using c13, which always points to kernel page table. But as soon
as we return from the interrupt, previous PSW is restored, putting
CPU1 back into secondary space mode, at which point we no longer
get the kernel instructions from the kernel mapping.
The fix is to only fixup the control registers that are currently in use
for user processes during the page table update. We must also disable
interrupts in enable_sacf_uaccess to synchronize the cr and
thread.mm_segment updates against the on_each-cpu.
Fixes: 0aaba41b58bc ("s390: remove all code using the access register mode")
Cc: stable(a)vger.kernel.org # 4.15+
Reported-by: Al Viro <viro(a)zeniv.linux.org.uk>
Reviewed-by: Gerald Schaefer <gerald.schaefer(a)de.ibm.com>
References: CVE-2020-11884
Signed-off-by: Christian Borntraeger <borntraeger(a)de.ibm.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/s390/lib/uaccess.c | 4 ++++
arch/s390/mm/pgalloc.c | 16 ++++++++++++++--
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index c4f8039..0267405 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
{
mm_segment_t old_fs;
unsigned long asce, cr;
+ unsigned long flags;
old_fs = current->thread.mm_segment;
if (old_fs & 1)
return old_fs;
+ /* protect against a concurrent page table upgrade */
+ local_irq_save(flags);
current->thread.mm_segment |= 1;
asce = S390_lowcore.kernel_asce;
if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
__ctl_load(asce, 7, 7);
set_cpu_flag(CIF_ASCE_SECONDARY);
}
+ local_irq_restore(flags);
return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 814f265..f3bc9c9 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -72,8 +72,20 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm)
- set_user_asce(mm);
+ /* we must change all active ASCEs to avoid the creation of new TLBs */
+ if (current->active_mm == mm) {
+ S390_lowcore.user_asce = mm->context.asce;
+ if (current->thread.mm_segment == USER_DS) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ /* Mark user-ASCE present in CR1 */
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (current->thread.mm_segment == USER_DS_SACF) {
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ /* enable_sacf_uaccess does all or nothing */
+ WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+ }
+ }
__tlb_flush_local();
}
--
1.8.3
just for testing openeuler kernel ci.
Signed-off-by: Xie XiuQi <xiexiuqi(a)huawei.com>
---
init/version.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/init/version.c b/init/version.c
index ef4012ec4375..b8871517a6b2 100644
--- a/init/version.c
+++ b/init/version.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(init_uts_ns);
/* FIXED STRINGS! Don't touch! */
const char linux_banner[] =
- "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ "Linux Version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
const char linux_proc_banner[] =
--
2.20.1
06 May '20
From: yu kuai <yukuai3(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 34280
CVE: NA
---------------------------
tags->rqs[] is not cleared when a driver tag is freed, to avoid an
extra store on a shared area in the per-io path. But there is a
window between getting the driver tag and writing tags->rqs[], so we
may see a stale rq in tags->rqs[] which may already have been freed,
as in the following case:

blk_mq_get_request              blk_mq_queue_tag_busy_iter
-> blk_mq_get_tag
                                -> bt_for_each
                                   -> bt_iter
                                      -> rq = tags->rqs[]
                                      -> rq->q
-> blk_mq_rq_ctx_init
   -> data->hctx->tags->rqs[rq->tag] = rq;

In addition, tags->rqs[] only contains the requests that got a driver
tag, so it is not accurate for the io-scheduler case when accounting
busy tags in part_in_flight.

To fix both problems, change blk_mq_queue_tag_busy_iter in this patch
to use tags->static_rqs[] instead of tags->rqs[]. We have to identify
whether an io scheduler is attached to decide between hctx->tags and
hctx->sched_tags, and we try to get a non-zero q_usage_counter before
that, which avoids races with nr_hw_queues updates, io-scheduler
switching and even queue cleanup.

Add an 'inflight' parameter to determine whether to iterate in-flight
requests or all busy tags, and add a new helper interface,
blk_mq_queue_tag_inflight_iter, to iterate all of the in-flight tags;
export this interface for drivers. A usage sketch follows below.
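A minimal driver-side usage sketch of the newly exported helper
(illustrative only; the function and counter names are made up for the
example):

static void example_count_inflight(struct blk_mq_hw_ctx *hctx,
				   struct request *rq, void *priv,
				   bool reserved)
{
	unsigned int *count = priv;

	(*count)++;
}

static unsigned int example_inflight(struct request_queue *q)
{
	unsigned int count = 0;

	blk_mq_queue_tag_inflight_iter(q, example_count_inflight, &count);
	return count;
}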
Signed-off-by: yu kuai <yukuai3(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
block/blk-mq-tag.c | 77 ++++++++++++++++++++++++++++++++++++++++----------
block/blk-mq.c | 6 ++--
include/linux/blk-mq.h | 3 +-
3 files changed, 67 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 41317c5..323bbca 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -216,37 +216,51 @@ struct bt_iter_data {
busy_iter_fn *fn;
void *data;
bool reserved;
+ bool inflight;
};
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_iter_data *iter_data = data;
struct blk_mq_hw_ctx *hctx = iter_data->hctx;
- struct blk_mq_tags *tags = hctx->tags;
bool reserved = iter_data->reserved;
+ struct blk_mq_tags *tags;
struct request *rq;
+ tags = hctx->sched_tags ? hctx->sched_tags : hctx->tags;
+
if (!reserved)
bitnr += tags->nr_reserved_tags;
- rq = tags->rqs[bitnr];
/*
- * We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assining ->rqs[].
+ * Because tags->rqs[] will not been cleaned when free driver tag
+ * and there is a window between get driver tag and write tags->rqs[],
+ * so we may see stale rq in tags->rqs[] which may have been freed.
+ * Using static_rqs[] is safer.
*/
- if (rq && rq->q == hctx->queue)
+ rq = tags->static_rqs[bitnr];
+
+ /*
+ * There is a small window between get tag and blk_mq_rq_ctx_init,
+ * so rq->q and rq->mq_hctx maybe different.
+ */
+ if (rq && rq->q == hctx->queue &&
+ (!iter_data->inflight ||
+ blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT))
iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
-static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
- busy_iter_fn *fn, void *data, bool reserved)
+static void bt_for_each(struct blk_mq_hw_ctx *hctx,
+ struct sbitmap_queue *bt, busy_iter_fn *fn,
+ void *data, bool reserved, bool inflight)
{
struct bt_iter_data iter_data = {
.hctx = hctx,
.fn = fn,
.data = data,
.reserved = reserved,
+ .inflight = inflight,
};
sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
@@ -314,22 +328,23 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
-void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
- void *priv)
+static void __blk_mq_queue_tag_busy_iter(struct request_queue *q,
+ busy_iter_fn *fn, void *priv, bool inflight)
{
struct blk_mq_hw_ctx *hctx;
int i;
/*
- * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
- * queue_hw_ctx after freeze the queue, so we use q_usage_counter
- * to avoid race with it.
+ * Get a reference on the queue unless its usage counter has already
+ * dropped to zero. We use this to avoid racing with the code that
+ * modifies the hctxs after freezing and draining the queue, including
+ * updating nr_hw_queues, io scheduler switching and queue cleanup.
*/
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
queue_for_each_hw_ctx(q, hctx, i) {
- struct blk_mq_tags *tags = hctx->tags;
+ struct blk_mq_tags *tags;
/*
* If not software queues are currently mapped to this
@@ -338,13 +353,45 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
if (!blk_mq_hw_queue_mapped(hctx))
continue;
+ tags = hctx->sched_tags ? hctx->sched_tags : hctx->tags;
+
if (tags->nr_reserved_tags)
- bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
- bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+ bt_for_each(hctx, &tags->breserved_tags,
+ fn, priv, true, inflight);
+ bt_for_each(hctx, &tags->bitmap_tags,
+ fn, priv, false, inflight);
+ /*
+ * flush_rq represents the rq with REQ_PREFLUSH and REQ_FUA (if FUA
+ * is not supported by the device) that is issued to the device. So
+ * we need to consider it when iterating in-flight rqs, but need not
+ * count it when iterating busy tags.
+ */
+ if (inflight &&
+ blk_mq_rq_state(hctx->fq->flush_rq) == MQ_RQ_IN_FLIGHT)
+ fn(hctx, hctx->fq->flush_rq, priv, false);
}
blk_queue_exit(q);
}
+/*
+ * Iterate all the busy tags including pending and in-flight ones.
+ */
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+ void *priv)
+{
+ __blk_mq_queue_tag_busy_iter(q, fn, priv, false);
+}
+
+/*
+ * Iterate all the inflight tags.
+ */
+void blk_mq_queue_tag_inflight_iter(struct request_queue *q,
+ busy_iter_fn *fn, void *priv)
+{
+ __blk_mq_queue_tag_busy_iter(q, fn, priv, true);
+}
+EXPORT_SYMBOL(blk_mq_queue_tag_inflight_iter);
+
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
bool round_robin, int node)
{
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8a7c3d8..ee07575 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,7 +112,7 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
struct mq_inflight mi = { .part = part, .inflight = inflight, };
inflight[0] = inflight[1] = 0;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ blk_mq_queue_tag_inflight_iter(q, blk_mq_check_inflight, &mi);
}
static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
@@ -131,7 +131,7 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
struct mq_inflight mi = { .part = part, .inflight = inflight, };
inflight[0] = inflight[1] = 0;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+ blk_mq_queue_tag_inflight_iter(q, blk_mq_check_inflight_rw, &mi);
}
void blk_freeze_queue_start(struct request_queue *q)
@@ -875,7 +875,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
+ blk_mq_queue_tag_inflight_iter(q, blk_mq_check_expired, &next);
if (next != 0) {
mod_timer(&q->timeout, next);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 6578070..149d411 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -320,7 +320,8 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-
+void blk_mq_queue_tag_inflight_iter(struct request_queue *q, busy_iter_fn *fn,
+ void *priv);
int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
--
1.8.3
1
2
From: Changbin Du <changbin.du(a)gmail.com>
mainline inclusion
from mainline-5.6
commit 0ada120c883d4f1f6aafd01cf0fbb10d8bbba015
category: bugfix
bugzilla: 34555
CVE: NA
-------------------------------------------------
libbfd has changed the bfd_section_* macros to inline functions
bfd_section_<field> since 2019-09-18. See the two commits below:
o http://www.sourceware.org/ml/gdb-cvs/2019-09/msg00064.html
o https://www.sourceware.org/ml/gdb-cvs/2019-09/msg00072.html
This fix makes perf able to build with both old and new libbfd.
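The compatibility approach relies on the old accessors being preprocessor
macros, so #ifdef can pick the right spelling at build time. A condensed
sketch of the pattern, shown here for the section size only (the helper
name is illustrative):

#include <bfd.h>

/* Works with both old and new libbfd: the old interface provided
 * bfd_get_section_size() as a macro, the new one provides the inline
 * function bfd_section_size().
 */
static bfd_size_type example_section_size(asection *section)
{
#ifdef bfd_get_section_size
	return bfd_get_section_size(section);
#else
	return bfd_section_size(section);
#endif
}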
Signed-off-by: Changbin Du <changbin.du(a)gmail.com>
Acked-by: Jiri Olsa <jolsa(a)redhat.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Link: http://lore.kernel.org/lkml/20200128152938.31413-1-changbin.du@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme(a)redhat.com>
Signed-off-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Reviewed-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
tools/perf/util/srcline.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index af3f9b9..b8e7761 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -191,16 +191,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data)
bfd_vma pc, vma;
bfd_size_type size;
struct a2l_data *a2l = data;
+ flagword flags;
if (a2l->found)
return;
- if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+#ifdef bfd_get_section_flags
+ flags = bfd_get_section_flags(abfd, section);
+#else
+ flags = bfd_section_flags(section);
+#endif
+ if ((flags & SEC_ALLOC) == 0)
return;
pc = a2l->addr;
+#ifdef bfd_get_section_vma
vma = bfd_get_section_vma(abfd, section);
+#else
+ vma = bfd_section_vma(section);
+#endif
+#ifdef bfd_get_section_size
size = bfd_get_section_size(section);
+#else
+ size = bfd_section_size(section);
+#endif
if (pc < vma || pc >= vma + size)
return;
--
1.8.3
1
0
subscribe linux-kernel
3
2
From: Liu Yanshi <liuyanshi(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
pcie_cae adds an interface to get the chip count (chipnums) of the
current system.
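A minimal user-space sketch of how the new ioctl might be exercised; the
/dev/pcie_reg_dev path is an assumption based on the driver's DEVICE_NAME,
and the raw command value mirrors the PCIE_CMD_GET_CHIPNUMS definition
added below:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define PCIE_CMD_GET_CHIPNUMS 0x01	/* must match the driver definition */

int main(void)
{
	unsigned int chip_nums = 0;
	/* assumes the misc device node can be opened read-only */
	int fd = open("/dev/pcie_reg_dev", O_RDONLY);

	if (fd < 0)
		return 1;
	/* the driver copies a 32-bit chip count back to the user buffer */
	if (ioctl(fd, PCIE_CMD_GET_CHIPNUMS, &chip_nums) == 0)
		printf("chip nums: %u\n", chip_nums);
	close(fd);
	return 0;
}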
Signed-off-by: Liu Yanshi <liuyanshi(a)huawei.com>
Reviewed-by: Zhu Xiongxiong <zhuxiongxiong(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
.../controller/hisi-pcie-customer/hisi_pcie_cae.c | 37 ++++++++++++++++++++--
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
index 8d0c801..6229a54 100644
--- a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
+++ b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -21,6 +22,7 @@
#define CHIP_INFO_REG_SIZE 4
#define TYPE_SHIFT 4
#define BIT_SHIFT_8 8
+#define PCIE_CMD_GET_CHIPNUMS 0x01
#define DEVICE_NAME "pcie_reg_dev"
@@ -37,7 +39,7 @@ enum {
MMAP_TYPE_VIRTIO
};
-static int current_chip_nums;
+static u32 current_chip_nums;
static const struct vm_operations_struct mmap_pcie_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
@@ -100,10 +102,10 @@ static int pcie_reg_mmap(struct file *filep, struct vm_area_struct *vma)
return 0;
}
-int pcie_get_chipnums(u32 cpu_info)
+u32 pcie_get_chipnums(u32 cpu_info)
{
int i;
- int chip_count = 0;
+ u32 chip_count = 0;
u32 chip_i_info;
for (i = 0; i < MAX_CHIP_NUM; i++) {
@@ -144,12 +146,41 @@ static int pcie_release(struct inode *inode, struct file *f)
return 0;
}
+static long pcie_reg_ioctl(struct file *pfile, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case PCIE_CMD_GET_CHIPNUMS:
+ if ((void *)arg == NULL) {
+ pr_info("[PCIe Base] invalid arg address\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user((void *)arg, (void *)¤t_chip_nums,
+ sizeof(int))) {
+ pr_info("[PCIe Base] copy chip_nums to usr failed\n");
+ ret = -EINVAL;
+ }
+ break;
+
+ default:
+ pr_info("[PCIe Base] invalid pcie ioctl cmd:%u\n", cmd);
+ break;
+ }
+
+ return ret;
+}
+
static const struct file_operations pcie_dfx_fops = {
.owner = THIS_MODULE,
.open = pcie_open,
.release = pcie_release,
.llseek = noop_llseek,
.mmap = pcie_reg_mmap,
+ .unlocked_ioctl = pcie_reg_ioctl,
};
static struct miscdevice pcie_dfx_misc = {
--
1.8.3
1
0
category: other
bugzilla: NA
CVE: NA
----------------------------------
The ccflags removed in this patch shouldn't be in the open kernel tree.
In particular, -fstack-protector-strong causes a compile error when
CONFIG_STACKPROTECTOR_STRONG isn't enabled.
Signed-off-by: Zhengyuan Liu <liuzhengyuan(a)tj.kylinos.cn>
---
drivers/net/ethernet/hisilicon/hns3/Makefile | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 6653e81..0365f1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,15 +3,6 @@
# Makefile for the HISILICON network device drivers.
#
-# Add security options
-ccflags-y += -fstack-protector-strong
-ccflags-y += -Wl,-z,relro,-z,now
-ccflags-y += -Wl,-z,noexecstack
-ccflags-y += -D_FORTIFY_SOURCE=2 -O2
-ccflags-y += -fvisibility=hidden
-ccflags-y += -Wformat=2 -Wfloat-equal
-ccflags-y += -fsigned-char
-
ccflags-y += -DCONFIG_IT_VALIDATION
ccflags-y += -DCONFIG_HNS3_TEST
--
2.7.4
3
2
cc kernel(a)openeuler.org.
Just for members who may not know, dwadm(a)pcl.ac.cn is from Peng Cheng
Laboratory (鹏程实验室) and will help on issues related to the VMs hosted there.
On Sun, Apr 26, 2020 at 10:08 AM Xie XiuQi via Infra <infra(a)openeuler.org>
wrote:
> Thank you for your enthusiastic participation. We are working with our colleagues at Peng Cheng Laboratory to jointly locate the issue.
> On 2020/4/25 13:30, Qichen Zhang wrote:
>
>
> Qichen Zhang
> Email: 17852657226(a)163.com
>
>
> --------- Forwarded message ---------
> From: Qichen Zhang <17852657226(a)163.com>
> Date: April 25, 2020, 13:26
> To: Xiehong (Cynthia) <xiehong(a)huawei.com>
> Cc:
> Subject: Re: The issue has been updated on Gitee, please check it
> Dear engineers,
> Hello!
>
> The bug still exists; the message
> kernel watchdog Bug soft lockup
> is still reported. It has been posted on the issue. I waited from around
> 7 pm last night until 1 am this morning, and the bug is still there.
>
> Could you please look into how to solve it?
> This bug was found last Friday; we can't just leave it sitting there.
> If you need anything from me, just tell me directly.
> My name is Zhang Qichen; my phone number, which is also my WeChat ID, is 17852657226.
> I hope we can all work together to resolve this bug as soon as possible.
>
> Looking forward to your reply.
>
>
> Qichen Zhang
> Email: 17852657226(a)163.com
>
>
> On April 21, 2020 at 14:17, Xiehong (Cynthia) <xiehong(a)huawei.com> wrote:
>
> HI
>
>
>
> Thanks a lot!
>
>
>
> We will keep tracking this in the community's issue system.
>
>
>
>
>
> *From:* Qichen Zhang [mailto:17852657226@163.com]
> *Sent:* April 21, 2020, 13:27
> *To:* Xiehong (Cynthia) <xiehong(a)huawei.com>
> *Subject:* The issue has been updated on Gitee, please check it
>
>
>
> Dear engineers,
>
> Hello!
>
> I have updated the issue on Gitee, please check it:
>
> https://gitee.com/openeuler/community/issues/I1F13I?from=project-issue
>
> I'm happy to work with everyone to resolve this bug.
>
> — —
>
> Qichen Zhang
>
>
> _______________________________________________
> Community mailing list -- community(a)openeuler.org
> To unsubscribe send an email to community-leave(a)openeuler.org
>
> _______________________________________________
> Infra mailing list -- infra(a)openeuler.org
> To unsubscribe send an email to infra-leave(a)openeuler.org
>
--
Regards
Fred Li (李永乐)
1
0
[PATCH 01/25] net: hns3: merge mac state HCLGE_MAC_TO_DEL and HCLGE_MAC_DEL_FAIL
by Yang Yingliang 26 Apr '20
From: shenhao <shenhao21(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------------------------
HCLGE_MAC_DEL_FAIL is an intermediate state for mac address handling;
it can be merged with HCLGE_MAC_TO_DEL.
This patch also renames the enum from HCLGE_MAC_ADDR_STATE to
HCLGE_MAC_NODE_STATE, since it is used to indicate the state of a
mac node.
Signed-off-by: Jian Shen <shenjian15(a)huawei.com>
Signed-off-by: shenhao <shenhao21(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 46 ++++++++++------------
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 9 ++---
2 files changed, 25 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 478a3b5..d985c68 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7422,14 +7422,13 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
return NULL;
}
-static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
- enum HCLGE_MAC_ADDR_STATE state)
+static void hclge_update_mac_node(struct hclge_vport_mac_addr_cfg *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
{
switch (state) {
/* from set_rx_mode or tmp_add_list */
case HCLGE_MAC_TO_ADD:
- if (mac_node->state == HCLGE_MAC_TO_DEL ||
- mac_node->state == HCLGE_MAC_DEL_FAIL)
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
mac_node->state = HCLGE_MAC_ACTIVE;
break;
/* only from set_rx_mode */
@@ -7442,14 +7441,7 @@ static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
}
break;
/* only from tmp_add_list, the mac_node->state won't be
- * HCLGE_MAC_ACTIVE/HCLGE_MAC_DEL_FAIL/HCLGE_MAC_ADD_FAIL
- */
- case HCLGE_MAC_DEL_FAIL:
- if (mac_node->state == HCLGE_MAC_TO_ADD)
- mac_node->state = HCLGE_MAC_ACTIVE;
- break;
- /* only from tmp_add_list, the mac_node->state won't be
- * HCLGE_MAC_ACTIVE/HCLGE_MAC_DEL_FAIL/HCLGE_MAC_ADD_FAIL
+ * ACTIVE.
*/
case HCLGE_MAC_ACTIVE:
if (mac_node->state == HCLGE_MAC_TO_ADD)
@@ -7460,7 +7452,7 @@ static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
}
int hclge_update_mac_list(struct hclge_vport *vport,
- enum HCLGE_MAC_ADDR_STATE state,
+ enum HCLGE_MAC_NODE_STATE state,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
@@ -7480,7 +7472,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
*/
mac_node = hclge_find_mac_node(list, addr);
if (mac_node) {
- hclge_mac_node_convert(mac_node, state);
+ hclge_update_mac_node(mac_node, state);
spin_unlock_bh(&vport->mac_list_lock);
return 0;
}
@@ -7731,7 +7723,6 @@ static void hclge_unsync_mac_list(struct hclge_vport *vport,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- mac_node->state = HCLGE_MAC_DEL_FAIL;
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
&vport->state);
break;
@@ -7753,13 +7744,13 @@ static bool hclge_sync_from_add_list(struct list_head *add_list,
* uc/mc_mac_list, it means have received a TO_DEL request
* during the time window of adding the mac address into mac
* table. if mac_node state is ACTIVE, then change it to TO_DEL,
- * then it will be removed at next time. else it must be TO_ADD
- * or ADD_FAIL, this address hasn't been added into mac table,
+ * then it will be removed at next time. else it must be TO_ADD,
+ * this address hasn't been added into mac table,
* so just remove the mac node.
*/
new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
- hclge_mac_node_convert(new_node, mac_node->state);
+ hclge_update_mac_node(new_node, mac_node->state);
list_del(&mac_node->node);
kfree(mac_node);
} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
@@ -7782,7 +7773,16 @@ static void hclge_sync_from_del_list(struct list_head *del_list,
list_for_each_entry_safe(mac_node, tmp, del_list, node) {
new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
- hclge_mac_node_convert(new_node, mac_node->state);
+ /* If the mac addr exists in the mac list, it means a new TO_ADD
+ * request was received during the time window of configuring the
+ * mac address. The mac node state is TO_ADD and the address is
+ * already in the hardware (because the delete failed), so we just
+ * need to change the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
} else {
list_del(&mac_node->node);
list_add_tail(&mac_node->node, mac_list);
@@ -7850,7 +7850,6 @@ static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
list_del(&mac_node->node);
list_add_tail(&mac_node->node, &tmp_del_list);
break;
@@ -7962,7 +7961,6 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
list_add_tail(&mac_cfg->node, &tmp_del_list);
@@ -8021,7 +8019,6 @@ static void hclge_uninit_mac_list(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_node->node);
list_add_tail(&mac_node->node, &tmp_del_list);
@@ -8221,7 +8218,7 @@ void hclge_replace_mac_node(struct list_head *list, const u8 *old_addr,
}
void hclge_modify_mac_node_state(struct list_head *list, const u8 *addr,
- enum HCLGE_MAC_ADDR_STATE state)
+ enum HCLGE_MAC_NODE_STATE state)
{
struct hclge_vport_mac_addr_cfg *mac_node;
@@ -8989,8 +8986,7 @@ static void hclge_mac_node_convert_for_reset(struct list_head *list)
list_for_each_entry_safe(mac_node, tmp, list, node) {
if (mac_node->state == HCLGE_MAC_ACTIVE) {
mac_node->state = HCLGE_MAC_TO_ADD;
- } else if (mac_node->state == HCLGE_MAC_TO_DEL ||
- mac_node->state == HCLGE_MAC_DEL_FAIL) {
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
list_del(&mac_node->node);
kfree(mac_node);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 5e462cf..a1fa782 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -640,16 +640,15 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
-enum HCLGE_MAC_ADDR_STATE {
+enum HCLGE_MAC_NODE_STATE {
HCLGE_MAC_TO_ADD,
HCLGE_MAC_TO_DEL,
- HCLGE_MAC_DEL_FAIL,
HCLGE_MAC_ACTIVE
};
struct hclge_vport_mac_addr_cfg {
struct list_head node;
- enum HCLGE_MAC_ADDR_STATE state;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -1016,13 +1015,13 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
int hclge_update_mac_list(struct hclge_vport *vport,
- enum HCLGE_MAC_ADDR_STATE state,
+ enum HCLGE_MAC_NODE_STATE state,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr);
void hclge_replace_mac_node(struct list_head *list, const u8 *old_addr,
const u8 *new_addr, bool keep_old);
void hclge_modify_mac_node_state(struct list_head *list, const u8 *addr,
- enum HCLGE_MAC_ADDR_STATE state);
+ enum HCLGE_MAC_NODE_STATE state);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
--
1.8.3
1
24
From: Huang jun <huangjun61(a)huawei.com>
If we want ACPI GED to support waking from freeze, we need to implement
the suspend/resume functions. In these two callbacks, the ACPI _GPO and
_GPP methods are called to set the sleep flag and to handle the 4s
power-button debounce (anti-shake).
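For reference, the hooks added below are wired up through the standard
platform-driver system-sleep pattern: a dev_pm_ops filled by
SET_SYSTEM_SLEEP_PM_OPS and attached to the driver's .pm field. A
stripped-down sketch of that pattern, with illustrative names only:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the device and arm its wakeup sources here */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* restore the device state here */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm = &example_pm_ops,
	},
};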
Signed-off-by: Huangjun <huangjun63(a)huawei.com>
---
drivers/acpi/evged.c | 108 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 108 insertions(+)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..dde8fbff8d19 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -46,11 +46,20 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_handle {
+ struct timer_list timer;/* For 4s anti-shake of power button */
+ acpi_handle gpp_handle; /* ACPI Handle: enable shutdown */
+ acpi_handle gpo_handle; /* ACPI Handle: set sleep flag */
+};
+
struct acpi_ged_device {
struct device *dev;
+ struct acpi_ged_handle *wakeup_handle;
struct list_head event_list;
};
@@ -131,6 +140,34 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_OK;
}
+#ifdef CONFIG_PM_SLEEP
+static void init_ged_handle(struct acpi_ged_device *geddev) {
+ struct acpi_ged_handle *wakeup_handle;
+ acpi_handle gpo_handle = NULL;
+ acpi_handle gpp_handle = NULL;
+ acpi_status acpi_ret;
+
+ wakeup_handle = devm_kzalloc(geddev->dev, sizeof(*wakeup_handle), GFP_KERNEL);
+ if (!wakeup_handle)
+ return;
+
+ geddev->wakeup_handle = wakeup_handle;
+
+ /* Initialize wakeup_handle, prepare for ged suspend and resume */
+ timer_setup(&wakeup_handle->timer, NULL, 0);
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(geddev->dev), "_GPO", &gpo_handle);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_info(geddev->dev, "cannot locate _GPO method\n");
+ wakeup_handle->gpo_handle = gpo_handle;
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(geddev->dev), "_GPP", &gpp_handle);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_info(geddev->dev, "cannot locate _GPP method\n");
+ wakeup_handle->gpp_handle = gpp_handle;
+}
+#endif
+
static int ged_probe(struct platform_device *pdev)
{
struct acpi_ged_device *geddev;
@@ -149,6 +186,9 @@ static int ged_probe(struct platform_device *pdev)
return -EINVAL;
}
platform_set_drvdata(pdev, geddev);
+#ifdef CONFIG_PM_SLEEP
+ init_ged_handle(geddev);
+#endif
return 0;
}
@@ -164,6 +204,10 @@ static void ged_shutdown(struct platform_device *pdev)
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
+
+ if (geddev->wakeup_handle)
+ del_timer(&geddev->wakeup_handle->timer);
+
}
static int ged_remove(struct platform_device *pdev)
@@ -172,6 +216,67 @@ static int ged_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static void ged_timer_callback(struct timer_list *t)
+{
+ struct acpi_ged_handle *wakeup_handle = from_timer(wakeup_handle, t, timer);
+ acpi_status acpi_ret;
+
+ /* _GPP method enable power button */
+ if (wakeup_handle && wakeup_handle->gpp_handle) {
+ acpi_ret = acpi_execute_simple_method(wakeup_handle->gpp_handle, NULL, ACPI_IRQ_MODEL_GIC);
+ if (ACPI_FAILURE(acpi_ret))
+ pr_warn("_GPP method execution failed\n");
+ }
+}
+
+static int ged_suspend(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_handle *wakeup_handle = geddev->wakeup_handle;
+ struct acpi_ged_event *event, *next;
+ acpi_status acpi_ret;
+
+ /* _GPO method set sleep flag */
+ if (wakeup_handle && wakeup_handle->gpo_handle) {
+ acpi_ret = acpi_execute_simple_method(wakeup_handle->gpo_handle, NULL, ACPI_IRQ_MODEL_GIC);
+ if (ACPI_FAILURE(acpi_ret)) {
+ pr_warn("_GPO method execution failed\n");
+ return AE_ERROR;
+ }
+ }
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node)
+ enable_irq_wake(event->irq);
+
+ return 0;
+}
+
+static int ged_resume(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_handle *wakeup_handle = geddev->wakeup_handle;
+ struct acpi_ged_event *event, *next;
+
+ /* use timer to complete 4s anti-shake */
+ if (wakeup_handle && wakeup_handle->gpp_handle) {
+ wakeup_handle->timer.expires = jiffies + (4 * HZ);
+ wakeup_handle->timer.function = ged_timer_callback;
+ add_timer(&wakeup_handle->timer);
+ }
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node)
+ disable_irq_wake(event->irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ged_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ged_suspend, ged_resume)
+};
+#endif
+
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -184,6 +289,9 @@ static struct platform_driver ged_driver = {
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &ged_pm_ops,
+#endif
},
};
builtin_platform_driver(ged_driver);
--
2.20.1
1
1
Alexei Avshalom Lazar (1):
wil6210: add general initialization/size checks
Amir Goldstein (1):
ovl: fix value of i_ino for lower hardlink corner case
Austin Kim (1):
mm/vmalloc.c: move 'area->pages' after if statement
Can Guo (1):
scsi: ufs: Fix ufshcd_hold() caused scheduling while atomic
Colin Ian King (2):
ASoC: Intel: mrfld: fix incorrect check on p->sink
ASoC: Intel: mrfld: return error codes when an error occurs
DENG Qingfang (1):
net: dsa: mt7530: fix tagged frames pass-through in VLAN-unaware mode
Dedy Lansky (2):
wil6210: check rx_buff_mgmt before accessing it
wil6210: make sure Rx ring sizes are correlated
Florian Fainelli (1):
net: stmmac: dwmac-sunxi: Provide TX and RX fifo sizes
Greg Kroah-Hartman (1):
Linux 4.19.117
James Morse (1):
x86/resctrl: Preserve CDP enable over CPU hotplug
Jan Kara (1):
ext4: do not zeroout extents beyond i_disksize
Jim Mattson (1):
kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
John Allen (1):
x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
Josef Bacik (1):
btrfs: check commit root generation in should_ignore_root
Josh Triplett (2):
ext4: fix incorrect group count in ext4_fill_super error message
ext4: fix incorrect inodes per group in error message
Karthick Gopalasubramanian (1):
wil6210: remove reset file from debugfs
Konstantin Khlebnikov (1):
net: revert default NAPI poll timeout to 2 jiffies
Maurizio Lombardi (2):
scsi: target: remove boilerplate code
scsi: target: fix hang when multiple threads try to destroy the same
iscsi session
Maya Erez (1):
wil6210: ignore HALP ICR if already handled
Reinette Chatre (1):
x86/resctrl: Fix invalid attempt at removing the default resource
group
Sasha Levin (1):
usb: dwc3: gadget: don't enable interrupt when disabling endpoint
Sebastian Andrzej Siewior (1):
amd-xgbe: Use __napi_schedule() in BH context
Sergei Lopatin (1):
drm/amd/powerplay: force the trim of the mclk dpm_levels if OD is
enabled
Sven Van Asbroeck (1):
pwm: pca9685: Fix PWM/GPIO inter-operation
Taehee Yoo (1):
hsr: check protocol version in hsr_newlink()
Takashi Iwai (4):
ALSA: usb-audio: Filter error from connector kctl ops, too
ALSA: usb-audio: Don't override ignore_ctl_error value from the map
ALSA: usb-audio: Don't create jack controls for PCM terminals
ALSA: usb-audio: Check mapping at creating connector controls, too
Taras Chornyi (1):
net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin
Thinh Nguyen (1):
usb: dwc3: gadget: Don't clear flags before transfer ended
Tim Stallard (1):
net: ipv6: do not consider routes via gateways for anycast address
check
Tuomas Tynkkynen (1):
mac80211_hwsim: Use kstrndup() in place of kasprintf()
Vasily Averin (1):
keys: Fix proc_keys_next to increase position index
Wang Wenhu (1):
net: qrtr: send msgs from local of same id as broadcast
Xiao Yang (1):
tracing: Fix the race between registering 'snapshot' event trigger and
triggering 'snapshot' operation
zhangyi (F) (1):
jbd2: improve comments about freeing data buffers whose page mapping
is NULL
Makefile | 2 +-
arch/x86/include/asm/microcode_amd.h | 2 +-
arch/x86/kernel/cpu/intel_rdt.c | 2 +
arch/x86/kernel/cpu/intel_rdt.h | 1 +
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 16 ++++-
arch/x86/kvm/cpuid.c | 3 +-
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 +-
drivers/net/dsa/mt7530.c | 18 +++--
drivers/net/dsa/mt7530.h | 7 ++
drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 2 +
drivers/net/wireless/ath/wil6210/debugfs.c | 29 +-------
drivers/net/wireless/ath/wil6210/interrupt.c | 12 ++--
drivers/net/wireless/ath/wil6210/main.c | 5 +-
drivers/net/wireless/ath/wil6210/txrx.c | 4 +-
drivers/net/wireless/ath/wil6210/txrx_edma.c | 14 +++-
drivers/net/wireless/ath/wil6210/wil6210.h | 3 +-
drivers/net/wireless/ath/wil6210/wmi.c | 2 +-
drivers/net/wireless/mac80211_hwsim.c | 12 ++--
drivers/pwm/pwm-pca9685.c | 85 +++++++++++++----------
drivers/scsi/ufs/ufshcd.c | 5 ++
drivers/target/iscsi/iscsi_target.c | 79 ++++++---------------
drivers/target/iscsi/iscsi_target.h | 1 -
drivers/target/iscsi/iscsi_target_configfs.c | 5 +-
drivers/target/iscsi/iscsi_target_login.c | 5 +-
drivers/usb/dwc3/gadget.c | 18 ++---
fs/btrfs/relocation.c | 4 +-
fs/ext4/extents.c | 8 +--
fs/ext4/super.c | 6 +-
fs/jbd2/commit.c | 7 +-
fs/overlayfs/inode.c | 4 +-
include/net/ip6_route.h | 1 +
include/target/iscsi/iscsi_target_core.h | 2 +-
kernel/trace/trace_events_trigger.c | 10 +--
mm/vmalloc.c | 8 ++-
net/core/dev.c | 3 +-
net/hsr/hsr_netlink.c | 10 ++-
net/ipv4/devinet.c | 13 ++--
net/qrtr/qrtr.c | 7 +-
security/keys/proc.c | 2 +
sound/soc/intel/atom/sst-atom-controls.c | 2 +-
sound/soc/intel/atom/sst/sst_pci.c | 2 +-
sound/usb/mixer.c | 31 +++++----
sound/usb/mixer_maps.c | 4 +-
44 files changed, 251 insertions(+), 212 deletions(-)
--
1.8.3
1
41
[PATCH] btrfs: tree-checker: Enhance chunk checker to validate chunk profile
by Yang Yingliang 22 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.2-rc1
commit 80e46cf22ba0bcb57b39c7c3b52961ab3a0fd5f2
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19378
-------------------------------------------------
Btrfs-progs already has a comprehensive type checker to ensure there
is only 0 (SINGLE profile) or 1 (DUP/RAID0/1/5/6/10) bit set in the
chunk profile bits.
Do the same in the kernel.
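The new check boils down to "the profile bits must be zero or have exactly
one bit set". If it were factored out as a helper, a minimal sketch could
look like the following (the helper name is illustrative; it assumes the
btrfs headers that define BTRFS_BLOCK_GROUP_PROFILE_MASK are in scope):

#include <linux/log2.h>
#include <linux/types.h>

/* Hypothetical helper: a chunk profile is valid when at most one of the
 * DUP/RAID profile bits is set; zero means the SINGLE profile.
 */
static bool example_chunk_profile_valid(u64 type)
{
	u64 profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return profile == 0 || is_power_of_2(profile);
}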
Reported-by: Yoon Jungyeon <jungyeon(a)gatech.edu>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=202765
Reviewed-by: Nikolay Borisov <nborisov(a)suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn(a)suse.de>
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/volumes.c
[yyl: btrfs_check_chunk_valid() is defined in fs/btrfs/volumes.c]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/volumes.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 99260d2..110cdfd 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6391,6 +6391,14 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
return -EIO;
}
+ if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
+ btrfs_err(fs_info,
+ "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
return -EIO;
--
1.8.3
1
0
From: Cheng Jian <cj.chengjian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 5391/28338/24634
CVE: NA
-----------------------------------------------
The previous patch added a klp_rel_state field to the module
structure, which caused a KABI change. Fix this by hiding the field
from genksyms and overlaying it on a reserved slot.
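The fix below uses the usual KABI-preserving trick: the new field is
hidden from genksyms and overlaid on a reserved slot via an anonymous
union, so the structure layout and the symbol CRCs stay unchanged. A
generic sketch of the pattern, with illustrative field names rather than
the ones in this patch:

struct example {
	/* ... existing members that are part of the frozen KABI ... */
#if defined(CONFIG_LIVEPATCH) && !defined(__GENKSYMS__)
	union {
		/* new field, invisible to genksyms so the CRC is unchanged */
		int new_state;
		/* padding member keeping the size of the reserved slot */
		long new_state_kabi_padding;
	};
#else
	/* original reserved slot, still seen by genksyms */
	long kabi_reserved1;
#endif
};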
Signed-off-by: Cheng Jian <cj.chengjian(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
include/linux/module.h | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 4994243..e1f3418 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -469,19 +469,6 @@ struct module {
/* Elf information */
struct klp_modinfo *klp_info;
- /*
- * livepatch should relocate the key of jump_label by
- * using klp_write_module_reloc. So it's necessary to
- * do jump_label_apply_nops() and jump_label_add_module()
- * later after livepatch relocation finised.
- *
- * for normal module :
- * always MODULE_KLP_REL_DONE.
- * for livepatch module :
- * init as MODULE_KLP_REL_UNDO,
- * set to MODULE_KLP_REL_DONE when relocate completed.
- */
- enum MODULE_KLP_REL_STATE klp_rel_state;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -507,7 +494,27 @@ struct module {
unsigned int num_ei_funcs;
#endif
+#if defined(CONFIG_LIVEPATCH) && !defined(__GENKSYMS__)
+ union {
+ /*
+ * livepatch should relocate the key of jump_label by
+ * using klp_write_module_reloc. So it's necessary to
+ * do jump_label_apply_nops() and jump_label_add_module()
+ * later after livepatch relocation finised.
+ *
+ * for normal module :
+ * always MODULE_KLP_REL_DONE.
+ * for livepatch module :
+ * init as MODULE_KLP_REL_UNDO,
+ * set to MODULE_KLP_REL_DONE when relocate completed.
+ */
+ enum MODULE_KLP_REL_STATE klp_rel_state;
+ long klp_rel_state_KABI;
+ };
+#else
KABI_RESERVE(1)
+#endif
+
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
--
1.8.3
1
0
[PATCH v1] scsi: hisi_sas: do not reset the timer to wait for phyup when phy already up
by Luo Jiaxing 22 Apr '20
We found that after phy up, the hardware may report another OOB interrupt
that is not followed by a phy up interrupt, like:
oob ready -> phy up -> DEV found -> oob ready -> wait phy up -> timeout
We run a link reset when waiting for phy up times out, which puts a normal
disk into reset processing. So work around this in the code so that the
spurious OOB interrupt will not start the timer to wait for phy up.
Signed-off-by: Luo Jiaxing <luojiaxing(a)huawei.com>
Signed-off-by: John Garry <john.garry(a)huawei.com>
---
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index acf2fc6..5b80856 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1898,8 +1898,11 @@ static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
if (irq_value0 & CHL_INT0_PHY_RDY_MSK) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.function = wait_phyup_timedout_v3_hw;
phy->timer.expires = jiffies +
WAIT_PHYUP_TIMEOUT_V3_HW * HZ;
--
2.7.4
1
0
Hi all,
We are frequently asked which patches this kernel carries compared with
kernel 4.19. [1] shows how to find them; we hope it helps. If more
information is needed, please leave a message in the issue [1].
[1] https://gitee.com/openeuler/kernel/issues/I1F430
--
Regards
Fred Li (李永乐)
1
0
From: Hao Shen <shenhao21(a)huawei.com>
The suspend and resume feature is useful when you want to save the
current state of your machine and continue work later from the same
state. This patch makes the hns3 net device resume to the same state
after S3 and S4.
Signed-off-by: Weiwei Deng <dengweiwei(a)huawei.com>
Signed-off-by: Hao Shen <shenhao21(a)huawei.com>
Signed-off-by: Jian Shen <shenjian15(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 32 ++++++++++
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 70 ++++++++++++++++++++++
3 files changed, 104 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index bb29b46..8b1e690 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -561,6 +561,8 @@ struct hnae3_ae_ops {
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
u32 len, u8 *data);
+ int (*suspend)(struct hnae3_ae_dev *ae_dev);
+ int (*resume)(struct hnae3_ae_dev *ae_dev);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4eca8cf..de3d37c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2226,6 +2226,34 @@ static void hns3_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
+#ifdef CONFIG_PM
+static int hns3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->suspend)
+ ae_dev->ops->suspend(ae_dev);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int hns3_resume(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (ae_dev->ops->resume)
+ return ae_dev->ops->resume(ae_dev);
+
+ return 0;
+}
+#endif
+
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2310,6 +2338,10 @@ struct pci_driver hns3_driver = {
.probe = hns3_probe,
.remove = hns3_remove,
.shutdown = hns3_shutdown,
+#ifdef CONFIG_PM
+ .suspend = hns3_suspend,
+ .resume = hns3_resume,
+#endif
.sriov_configure = hns3_pci_sriov_configure,
.err_handler = &hns3_err_handler,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2ae05f..478a3b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3820,6 +3820,72 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_task_schedule(hdev);
}
+#ifdef CONFIG_PM
+static int hclge_suspend(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+
+static int hclge_resume(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ rtnl_lock();
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+#endif
+
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
@@ -11525,6 +11591,10 @@ struct hnae3_ae_ops hclge_ops = {
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
.get_module_eeprom = hclge_get_module_eeprom,
+#ifdef CONFIG_PM
+ .suspend = hclge_suspend,
+ .resume = hclge_resume,
+#endif
};
static struct hnae3_ae_algo ae_algo = {
--
1.9.1
1
0
[PATCH hulk-4.19-next] net: hns3: add suspend/resume function for hns3 driver
by Jian Shen 21 Apr '20
From: Hao Shen <shenhao21(a)huawei.com>
The suspend and resume feature is useful when you want to save the
current state of your machine and continue work later from the same
state. This patch makes the hns3 net device resume to the same state
after S3 and S4.
Signed-off-by: Weiwei Deng <dengweiwei(a)huawei.com>
Signed-off-by: Hao Shen <shenhao21(a)huawei.com>
Signed-off-by: Jian Shen <shenjian15(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 32 ++++++++++
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 70 ++++++++++++++++++++++
3 files changed, 104 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index bb29b46..8b1e690 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -561,6 +561,8 @@ struct hnae3_ae_ops {
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
u32 len, u8 *data);
+ int (*suspend)(struct hnae3_ae_dev *ae_dev);
+ int (*resume)(struct hnae3_ae_dev *ae_dev);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4eca8cf..de3d37c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2226,6 +2226,34 @@ static void hns3_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
+#ifdef CONFIG_PM
+static int hns3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->suspend)
+ ae_dev->ops->suspend(ae_dev);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int hns3_resume(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (ae_dev->ops->resume)
+ return ae_dev->ops->resume(ae_dev);
+
+ return 0;
+}
+#endif
+
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2310,6 +2338,10 @@ struct pci_driver hns3_driver = {
.probe = hns3_probe,
.remove = hns3_remove,
.shutdown = hns3_shutdown,
+#ifdef CONFIG_PM
+ .suspend = hns3_suspend,
+ .resume = hns3_resume,
+#endif
.sriov_configure = hns3_pci_sriov_configure,
.err_handler = &hns3_err_handler,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2ae05f..478a3b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3820,6 +3820,72 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_task_schedule(hdev);
}
+#ifdef CONFIG_PM
+static int hclge_suspend(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+
+static int hclge_resume(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ rtnl_lock();
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+#endif
+
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
@@ -11525,6 +11591,10 @@ struct hnae3_ae_ops hclge_ops = {
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
.get_module_eeprom = hclge_get_module_eeprom,
+#ifdef CONFIG_PM
+ .suspend = hclge_suspend,
+ .resume = hclge_resume,
+#endif
};
static struct hnae3_ae_algo ae_algo = {
--
1.9.1
1
0
Alain Volmat (1):
i2c: st: fix missing struct parameter description
Alex Vesker (1):
IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
Alexander Duyck (1):
mm: Use fixed constant in page_frag_alloc instead of size + 1
Alexander Sverdlin (1):
genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy()
Alexey Dobriyan (1):
null_blk: fix spurious IO errors after failed past-wp access
Andrei Botila (1):
crypto: caam - update xts sector size for large input length
Andy Lutomirski (1):
selftests/x86/ptrace_syscall_32: Fix no-vDSO segfault
Andy Shevchenko (1):
mfd: dln2: Fix sanity checking for endpoints
Aneesh Kumar K.V (1):
powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap
PTE entries
Anssi Hannula (1):
tools: gpio: Fix out-of-tree build regression
Ard Biesheuvel (1):
efi/x86: Ignore the memory attributes table on i386
Arvind Sankar (1):
x86/boot: Use unsigned comparison for addresses
Bart Van Assche (2):
null_blk: Fix the null_add_dev() error path
null_blk: Handle null_add_dev() failures properly
Benoit Parrot (1):
media: ti-vpe: cal: fix disable_irqs to only the intended target
Bob Liu (1):
dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
Bob Peterson (1):
gfs2: Don't demote a glock until its revokes are written
Boqun Feng (1):
locking/lockdep: Avoid recursion in
lockdep_count_{for,back}ward_deps()
Changwei Ge (1):
ocfs2: no need try to truncate file beyond i_size
Chris Wilson (1):
drm: Remove PageReserved manipulation from drm_pci_alloc
Christian Gmeiner (2):
drm/etnaviv: rework perfmon query infrastructure
etnaviv: perfmon: fix total and idle HI cyleces readout
Christoph Niedermaier (1):
cpufreq: imx6q: Fixes unwanted cpu overclocking on i.MX6ULL
Christophe Leroy (1):
powerpc/kprobes: Ignore traps that happened in real mode
Clement Courbet (1):
powerpc: Make setjmp/longjmp signature standard
Cédric Le Goater (1):
powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured
IPIs
David Hildenbrand (2):
KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
KVM: s390: vsie: Fix delivery of addressing exceptions
Dongchun Zhu (1):
media: i2c: ov5695: Fix power on and off sequences
Eric Biggers (2):
fs/filesystems.c: downgrade user-reachable WARN_ONCE() to
pr_warn_once()
kmod: make request_module() return an error when autoloading is
disabled
Eric W. Biederman (1):
signal: Extend exec_id to 64bits
Filipe Manana (2):
Btrfs: fix crash during unmount due to race with delayed inode workers
btrfs: fix missing file extent item for hole after ranged fsync
Fredrik Strupe (1):
arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
Frieder Schrempf (2):
mtd: spinand: Stop using spinand->oobbuf for buffering bad block
markers
mtd: spinand: Do not erase the block before writing a bad block marker
Gao Xiang (1):
erofs: correct the remaining shrink objects
Gary Lin (1):
efi/x86: Fix the deletion of variables in mixed mode
Gilad Ben-Yossef (4):
crypto: ccree - zero out internal struct before use
crypto: ccree - don't mangle the request assoclen
crypto: ccree - dec auth tag size from cryptlen map
crypto: ccree - only try to map auth tag if needed
Greg Kroah-Hartman (1):
Linux 4.19.116
Guoqing Jiang (1):
md: check arrays is suspended in mddev_detach before call quiesce
operations
Gustavo A. R. Silva (1):
MIPS: OCTEON: irq: Fix potential NULL pointer dereference
Hadar Gat (1):
crypto: ccree - improve error handling
Hans de Goede (1):
Input: i8042 - add Acer Aspire 5738z to nomux list
Huacai Chen (1):
MIPS/tlbex: Fix LDDIR usage in setup_pw() for Loongson-3
James Morse (1):
firmware: arm_sdei: fix double-lock on hibernate with shared events
James Smart (2):
nvme-fc: Revert "add module to ops template to allow module
references"
nvme: Treat discovery subsystems as unique subsystems
Jan Engelhardt (1):
acpi/x86: ignore unspecified bit positions in the ACPI global lock
field
John Garry (1):
libata: Remove extra scsi_host_put() in ata_scsi_add_hosts()
Josef Bacik (5):
btrfs: remove a BUG_ON() from merge_reloc_roots()
btrfs: track reloc roots based on their commit root bytenr
btrfs: set update the uuid generation as soon as possible
btrfs: drop block from cache on error in relocation
btrfs: use nofs allocations for running delayed items
Juergen Gross (1):
xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
Junyong Sun (1):
firmware: fix a double abort case with fw_load_sysfs_fallback
Kai-Heng Feng (1):
libata: Return correct status in sata_pmp_eh_recover_pm() when
ATA_DFLAG_DETACH is set
Kees Cook (1):
slub: improve bit diffusion for freelist ptr obfuscation
Kishon Vijay Abraham I (1):
PCI: endpoint: Fix for concurrent memory allocation in OB address
region
Konstantin Khlebnikov (1):
block: keep bdi->io_pages in sync with max_sectors_kb for stacked
devices
Laurentiu Tudor (1):
powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
Libor Pechacek (1):
powerpc/pseries: Avoid NULL pointer dereference when drmem is
unavailable
Logan Gunthorpe (1):
PCI/switchtec: Fix init_completion race condition with poll_wait()
Lukas Wunner (1):
PCI: pciehp: Fix indefinite wait on sysfs requests
Lyude Paul (1):
drm/dp_mst: Fix clearing payload state on topology disable
Marc Zyngier (1):
irqchip/gic-v4: Provide irq_retrigger to avoid circular locking
dependency
Martin Blumenstingl (1):
thermal: devfreq_cooling: inline all stubs for
CONFIG_DEVFREQ_THERMAL=n
Masami Hiramatsu (1):
ftrace/kprobe: Show the maxactive number on kprobe_events
Mathias Nyman (1):
xhci: bail out early if driver can't accress host in resume
Matt Ranostay (1):
media: i2c: video-i2c: fix build errors due to 'imply hwmon'
Matthew Garrett (1):
tpm: Don't make log failures fatal
Maxime Ripard (1):
arm64: dts: allwinner: h6: Fix PMU compatible
Michael Ellerman (1):
powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
Michael Mueller (1):
s390/diag: fix display of diagnose call statistics
Michael Wang (1):
sched: Avoid scale real weight down to zero
Michal Hocko (1):
selftests: vm: drop dependencies on page flags from mlock2 tests
Mikulas Patocka (1):
dm writecache: add cond_resched to avoid CPU hangs
Nathan Chancellor (2):
rtc: omap: Use define directive for PIN_CONFIG_ACTIVE_HIGH
misc: echo: Remove unnecessary parentheses and simplify check for zero
Neil Armstrong (1):
usb: dwc3: core: add support for disabling SS instances in park mode
Oliver O'Halloran (1):
cpufreq: powernv: Fix use-after-free
Ondrej Jirman (2):
ARM: dts: sun8i-a83t-tbs-a711: HM5065 doesn't like such a high voltage
bus: sunxi-rsb: Return correct data when mixing 16-bit and 8-bit reads
Paul Cercueil (1):
clk: ingenic/jz4770: Exit with error if CGU init failed
Qian Cai (1):
ext4: fix a data race at inode->i_blocks
Qu Wenruo (1):
btrfs: qgroup: ensure qgroup_rescan_running is only set when the
worker is at least queued
Raju Rangoju (1):
cxgb4/ptp: pass the sign of offset delta in FW CMD
Remi Pommarel (1):
ath9k: Handle txpower changes even when TPC is disabled
Robbie Ko (1):
btrfs: fix missing semaphore unlock in btrfs_sync_file
Rosioru Dragos (1):
crypto: mxs-dcp - fix scatterlist linearization for hash
Sahitya Tummala (1):
block: Fix use-after-free issue accessing struct io_cq
Sam Lunt (1):
perf tools: Support Python 3.8+ in Makefile
Sasha Levin (1):
Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
Sean Christopherson (4):
KVM: nVMX: Properly handle userspace interrupt window request
KVM: x86: Allocate new rmap and large page tracking when moving
memslot
KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
KVM: x86: Gracefully handle __vmalloc() failure during VM allocation
Sean V Kelley (1):
PCI: Add boot interrupt quirk mechanism for Xeon chipsets
Segher Boessenkool (1):
powerpc: Add attributes for setjmp/longjmp
Shetty, Harshini X (EXT-Sony Mobile) (1):
dm verity fec: fix memory leak in verity_fec_dtr
Simon Gander (1):
hfsplus: fix crash and filesystem corruption when deleting files
Sreekanth Reddy (1):
scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
Sriharsha Allenki (1):
usb: gadget: f_fs: Fix use after free issue as part of queue failure
Steffen Maier (1):
scsi: zfcp: fix missing erp_lock in port recovery trigger for
point-to-point
Stephan Gerhold (1):
media: venus: hfi_parser: Ignore HEVC encoding for V1
Subash Abhinov Kasiviswanathan (1):
net: qualcomm: rmnet: Allow configuration updates to existing devices
Sungbo Eo (2):
irqchip/versatile-fpga: Handle chained IRQs properly
irqchip/versatile-fpga: Apply clear-mask earlier
Takashi Iwai (6):
ALSA: usb-audio: Add mixer workaround for TRX40 and co
ALSA: hda: Add driver blacklist
ALSA: hda: Fix potential access overflow in beep helper
ALSA: ice1724: Fix invalid access for enumerated ctl items
ALSA: pcm: oss: Fix regression by buffer overflow fix
ALSA: hda/realtek - Add quirk for MSI GL63
Thinh Nguyen (1):
usb: gadget: composite: Inform controller driver of self-powered
Thomas Gleixner (1):
x86/entry/32: Add missing ASM_CLAC to general_protection entry
Thomas Hebb (3):
ALSA: doc: Document PC Beep Hidden Register on Realtek ALC256
ALSA: hda/realtek - Set principled PC Beep configuration for ALC256
ALSA: hda/realtek - Remove now-unnecessary XPS 13 headphone noise
fixups
Thomas Hellstrom (1):
x86: Don't let pgprot_modify() change the page encryption bit
Trond Myklebust (1):
NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
Vasily Averin (3):
tpm: tpm1_bios_measurements_next should increase position index
tpm: tpm2_bios_measurements_next should increase position index
pstore: pstore_ftrace_seq_next should increase position index
Vitaly Kuznetsov (1):
KVM: VMX: fix crash cleanup when KVM wasn't used
Wen Yang (1):
ipmi: fix hung processes in __get_guid()
Xu Wang (1):
qlcnic: Fix bad kzalloc null test
Yang Xu (1):
KEYS: reaching the keys quotas correctly
Yicong Yang (1):
PCI/ASPM: Clear the correct bits when enabling L1 substates
YueHaibing (1):
powerpc/pseries: Drop pointless static qualifier in vpa_debugfs_init()
Yury Norov (1):
uapi: rename ext2_swab() to swab() and share globally in swab.h
Zheng Wei (1):
net: vxge: fix wrong __VA_ARGS__ usage
Zhenzhong Duan (1):
x86/speculation: Remove redundant arch_smt_update() invocation
Zhiqiang Liu (1):
block, bfq: fix use-after-free in bfq_idle_slice_timer_body
chenqiwu (1):
pstore/platform: fix potential mem leak if pstore_init_fs failed
이경택 (4):
ASoC: fix regwmask
ASoC: dapm: connect virtual mux with default value
ASoC: dpcm: allow start or stop during pause for backend
ASoC: topology: use name_prefix for new kcontrol
Documentation/sound/hd-audio/index.rst | 1 +
Documentation/sound/hd-audio/models.rst | 2 -
Documentation/sound/hd-audio/realtek-pc-beep.rst | 129 ++++++++++++
Makefile | 2 +-
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 4 +-
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 3 +-
arch/arm64/kernel/armv8_deprecated.c | 2 +-
arch/mips/cavium-octeon/octeon-irq.c | 3 +
arch/mips/mm/tlbex.c | 5 +-
arch/powerpc/include/asm/book3s/64/hash-4k.h | 6 +
arch/powerpc/include/asm/book3s/64/hash-64k.h | 8 +-
arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +-
arch/powerpc/include/asm/book3s/64/radix.h | 5 +
arch/powerpc/include/asm/drmem.h | 4 +-
arch/powerpc/include/asm/setjmp.h | 6 +-
arch/powerpc/kernel/Makefile | 3 -
arch/powerpc/kernel/kprobes.c | 3 +
arch/powerpc/kernel/signal_64.c | 4 +-
arch/powerpc/mm/tlb_nohash_low.S | 12 +-
arch/powerpc/platforms/pseries/hotplug-memory.c | 8 +-
arch/powerpc/platforms/pseries/lpar.c | 2 +-
arch/powerpc/sysdev/xive/common.c | 12 +-
arch/powerpc/sysdev/xive/native.c | 4 +-
arch/powerpc/sysdev/xive/spapr.c | 4 +-
arch/powerpc/sysdev/xive/xive-internal.h | 7 +
arch/powerpc/xmon/Makefile | 3 -
arch/s390/kernel/diag.c | 2 +-
arch/s390/kvm/vsie.c | 1 +
arch/s390/mm/gmap.c | 6 +-
arch/x86/boot/compressed/head_32.S | 2 +-
arch/x86/boot/compressed/head_64.S | 4 +-
arch/x86/entry/entry_32.S | 1 +
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/include/asm/pgtable.h | 7 +-
arch/x86/include/asm/pgtable_types.h | 2 +-
arch/x86/kernel/acpi/boot.c | 2 +-
arch/x86/kvm/svm.c | 4 +
arch/x86/kvm/vmx.c | 110 ++++------
arch/x86/kvm/x86.c | 21 +-
arch/x86/platform/efi/efi_64.c | 4 +-
block/bfq-iosched.c | 16 +-
block/blk-ioc.c | 7 +
block/blk-settings.c | 3 +
drivers/ata/libata-pmp.c | 1 +
drivers/ata/libata-scsi.c | 9 +-
drivers/base/firmware_loader/fallback.c | 2 +-
drivers/block/null_blk_main.c | 10 +-
drivers/block/xen-blkfront.c | 17 +-
drivers/bus/sunxi-rsb.c | 2 +-
drivers/char/ipmi/ipmi_msghandler.c | 4 +-
drivers/char/tpm/eventlog/common.c | 12 +-
drivers/char/tpm/eventlog/tpm1.c | 2 +-
drivers/char/tpm/eventlog/tpm2.c | 2 +-
drivers/char/tpm/tpm-chip.c | 4 +-
drivers/char/tpm/tpm.h | 2 +-
drivers/clk/ingenic/jz4770-cgu.c | 4 +-
drivers/cpufreq/imx6q-cpufreq.c | 3 +
drivers/cpufreq/powernv-cpufreq.c | 6 +
drivers/crypto/caam/caamalg_desc.c | 16 +-
drivers/crypto/ccree/cc_aead.c | 56 +++--
drivers/crypto/ccree/cc_aead.h | 1 +
drivers/crypto/ccree/cc_buffer_mgr.c | 108 +++++-----
drivers/crypto/mxs-dcp.c | 58 +++--
drivers/firmware/arm_sdei.c | 32 ++-
drivers/firmware/efi/efi.c | 2 +-
drivers/gpu/drm/drm_dp_mst_topology.c | 19 +-
drivers/gpu/drm/drm_pci.c | 25 +--
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 103 +++++++--
drivers/i2c/busses/i2c-st.c | 1 +
drivers/infiniband/hw/mlx5/main.c | 6 +-
drivers/input/serio/i8042-x86ia64io.h | 11 +
drivers/irqchip/irq-gic-v3-its.c | 6 +
drivers/irqchip/irq-versatile-fpga.c | 18 +-
drivers/md/dm-verity-fec.c | 1 +
drivers/md/dm-writecache.c | 6 +-
drivers/md/dm-zoned-metadata.c | 1 -
drivers/md/md.c | 2 +-
drivers/media/i2c/ov5695.c | 49 +++--
drivers/media/i2c/video-i2c.c | 2 +-
drivers/media/platform/qcom/venus/hfi_parser.c | 1 +
drivers/media/platform/ti-vpe/cal.c | 16 +-
drivers/mfd/dln2.c | 9 +-
drivers/misc/echo/echo.c | 2 +-
drivers/mtd/nand/spi/core.c | 17 +-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3 +
drivers/net/ethernet/neterion/vxge/vxge-config.h | 2 +-
drivers/net/ethernet/neterion/vxge/vxge-main.h | 14 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2 +-
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 31 +--
drivers/net/wireless/ath/ath9k/main.c | 3 +
drivers/nvme/host/core.c | 11 +
drivers/nvme/host/fc.c | 14 +-
drivers/nvme/target/fcloop.c | 1 -
drivers/pci/endpoint/pci-epc-mem.c | 10 +-
drivers/pci/hotplug/pciehp_hpc.c | 14 +-
drivers/pci/pcie/aspm.c | 4 +-
drivers/pci/quirks.c | 80 ++++++-
drivers/pci/switch/switchtec.c | 2 +-
drivers/rtc/rtc-omap.c | 4 +-
drivers/s390/scsi/zfcp_erp.c | 2 +-
drivers/scsi/lpfc/lpfc_nvme.c | 2 -
drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8 +-
drivers/scsi/qla2xxx/qla_nvme.c | 1 -
drivers/staging/erofs/utils.c | 2 +-
drivers/usb/dwc3/core.c | 5 +
drivers/usb/dwc3/core.h | 4 +
drivers/usb/gadget/composite.c | 9 +
drivers/usb/gadget/function/f_fs.c | 1 +
drivers/usb/host/xhci.c | 4 +-
fs/btrfs/async-thread.c | 8 +
fs/btrfs/async-thread.h | 1 +
fs/btrfs/delayed-inode.c | 13 ++
fs/btrfs/disk-io.c | 27 ++-
fs/btrfs/file.c | 11 +
fs/btrfs/qgroup.c | 11 +-
fs/btrfs/relocation.c | 35 ++--
fs/exec.c | 2 +-
fs/ext4/inode.c | 2 +-
fs/filesystems.c | 4 +-
fs/gfs2/glock.c | 3 +
fs/hfsplus/attributes.c | 4 +
fs/nfs/write.c | 1 +
fs/ocfs2/alloc.c | 4 +
fs/pstore/inode.c | 5 +-
fs/pstore/platform.c | 4 +-
include/linux/devfreq_cooling.h | 2 +-
include/linux/iocontext.h | 1 +
include/linux/mlx5/mlx5_ifc.h | 9 +-
include/linux/nvme-fc-driver.h | 4 -
include/linux/pci-epc.h | 3 +
include/linux/sched.h | 4 +-
include/linux/swab.h | 1 +
include/uapi/linux/swab.h | 10 +
kernel/cpu.c | 5 +-
kernel/irq/irqdomain.c | 10 +-
kernel/kmod.c | 4 +-
kernel/locking/lockdep.c | 4 +
kernel/sched/sched.h | 8 +-
kernel/signal.c | 2 +-
kernel/trace/trace_kprobe.c | 2 +
lib/find_bit.c | 16 +-
mm/page_alloc.c | 8 +-
mm/slub.c | 2 +-
security/keys/key.c | 2 +-
security/keys/keyctl.c | 4 +-
sound/core/oss/pcm_plugin.c | 32 ++-
sound/pci/hda/hda_beep.c | 6 +-
sound/pci/hda/hda_intel.c | 16 ++
sound/pci/hda/patch_realtek.c | 50 +----
sound/pci/ice1712/prodigy_hifi.c | 4 +-
sound/soc/soc-dapm.c | 8 +-
sound/soc/soc-ops.c | 4 +-
sound/soc/soc-pcm.c | 6 +-
sound/soc/soc-topology.c | 2 +-
sound/usb/mixer_maps.c | 28 +++
tools/gpio/Makefile | 2 +-
tools/perf/Makefile.config | 11 +-
tools/testing/selftests/vm/mlock2-tests.c | 233 ++++-----------------
tools/testing/selftests/x86/ptrace_syscall.c | 8 +-
159 files changed, 1190 insertions(+), 777 deletions(-)
create mode 100644 Documentation/sound/hd-audio/realtek-pc-beep.rst
--
1.8.3
From: huangjun <huangjun63(a)huawei.com>
If we want ACPI GED to support wakeup from freeze, we need to implement
the suspend/resume callbacks. In these two callbacks, the ACPI _GPO and
_GPP methods are evaluated to set the sleep flag and to handle debouncing
("anti-shake").
Signed-off-by: Huangjun <huangjun63(a)huawei.com>
---
drivers/acpi/evged.c | 82 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 82 insertions(+)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..84656fc09d15 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -46,11 +46,14 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
#define MODULE_NAME "acpi-ged"
struct acpi_ged_device {
struct device *dev;
+ struct timer_list timer;
struct list_head event_list;
};
@@ -148,6 +151,8 @@ static int ged_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+
+ timer_setup(&geddev->timer, NULL, 0);
platform_set_drvdata(pdev, geddev);
return 0;
@@ -164,6 +169,7 @@ static void ged_shutdown(struct platform_device *pdev)
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
+ del_timer(&geddev->timer);
}
static int ged_remove(struct platform_device *pdev)
@@ -177,6 +183,78 @@ static const struct acpi_device_id ged_acpi_ids[] = {
{},
};
+#ifdef CONFIG_PM_SLEEP
+static acpi_status ged_acpi_execute(struct device *dev, char* method, u64 arg)
+{
+ acpi_status acpi_ret;
+ acpi_handle method_handle;
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(dev), method, &method_handle);
+
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "cannot locate %s method\n", method);
+ return AE_NOT_FOUND;
+ }
+
+ acpi_ret = acpi_execute_simple_method(method_handle, NULL, arg);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "%s method execution failed\n", method);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static void ged_timer_callback(struct timer_list *t)
+{
+ struct acpi_ged_device *geddev = from_timer(geddev, t, timer);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ ged_acpi_execute(geddev->dev, "_GPP", event->gsi);
+ }
+}
+
+static int ged_suspend(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+ acpi_status acpi_ret;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ acpi_ret = ged_acpi_execute(dev, "_GPO", event->gsi);
+
+ if (acpi_ret == AE_ERROR)
+ return -EINVAL;
+
+ enable_irq_wake(event->irq);
+ }
+ return 0;
+}
+
+static int ged_resume(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ disable_irq_wake(event->irq);
+ }
+
+ /* use timer to complete 4s anti-shake */
+ geddev->timer.expires = jiffies + (4 * HZ);
+ geddev->timer.function = ged_timer_callback;
+ add_timer(&geddev->timer);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ged_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ged_suspend, ged_resume)
+};
+#endif
+
+
static struct platform_driver ged_driver = {
.probe = ged_probe,
.remove = ged_remove,
@@ -184,6 +262,10 @@ static struct platform_driver ged_driver = {
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &ged_pm_ops,
+#endif
+
},
};
builtin_platform_driver(ged_driver);
--
2.20.1
From: Sunnanyong <sunnanyong(a)huawei.com>
If we want to support S4 (suspend to disk), it is necessary to guarantee
that the ITS tables are at the same address in the booting kernel and in
the resumed kernel. That covers all the ITS tables as well as the RDs'.
To support this, allocate the ITT memory from a static memory pool instead.
Signed-off-by: Sunnanyong <sunnanyong(a)huawei.com>
---
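A quick worked example of the pool indexing introduced below, derived
only from the constants in this patch: with ITS_MEM_POOL_SIZE = 4 MiB and
fixed 512-byte ITT slots, each pool covers ITS_MEM_POOL_SIZE / SZ_512 =
8192 device IDs, and the 16 static pools together cover device IDs
0..131071. For example, dev_id 10000 maps to pool 10000 / 8192 = 1 and
slot 10000 % 8192 = 1808, i.e. byte offset 1808 * 512 = 0xE2000 into that
pool. Device IDs outside this range, or a pool page that failed to
allocate at init time, fall back to the existing kzalloc_node() path.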
drivers/irqchip/irq-gic-v3-its.c | 72 ++++++++++++++++++++++++++++++--
1 file changed, 69 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 860f3ef2969e..9de585fe74fb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -189,6 +189,14 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static void *its_mem_pool_alloc(int dev_id);
+#define ITS_MEM_POOL_SIZE (SZ_4M)
+#define ITS_MEM_POOL_MAX (16)
+#define GITS_OTHER_OFFSET 0x20000
+#define GITS_OTHER_REG_SIZE 0x100
+#define GITS_FUNC_REG_OFFSET 0x80
+static void *its_mem_pool[ITS_MEM_POOL_MAX] = {0};
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2436,7 +2444,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
+ itt = its_mem_pool_alloc(dev_id);
+ if (!itt)
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -2450,7 +2462,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@@ -2486,7 +2497,6 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
- kfree(its_dev->itt);
kfree(its_dev);
}
@@ -3798,11 +3808,41 @@ static int redist_disable_lpis(void)
return 0;
}
+static void its_cpu_clear_cache(void)
+{
+ struct its_node *its;
+ u64 val = 0;
+ void __iomem *func_base;
+
+ raw_spin_lock(&its_lock);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ func_base = ioremap(its->phys_base + GITS_OTHER_OFFSET,
+ GITS_OTHER_REG_SIZE);
+ if (!func_base) {
+ pr_err("ITS@%p : Unable to map ITS OTHER registers\n",
+ (void *)(its->phys_base + GITS_OTHER_OFFSET));
+ raw_spin_unlock(&its_lock);
+ return;
+ }
+
+ val = readl_relaxed(func_base + GITS_FUNC_REG_OFFSET);
+ val = val | (0x7 << 16);
+ writel_relaxed(val, func_base + GITS_FUNC_REG_OFFSET);
+ dsb(sy);
+ iounmap(func_base);
+ }
+
+ raw_spin_unlock(&its_lock);
+}
+
+
int its_cpu_init(void)
{
if (!list_empty(&its_nodes)) {
int ret;
+ its_cpu_clear_cache();
ret = redist_disable_lpis();
if (ret)
return ret;
@@ -4001,6 +4041,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct its_node *its;
bool has_v4 = false;
int err;
+ int i;
its_parent = parent_domain;
of_node = to_of_node(handle);
@@ -4014,6 +4055,16 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
+ for (i = 0; i < ITS_MEM_POOL_MAX; i++) {
+ if (!its_mem_pool[i]) {
+ its_mem_pool[i] = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_MEM_POOL_SIZE));
+ if (!its_mem_pool[i])
+ pr_err("err:[its mem[%d]] has no memory\n", i);
+ }
+ }
+
+
gic_rdists = rdists;
err = allocate_lpi_tables();
@@ -4035,3 +4086,18 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return 0;
}
+
+void *its_mem_pool_alloc(int dev_id)
+{
+ int pool_num = dev_id / (ITS_MEM_POOL_SIZE / SZ_512);
+ int idx = dev_id % (ITS_MEM_POOL_SIZE / SZ_512);
+ void *addr = NULL;
+
+ if (pool_num >= ITS_MEM_POOL_MAX || !its_mem_pool[pool_num]) {
+ pr_err("[its mem[%d]] alloc error\n", pool_num);
+ return NULL;
+ }
+
+ addr = its_mem_pool[pool_num] + idx * SZ_512;
+ return addr;
+}
--
2.20.1
Chengdu Haiguang IC Design Co., Ltd (Hygon) is a new x86 CPU vendor, a
joint venture between AMD and Haiguang Information Technology Co., Ltd.,
aiming to provide high-performance x86 processors for the China server
market.
The first-generation Hygon processor (Dhyana) originates from AMD
technology and shares most of its architecture with AMD's family 17h,
but uses a different CPU vendor ID ("HygonGenuine"), PCIe device vendor
ID (0x1D94) and family number (family 18h).
To enable Linux kernel support for Hygon CPUs, we add a new vendor type
(X86_VENDOR_HYGON, with the value 9) in arch/x86/include/asm/processor.h
and share most of the kernel support code with AMD family 17h.
As Hygon will negotiate with AMD to make sure that only Hygon uses
family 18h, we try to minimize code modification and share most of the
code with AMD under this consideration.
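As a quick illustration of the vendor ID mentioned above (a hypothetical
user-space check, not part of this series): the 12-byte vendor string is
returned by CPUID leaf 0 in EBX, EDX and ECX, and the X86_VENDOR_HYGON
detection added by this series keys off the same "HygonGenuine" string.

/* Hypothetical user-space sketch: print the CPUID vendor string. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	/* CPUID leaf 0: EAX = max leaf, EBX/EDX/ECX = vendor string */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	if (!strcmp(vendor, "HygonGenuine"))
		printf("Hygon Dhyana CPU (expected family 18h)\n");
	else
		printf("vendor: %s\n", vendor);

	return 0;
}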
This patch series has been applied and tested successfully on Hygon
Dhyana SoC silicon. It has also been tested on an AMD EPYC (family 17h)
processor; it works fine and does no harm to the existing code.
The series is created for the current openEuler-1.0-LTS branch.
References:
[1] Linux kernel patches for Hygon Dhyana, merged in 4.20:
https://git.kernel.org/tip/c9661c1e80b609cd038db7c908e061f0535804ef
[2] MSR and CPUID definition:
https://www.amd.com/system/files/TechDocs/54945_PPR_Family_17h_Models_00h-0…
Pu Wen (22):
x86/cpu: Create Hygon Dhyana architecture support file
x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
x86/events: Add Hygon Dhyana support to PMU infrastructure
x86/alternative: Init ideal_nops for Hygon Dhyana
x86/amd_nb: Check vendor in AMD-only functions
x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
x86/apic: Add Hygon Dhyana support
x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
x86/mce: Add Hygon Dhyana support to the MCA infrastructure
x86/kvm: Add Hygon Dhyana support to KVM
x86/xen: Add Hygon Dhyana support to Xen
ACPI: Add Hygon Dhyana support
cpufreq: Add Hygon Dhyana support
EDAC, amd64: Add Hygon Dhyana support
tools/cpupower: Add Hygon Dhyana support
hwmon: (k10temp) Add Hygon Dhyana support
x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die
processors
i2c-piix4: Add Hygon Dhyana SMBus support
x86/amd_nb: Make hygon_nb_misc_ids static
NTB: Add Hygon Device ID
Documentation/i2c/busses/i2c-piix4 | 2 +
MAINTAINERS | 6 +
arch/x86/Kconfig.cpu | 14 +
arch/x86/events/amd/core.c | 4 +
arch/x86/events/amd/uncore.c | 20 +-
arch/x86/events/core.c | 4 +
arch/x86/include/asm/amd_nb.h | 3 +
arch/x86/include/asm/cacheinfo.h | 1 +
arch/x86/include/asm/kvm_emulate.h | 4 +
arch/x86/include/asm/mce.h | 2 +
arch/x86/include/asm/processor.h | 3 +-
arch/x86/include/asm/virtext.h | 5 +-
arch/x86/kernel/alternative.c | 4 +
arch/x86/kernel/amd_nb.c | 49 ++-
arch/x86/kernel/apic/apic.c | 7 +
arch/x86/kernel/apic/probe_32.c | 1 +
arch/x86/kernel/cpu/Makefile | 1 +
arch/x86/kernel/cpu/bugs.c | 4 +-
arch/x86/kernel/cpu/cacheinfo.c | 31 +-
arch/x86/kernel/cpu/common.c | 4 +
arch/x86/kernel/cpu/cpu.h | 1 +
arch/x86/kernel/cpu/hygon.c | 413 ++++++++++++++++++
arch/x86/kernel/cpu/mce/core.c | 20 +-
arch/x86/kernel/cpu/mce/severity.c | 3 +-
arch/x86/kernel/cpu/mtrr/cleanup.c | 3 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 2 +-
arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +
arch/x86/kernel/smpboot.c | 4 +-
arch/x86/kvm/emulate.c | 11 +-
arch/x86/pci/amd_bus.c | 6 +-
arch/x86/xen/pmu.c | 12 +-
drivers/acpi/acpi_pad.c | 1 +
drivers/acpi/processor_idle.c | 1 +
drivers/cpufreq/acpi-cpufreq.c | 5 +
drivers/cpufreq/amd_freq_sensitivity.c | 9 +-
drivers/edac/amd64_edac.c | 10 +-
drivers/edac/mce_amd.c | 4 +-
drivers/hwmon/k10temp.c | 3 +-
drivers/i2c/busses/Kconfig | 1 +
drivers/i2c/busses/i2c-piix4.c | 15 +-
drivers/ntb/hw/amd/ntb_hw_amd.c | 1 +
include/linux/pci_ids.h | 2 +
tools/power/cpupower/utils/cpufreq-info.c | 6 +-
tools/power/cpupower/utils/helpers/amd.c | 4 +-
tools/power/cpupower/utils/helpers/cpuid.c | 8 +-
tools/power/cpupower/utils/helpers/helpers.h | 2 +-
tools/power/cpupower/utils/helpers/misc.c | 2 +-
.../utils/idle_monitor/mperf_monitor.c | 3 +-
48 files changed, 668 insertions(+), 55 deletions(-)
create mode 100644 arch/x86/kernel/cpu/hygon.c
--
2.23.0
Alexander Usyskin (1):
mei: me: add cedar fork device ids
Amritha Nambiar (1):
net: Fix Tx hash bound checking
Arun KS (1):
arm64: Fix size of __early_cpu_boot_status
Avihai Horon (1):
RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
Chris Lew (1):
rpmsg: glink: Remove chunk size word align warning
Daniel Jordan (1):
padata: always acquire cpu_hotplug_lock before pinst->lock
David Ahern (1):
tools/accounting/getdelays.c: fix netlink attribute length
David Howells (1):
rxrpc: Fix sendmsg(MSG_WAITALL) handling
Eugene Syromiatnikov (1):
coresight: do not use the BIT() macro in the UAPI header
Eugeniy Paltsev (1):
initramfs: restore default compression behavior
Florian Fainelli (2):
net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
net: dsa: bcm_sf2: Ensure correct sub-node is parsed
Geoffrey Allott (1):
ALSA: hda/ca0132 - Add Recon3Di quirk to handle integrated sound on
EVGA X99 Classified motherboard
Gerd Hoffmann (1):
drm/bochs: downgrade pci_request_region failure from error to warning
Greg Kroah-Hartman (1):
Linux 4.19.115
Hans Verkuil (1):
drm_dp_mst_topology: fix broken
drm_dp_sideband_parse_remote_dpcd_read()
Hans de Goede (2):
extcon: axp288: Add wakeup support
power: supply: axp288_charger: Add special handling for HP Pavilion x2
10
Ilya Dryomov (1):
ceph: canonicalize server path in place
James Zhu (1):
drm/amdgpu: fix typo for vcn1 idle check
Jarod Wilson (1):
ipv6: don't auto-add link-local address to lag ports
Jason A. Donenfeld (1):
random: always use batched entropy for get_random_u{32, 64}
Jason Gunthorpe (2):
RDMA/ucma: Put a lock around every call to the rdma_cm layer
RDMA/cma: Teach lockdep about the order of rtnl and lock
Jisheng Zhang (1):
net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
Kaike Wan (2):
IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
IB/hfi1: Fix memory leaks in sysfs registration and unregistration
Kishon Vijay Abraham I (2):
misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
misc: pci_endpoint_test: Avoid using module parameter to determine
irqtype
Len Brown (2):
tools/power turbostat: Fix gcc build warnings
tools/power turbostat: Fix missing SYS_LPI counter on some Chromebooks
Lucas Stach (1):
drm/etnaviv: replace MMU flush marker with flush sequence
Marcelo Ricardo Leitner (1):
sctp: fix possibly using a bad saddr with a given dst
Mario Kleiner (1):
drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
Martin Kaiser (1):
hwrng: imx-rngc - fix an error path
Oleksij Rempel (1):
net: phy: micrel: kszphy_resume(): add delay after genphy_resume()
before accessing PHY registers
Paul Cercueil (1):
ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
Petr Machata (1):
mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
Prabhath Sajeepa (1):
nvme-rdma: Avoid double freeing of async event data
Qian Cai (1):
ipv4: fix a RCU-list lock in fib_triestat_seq_show
Qiujun Huang (3):
sctp: fix refcount bug in sctp_wfree
Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
fbcon: fix null-ptr-deref in fbcon_switch
Rob Clark (2):
drm/msm: stop abusing dma_map/unmap for cache
drm/msm: Use the correct dma_sync calls in msm_gem
Roger Quadros (1):
usb: dwc3: don't set gadget->is_otg flag
Sean Young (1):
media: rc: IR signal for Panasonic air conditioner too long
Taniya Das (1):
clk: qcom: rcg: Return failure for RCG update
Thinh Nguyen (1):
usb: dwc3: gadget: Wrap around when skip TRBs
William Dauchy (1):
net, ip_tunnel: fix interface lookup with no key
Xiubo Li (1):
ceph: remove the extra slashes in the server path
YueHaibing (1):
misc: rtsx: set correct pcr_ops for rts522A
Makefile | 2 +-
arch/arm64/kernel/head.S | 2 +-
drivers/char/hw_random/imx-rngc.c | 4 +-
drivers/char/random.c | 20 ++------
drivers/clk/qcom/clk-rcg2.c | 2 +-
drivers/extcon/extcon-axp288.c | 32 ++++++++++++
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +-
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11 +++++
drivers/gpu/drm/bochs/bochs_hw.c | 6 +--
drivers/gpu/drm/drm_dp_mst_topology.c | 1 +
drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 10 ++--
drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1 +
drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 6 +--
drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 2 +-
drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++--
drivers/infiniband/core/cma.c | 14 ++++++
drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++-
drivers/infiniband/hw/hfi1/sysfs.c | 26 +++++++---
drivers/media/rc/lirc_dev.c | 2 +-
drivers/misc/cardreader/rts5227.c | 1 +
drivers/misc/mei/hw-me-regs.h | 2 +
drivers/misc/mei/pci-me.c | 2 +
drivers/misc/pci_endpoint_test.c | 14 ++++--
drivers/net/dsa/bcm_sf2.c | 9 +++-
.../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 8 +--
.../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2 +-
drivers/net/phy/micrel.c | 7 +++
drivers/nvme/host/rdma.c | 8 +--
drivers/power/supply/axp288_charger.c | 57 +++++++++++++++++++++-
drivers/rpmsg/qcom_glink_native.c | 3 --
drivers/usb/dwc3/gadget.c | 3 +-
drivers/video/fbdev/core/fbcon.c | 3 ++
fs/ceph/super.c | 56 +++++++++++++--------
fs/ceph/super.h | 2 +-
include/uapi/linux/coresight-stm.h | 6 ++-
kernel/padata.c | 4 +-
net/bluetooth/rfcomm/tty.c | 4 +-
net/core/dev.c | 2 +
net/ipv4/fib_trie.c | 3 ++
net/ipv4/ip_tunnel.c | 6 +--
net/ipv6/addrconf.c | 4 ++
net/rxrpc/sendmsg.c | 4 +-
net/sctp/ipv6.c | 20 +++++---
net/sctp/protocol.c | 28 +++++++----
net/sctp/socket.c | 31 +++++++++---
sound/pci/hda/patch_ca0132.c | 1 +
sound/soc/jz4740/jz4740-i2s.c | 2 +-
tools/accounting/getdelays.c | 2 +-
tools/power/x86/turbostat/turbostat.c | 27 +++++-----
usr/Kconfig | 22 ++++-----
50 files changed, 434 insertions(+), 148 deletions(-)
--
1.8.3
From: "Paul E. McKenney" <paulmck(a)kernel.org>
mainline inclusion
from mainline-v5.6-rc1
commit 844a378de3372c923909681706d62336d702531e
category: bugfix
bugzilla: 28851
CVE: NA
-------------------------------------------------------------------------
The ->srcu_last_gp_end field is accessed from any CPU at any time
by synchronize_srcu(), so non-initialization references need to use
READ_ONCE() and WRITE_ONCE(). This commit therefore makes that change.
Reported-by: syzbot+08f3e9d26e5541e1ecf2(a)syzkaller.appspotmail.com
Acked-by: Marco Elver <elver(a)google.com>
Signed-off-by: Paul E. McKenney <paulmck(a)kernel.org>
Conflicts:
kernel/rcu/srcutree.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
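A minimal sketch of the idiom applied below (a generic illustration, not
code from this patch): the writer publishes the timestamp with
WRITE_ONCE(), and the lockless reader takes a single READ_ONCE() snapshot
and reuses it, so the two ends of the range check cannot observe two
different values of the field.

/* Generic READ_ONCE()/WRITE_ONCE() illustration; not code from this patch. */
#include <linux/compiler.h>
#include <linux/jiffies.h>

struct gp_stamp {
	unsigned long last_end;		/* written by one CPU, read by any */
};

/* Writer side, e.g. at the end of a grace period. */
static void gp_stamp_update(struct gp_stamp *s)
{
	WRITE_ONCE(s->last_end, jiffies);
}

/* Lockless reader side: one snapshot, used for both comparisons. */
static int gp_stamp_recent(struct gp_stamp *s, unsigned long holdoff)
{
	unsigned long last = READ_ONCE(s->last_end);

	return time_in_range_open(jiffies, last, last + holdoff);
}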
kernel/rcu/srcutree.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 4b0a6e3..7bd0204 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -552,7 +552,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ WRITE_ONCE(sp->srcu_last_gp_end, ktime_get_mono_fast_ns());
rcu_seq_end(&sp->srcu_gp_seq);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
@@ -780,6 +780,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
unsigned long flags;
struct srcu_data *sdp;
unsigned long t;
+ unsigned long tlast;
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
@@ -798,9 +799,9 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
+ tlast = READ_ONCE(sp->srcu_last_gp_end);
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, tlast, tlast + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
--
1.8.3
From: Yunsheng Lin <linyunsheng(a)huawei.com>
mainline inclusion
from mainline-v5.4-rc1
commit 6b0c54e7f2715997c366e8374209bc74259b0a59
category: bugfix
bugzilla: 21318
CVE: NA
-------------------------------------------------------------------------
The cookie is dereferenced before null checking in the function
iommu_dma_init_domain.
This patch moves the dereferencing after the null checking.
Fixes: fdbe574eb693 ("iommu/dma: Allow MSI-only cookies")
Signed-off-by: Yunsheng Lin <linyunsheng(a)huawei.com>
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
Conflicts:
drivers/iommu/dma-iommu.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/iommu/dma-iommu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 64ae17e8b..b68d9fd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -290,13 +290,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+ iovad = &cookie->iovad;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: 4472
-----------------------------------------------------------------------
Fix an out-of-bounds access caused by user input.
To solve the problem, the kernel driver now imposes a length restriction
on each user-supplied input.
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_nictool.c | 18 ++++++++++++++++++
drivers/net/ethernet/huawei/hinic/hinic_nictool.h | 2 ++
drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c | 16 +++++++++++++---
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
index 46dd9ec..df01088 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -1712,6 +1712,19 @@ static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
return UP_COMP_TIME_OUT_VAL;
}
+static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ u32 rd_len = csr_write_msg->rd_len;
+
+ if (rd_len > TOOL_COUNTER_MAX_LEN) {
+ pr_err("Csr read or write len is invalid!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int send_to_up(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
@@ -1744,6 +1757,9 @@ static int send_to_up(void *hwdev, struct msg_module *nt_msg,
}
} else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
+ if (check_useparam_valid(nt_msg, buf_in))
+ return -EINVAL;
+
if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) {
ret = api_csr_write(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
@@ -1994,6 +2010,8 @@ static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
+ memset(&card_id, 0, sizeof(card_id));
+
hinic_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
index cfbe435..e8eccaf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -285,4 +285,6 @@ struct hinic_pf_info {
extern void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
struct hinic_show_item *items);
+#define TOOL_COUNTER_MAX_LEN 512
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
index 9536adf..eb35df6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -253,9 +253,19 @@ int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
ctr_rd_rsp_u rsp;
int ret;
- if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
- pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n",
- hwdev, value1, value2, ctr_id);
+ if (!value1) {
+ pr_err("value1 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!value2) {
+ pr_err("value2 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!hwdev || (0 != (ctr_id & 0x1))) {
+ pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n",
+ ctr_id);
return -EFAULT;
}
--
1.8.3
From: Li Bin <huawei.libin(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 34234
CVE: NA
--------------------------------
If the dxfer_len is greater than 256M, the request is invalid and
sg_common_write() should call sg_remove_request() before returning the error.
Fixes: f930c7043663 ("scsi: sg: only check for dxfer_len greater than 256M")
Signed-off-by: Li Bin <huawei.libin(a)huawei.com>
Acked-by: Douglas Gilbert <dgilbert(a)interlog.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/sg.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c75324a..9c4b71e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -808,8 +808,10 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
--
1.8.3
[PATCH 1/2] btrfs: extent_io: Handle errors better in extent_write_full_page()
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.2-rc2
commit 3065976b045f77a910809fa7699f99a1e7c0dbbb
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19377
Introduce end_write_bio() for CVE-2019-19377.
-------------------------------------------------
Since flush_write_bio() can now return an error, kill the BUG_ON() first.
Then don't call flush_write_bio() unconditionally; instead, check the
return value of __extent_writepage() first.
If __extent_writepage() fails, do the cleanup and return the error without
submitting the possibly corrupted or half-baked bio.
If __extent_writepage() succeeds, call flush_write_bio() and return the
result.
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/extent_io.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/extent_io.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 11efb4f..7f2990f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2731,6 +2731,16 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
return blk_status_to_errno(ret);
}
+/* Cleanup unsubmitted bios */
+static void end_write_bio(struct extent_page_data *epd, int ret)
+{
+ if (epd->bio) {
+ epd->bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(epd->bio);
+ epd->bio = NULL;
+ }
+}
+
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@@ -3438,6 +3448,9 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
* records are inserted to lock ranges in the tree, and as dirty areas
* are found, they are marked writeback. Then the lock bits are removed
* and the end_io handler clears the writeback ranges
+ *
+ * Return 0 if everything goes well.
+ * Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
@@ -3505,6 +3518,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
+ ASSERT(ret <= 0);
return ret;
done_unlocked:
@@ -4054,6 +4068,11 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
};
ret = __extent_writepage(page, wbc, &epd);
+ ASSERT(ret <= 0);
+ if (ret < 0) {
+ end_write_bio(&epd, ret);
+ return ret;
+ }
flush_write_bio(&epd);
return ret;
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: cleanup
bugzilla: 4472
-----------------------------------------------------------------------
Delete unused source and header files
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_common.c | 80 -
drivers/net/ethernet/huawei/hinic/hinic_common.h | 38 -
drivers/net/ethernet/huawei/hinic/hinic_dev.h | 64 -
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 978 -------
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 208 --
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 947 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 187 --
drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 149 --
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1010 --------
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 239 --
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 886 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 265 --
drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 351 ---
drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 272 --
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 533 ----
drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 97 -
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 597 -----
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 907 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 205 --
.../net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 214 --
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 878 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 117 -
drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 368 ---
drivers/net/ethernet/huawei/hinic/hinic_port.c | 379 ---
drivers/net/ethernet/huawei/hinic/hinic_port.h | 198 --
.../net/ethernet/huawei/hinic/hinic_sml_table.h | 2728 --------------------
.../ethernet/huawei/hinic/hinic_sml_table_pub.h | 277 --
27 files changed, 13172 deletions(-)
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c
deleted file mode 100644
index 02c74fd..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-
-/**
- * hinic_cpu_to_be32 - convert data to big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_cpu_to_be32(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = cpu_to_be32(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_be32_to_cpu - convert data from big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_be32_to_cpu(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = be32_to_cpu(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_set_sge - set dma area in scatter gather entry
- * @sge: scatter gather entry
- * @addr: dma address
- * @len: length of relevant data in the dma address
- **/
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)
-{
- sge->hi_addr = upper_32_bits(addr);
- sge->lo_addr = lower_32_bits(addr);
- sge->len = len;
-}
-
-/**
- * hinic_sge_to_dma - get dma address from scatter gather entry
- * @sge: scatter gather entry
- *
- * Return dma address of sg entry
- **/
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
-{
- return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h
deleted file mode 100644
index 2c06b76..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_COMMON_H
-#define HINIC_COMMON_H
-
-#include <linux/types.h>
-
-#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
-#define LOWER_8_BITS(data) ((data) & 0xFF)
-
-struct hinic_sge {
- u32 hi_addr;
- u32 lo_addr;
- u32 len;
-};
-
-void hinic_cpu_to_be32(void *data, int len);
-
-void hinic_be32_to_cpu(void *data, int len);
-
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len);
-
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
deleted file mode 100644
index 5186cc9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_DEV_H
-#define HINIC_DEV_H
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_tx.h"
-#include "hinic_rx.h"
-
-#define HINIC_DRV_NAME "hinic"
-
-enum hinic_flags {
- HINIC_LINK_UP = BIT(0),
- HINIC_INTF_UP = BIT(1),
-};
-
-struct hinic_rx_mode_work {
- struct work_struct work;
- u32 rx_mode;
-};
-
-struct hinic_dev {
- struct net_device *netdev;
- struct hinic_hwdev *hwdev;
-
- u32 msg_enable;
- unsigned int tx_weight;
- unsigned int rx_weight;
-
- unsigned int flags;
-
- struct semaphore mgmt_lock;
- unsigned long *vlan_bitmap;
-
- struct hinic_rx_mode_work rx_mode_work;
- struct workqueue_struct *workq;
-
- struct hinic_txq *txqs;
- struct hinic_rxq *rxqs;
-
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
deleted file mode 100644
index c40603a..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/log2.h>
-#include <linux/semaphore.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
-
-#define API_CHAIN_NUM_CELLS 32
-
-#define API_CMD_CELL_SIZE_SHIFT 6
-#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT))
-
-#define API_CMD_CELL_SIZE(cell_size) \
- (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \
- (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN)
-
-#define API_CMD_CELL_SIZE_VAL(size) \
- ilog2((size) >> API_CMD_CELL_SIZE_SHIFT)
-
-#define API_CMD_BUF_SIZE 2048
-
-/* Sizes of the members in hinic_api_cmd_cell */
-#define API_CMD_CELL_DESC_SIZE 8
-#define API_CMD_CELL_DATA_ADDR_SIZE 8
-
-#define API_CMD_CELL_ALIGNMENT 8
-
-#define API_CMD_TIMEOUT 1000
-
-#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
-
-#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3)
-#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2)
-
-#define RD_DMA_ATTR_DEFAULT 0
-#define WR_DMA_ATTR_DEFAULT 0
-
-enum api_cmd_data_format {
- SGE_DATA = 1, /* cell data is passed by hw address */
-};
-
-enum api_cmd_type {
- API_CMD_WRITE = 0,
-};
-
-enum api_cmd_bypass {
- NO_BYPASS = 0,
- BYPASS = 1,
-};
-
-enum api_cmd_xor_chk_level {
- XOR_CHK_DIS = 0,
-
- XOR_CHK_ALL = 3,
-};
-
-static u8 xor_chksum_set(void *data)
-{
- int idx;
- u8 *val, checksum = 0;
-
- val = data;
-
- for (idx = 0; idx < 7; idx++)
- checksum ^= val[idx];
-
- return checksum;
-}
-
-static void set_prod_idx(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, prod_idx;
-
- addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
- prod_idx = hinic_hwif_read_reg(hwif, addr);
-
- prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX);
-
- prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX);
-
- hinic_hwif_write_reg(hwif, addr, prod_idx);
-}
-
-static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
-{
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
-
- return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
-}
-
-/**
- * chain_busy - check if the chain is still processing last requests
- * @chain: chain to check
- *
- * Return 0 - Success, negative - Failure
- **/
-static int chain_busy(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 prod_idx;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- chain->cons_idx = get_hw_cons_idx(chain);
- prod_idx = chain->prod_idx;
-
- /* check for a space for a new command */
- if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
- dev_err(&pdev->dev, "API CMD chain %d is busy\n",
- chain->chain_type);
- return -EBUSY;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unknown API CMD Chain type\n");
- break;
- }
-
- return 0;
-}
-
-/**
- * get_cell_data_size - get the data size of a specific cell type
- * @type: chain type
- *
- * Return the data(Desc + Address) size in the cell
- **/
-static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type)
-{
- u8 cell_data_size = 0;
-
- switch (type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
- API_CMD_CELL_DATA_ADDR_SIZE,
- API_CMD_CELL_ALIGNMENT);
- break;
- default:
- break;
- }
-
- return cell_data_size;
-}
-
-/**
- * prepare_cell_ctrl - prepare the ctrl of the cell for the command
- * @cell_ctrl: the control of the cell to set the control value into it
- * @data_size: the size of the data in the cell
- **/
-static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size)
-{
- u8 chksum;
- u64 ctrl;
-
- ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) |
- HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) |
- HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR);
-
- chksum = xor_chksum_set(&ctrl);
-
- ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- *cell_ctrl = cpu_to_be64(ctrl);
-}
-
-/**
- * prepare_api_cmd - prepare API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- **/
-static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *cell = chain->curr_node;
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) |
- HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) |
- HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS);
- break;
-
- default:
- dev_err(&pdev->dev, "unknown Chain type\n");
- return;
- }
-
- cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
- HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
-
- cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
- XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- cell->desc = cpu_to_be64(cell->desc);
-
- memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
-}
-
-/**
- * prepare_cell - prepare cell ctrl and cmd in the current cell
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static void prepare_cell(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *curr_node = chain->curr_node;
- u16 data_size = get_cell_data_size(chain->chain_type);
-
- prepare_cell_ctrl(&curr_node->ctrl, data_size);
- prepare_api_cmd(chain, dest, cmd, cmd_size);
-}
-
-static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
-{
- chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
-}
-
-/**
- * api_cmd_status_update - update the status in the chain struct
- * @chain: chain to update
- **/
-static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
- struct hinic_api_cmd_status *wb_status;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u64 status_header;
- u32 status;
-
- wb_status = chain->wb_status;
- status_header = be64_to_cpu(wb_status->header);
-
- status = be32_to_cpu(wb_status->status);
- if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) {
- dev_err(&pdev->dev, "API CMD status: Xor check error\n");
- return;
- }
-
- chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
- if (chain_type >= HINIC_API_CMD_MAX) {
- dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type);
- return;
- }
-
- chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX);
-}
-
-/**
- * wait_for_status_poll - wait for write to api cmd command to complete
- * @chain: the chain of the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
-{
- int err = -ETIMEDOUT;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- api_cmd_status_update(chain);
-
- /* wait for CI to be updated - sign for completion */
- if (chain->cons_idx == chain->prod_idx) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * wait_for_api_cmd_completion - wait for command to complete
- * @chain: chain for the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = wait_for_status_poll(chain);
- if (err) {
- dev_err(&pdev->dev, "API CMD Poll status timeout\n");
- break;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "unknown API CMD Chain type\n");
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd - API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell_ctxt *ctxt;
- int err;
-
- down(&chain->sem);
- if (chain_busy(chain)) {
- up(&chain->sem);
- return -EBUSY;
- }
-
- prepare_cell(chain, dest, cmd, cmd_size);
- cmd_chain_prod_idx_inc(chain);
-
- wmb(); /* inc pi before issue the command */
-
- set_prod_idx(chain); /* issue the command */
-
- ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- chain->curr_node = ctxt->cell_vaddr;
-
- err = wait_for_api_cmd_completion(chain);
-
- up(&chain->sem);
- return err;
-}
-
-/**
- * hinic_api_cmd_write - Write API CMD command
- * @chain: chain for write command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size)
-{
- /* Verify the chain type */
- if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- return api_cmd(chain, dest, cmd, size);
-
- return -EINVAL;
-}
-
-/**
- * api_cmd_hw_restart - restart the chain in the HW
- * @chain: the API CMD specific chain to restart
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- int err = -ETIMEDOUT;
- unsigned long end;
- u32 reg_addr, val;
-
- /* Read Modify Write */
- reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
- val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
-
- hinic_hwif_write_reg(hwif, reg_addr, val);
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * api_cmd_ctrl_init - set the control register of a chain
- * @chain: the API CMD specific chain to set control register for
- **/
-static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
- u16 cell_size;
-
- /* Read Modify Write */
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
-
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) |
- HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) |
- HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_set_status_addr - set the status address of a chain in the HW
- * @chain: the API CMD specific chain to set in HW status address for
- **/
-static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_set_num_cells - set the number of cells of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the number of cells for
- **/
-static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
- val = chain->num_cells;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_head_init - set the head of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the head for
- **/
-static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_chain_hw_clean - clean the HW
- * @chain: the API CMD specific chain
- **/
-static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
-
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_chain_hw_init - initialize the chain in the HW
- * @chain: the API CMD specific chain to initialize in HW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- api_cmd_chain_hw_clean(chain);
-
- api_cmd_set_status_addr(chain);
-
- err = api_cmd_hw_restart(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to restart API CMD HW\n");
- return err;
- }
-
- api_cmd_ctrl_init(chain);
- api_cmd_set_num_cells(chain);
- api_cmd_head_init(chain);
- return 0;
-}
-
-/**
- * free_cmd_buf - free the dma buffer of API CMD command
- * @chain: the API CMD specific chain of the cmd
- * @cell_idx: the cell index of the cmd
- **/
-static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- cell_ctxt->api_cmd_vaddr,
- cell_ctxt->api_cmd_paddr);
-}
-
-/**
- * alloc_cmd_buf - allocate a dma buffer for API CMD command
- * @chain: the API CMD specific chain for the cmd
- * @cell: the cell in the HW for the cmd
- * @cell_idx: the index of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_cell *cell, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t cmd_paddr;
- u8 *cmd_vaddr;
- int err = 0;
-
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
- if (!cmd_vaddr) {
- dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
- return -ENOMEM;
- }
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- cell_ctxt->api_cmd_vaddr = cmd_vaddr;
- cell_ctxt->api_cmd_paddr = cmd_paddr;
-
- /* set the cmd DMA address in the cell */
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- /* The data in the HW should be in Big Endian Format */
- cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- free_cmd_buf(chain, cell_idx);
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd_create_cell - create API CMD cell for specific chain
- * @chain: the API CMD specific chain to create its cell
- * @cell_idx: the index of the cell to create
- * @pre_node: previous cell
- * @node_vaddr: the returned virt addr of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx,
- struct hinic_api_cmd_cell *pre_node,
- struct hinic_api_cmd_cell **node_vaddr)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- int err;
-
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
- if (!node) {
- dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
- return -ENOMEM;
- }
-
- node->read.hw_wb_resp_paddr = 0;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
- cell_ctxt->cell_vaddr = node;
- cell_ctxt->cell_paddr = node_paddr;
-
- if (!pre_node) {
- chain->head_cell_paddr = node_paddr;
- chain->head_node = node;
- } else {
- /* The data in the HW should be in Big Endian Format */
- pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
- }
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = alloc_cmd_buf(chain, node, cell_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmd buffer\n");
- goto err_alloc_cmd_buf;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- err = -EINVAL;
- goto err_alloc_cmd_buf;
- }
-
- *node_vaddr = node;
- return 0;
-
-err_alloc_cmd_buf:
- dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
- return err;
-}
-
-/**
- * api_cmd_destroy_cell - destroy API CMD cell of specific chain
- * @chain: the API CMD specific chain to destroy its cell
- * @cell_idx: the cell to destroy
- **/
-static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- size_t node_size;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- node = cell_ctxt->cell_vaddr;
- node_paddr = cell_ctxt->cell_paddr;
- node_size = chain->cell_size;
-
- if (cell_ctxt->api_cmd_vaddr) {
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- free_cmd_buf(chain, cell_idx);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- break;
- }
-
- dma_free_coherent(&pdev->dev, node_size, node,
- node_paddr);
- }
-}
-
-/**
- * api_cmd_destroy_cells - destroy API CMD cells of specific chain
- * @chain: the API CMD specific chain to destroy its cells
- * @num_cells: number of cells to destroy
- **/
-static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
- int num_cells)
-{
- int cell_idx;
-
- for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
- api_cmd_destroy_cell(chain, cell_idx);
-}
-
-/**
- * api_cmd_create_cells - create API CMD cells for specific chain
- * @chain: the API CMD specific chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, cell_idx;
-
- for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
- err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
- if (err) {
- dev_err(&pdev->dev, "Failed to create API CMD cell\n");
- goto err_create_cell;
- }
-
- pre_node = node;
- }
-
-	/* set the final node to point to the start */
- node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
-
- /* set the current node to be the head */
- chain->curr_node = chain->head_node;
- return 0;
-
-err_create_cell:
- api_cmd_destroy_cells(chain, cell_idx);
- return err;
-}
-
-/**
- * api_chain_init - initialize API CMD specific chain
- * @chain: the API CMD specific chain to initialize
- * @attr: attributes to set in the chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_chain_init(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
-
- chain->hwif = hwif;
- chain->chain_type = attr->chain_type;
- chain->num_cells = attr->num_cells;
- chain->cell_size = attr->cell_size;
-
- chain->prod_idx = 0;
- chain->cons_idx = 0;
-
- sema_init(&chain->sem, 1);
-
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
- if (!chain->cell_ctxt)
- return -ENOMEM;
-
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
- if (!chain->wb_status) {
- dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * api_chain_free - free API CMD specific chain
- * @chain: the API CMD specific chain to free
- **/
-static void api_chain_free(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status),
- chain->wb_status, chain->wb_status_paddr);
-}
-
-/**
- * api_cmd_create_chain - create API CMD specific chain
- * @attr: attributes to set the chain
- *
- * Return the created chain
- **/
-static struct hinic_api_cmd_chain *
- api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_chain *chain;
- int err;
-
- if (attr->num_cells & (attr->num_cells - 1)) {
- dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n");
- return ERR_PTR(-EINVAL);
- }
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
- if (!chain)
- return ERR_PTR(-ENOMEM);
-
- err = api_chain_init(chain, attr);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain\n");
- return ERR_PTR(err);
- }
-
- err = api_cmd_create_cells(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n");
- goto err_create_cells;
- }
-
- err = api_cmd_chain_hw_init(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain HW\n");
- goto err_chain_hw_init;
- }
-
- return chain;
-
-err_chain_hw_init:
- api_cmd_destroy_cells(chain, chain->num_cells);
-
-err_create_cells:
- api_chain_free(chain);
- return ERR_PTR(err);
-}
-
-/**
- * api_cmd_destroy_chain - destroy API CMD specific chain
- * @chain: the API CMD specific chain to destroy
- **/
-static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
-{
- api_cmd_chain_hw_clean(chain);
- api_cmd_destroy_cells(chain, chain->num_cells);
- api_chain_free(chain);
-}
-
-/**
- * hinic_api_cmd_init - Initialize all the API CMD chains
- * @chain: the API CMD chains that are initialized
- * @hwif: the hardware interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif)
-{
- enum hinic_api_cmd_chain_type type, chain_type;
- struct hinic_api_cmd_chain_attr attr;
- struct pci_dev *pdev = hwif->pdev;
- size_t hw_cell_sz;
- int err;
-
- hw_cell_sz = sizeof(struct hinic_api_cmd_cell);
-
- attr.hwif = hwif;
- attr.num_cells = API_CHAIN_NUM_CELLS;
- attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz);
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- attr.chain_type = chain_type;
-
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- chain[chain_type] = api_cmd_create_chain(&attr);
- if (IS_ERR(chain[chain_type])) {
- dev_err(&pdev->dev, "Failed to create chain %d\n",
- chain_type);
- err = PTR_ERR(chain[chain_type]);
- goto err_create_chain;
- }
- }
-
- return 0;
-
-err_create_chain:
- type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; type < chain_type; type++) {
- if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[type]);
- }
-
- return err;
-}
-
-/**
- * hinic_api_cmd_free - free the API CMD chains
- * @chain: the API CMD chains that are freed
- **/
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[chain_type]);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
deleted file mode 100644
index 31b94d5..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_API_CMD_H
-#define HINIC_HW_API_CMD_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_API_CMD_PI_IDX_SHIFT 0
-
-#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF
-
-#define HINIC_API_CMD_PI_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \
- HINIC_API_CMD_PI_##member##_SHIFT)
-
-#define HINIC_API_CMD_PI_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \
- << HINIC_API_CMD_PI_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1
-
-#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
- (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
- HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
-
-#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
- << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3
-
-#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
- << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
-#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
-#define HINIC_API_CMD_DESC_DEST_SHIFT 32
-#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1
-#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1
-#define HINIC_API_CMD_DESC_DEST_MASK 0x1F
-#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_DESC_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
- HINIC_API_CMD_DESC_##member##_SHIFT)
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF
-
-#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
-
-#define HINIC_API_CMD_STATUS_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_##member##_MASK)
-
-enum hinic_api_cmd_chain_type {
- HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
-
- HINIC_API_CMD_MAX,
-};
-
-struct hinic_api_cmd_chain_attr {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-};
-
-struct hinic_api_cmd_status {
- u64 header;
- u32 status;
- u32 rsvd0;
- u32 rsvd1;
- u32 rsvd2;
- u64 rsvd3;
-};
-
-/* HW struct */
-struct hinic_api_cmd_cell {
- u64 ctrl;
-
- /* address is 64 bit in HW struct */
- u64 next_cell_paddr;
-
- u64 desc;
-
- /* HW struct */
- union {
- struct {
- u64 hw_cmd_paddr;
- } write;
-
- struct {
- u64 hw_wb_resp_paddr;
- u64 hw_cmd_paddr;
- } read;
- };
-};
-
-struct hinic_api_cmd_cell_ctxt {
- dma_addr_t cell_paddr;
- struct hinic_api_cmd_cell *cell_vaddr;
-
- dma_addr_t api_cmd_paddr;
- u8 *api_cmd_vaddr;
-};
-
-struct hinic_api_cmd_chain {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-
- /* HW members in 24 bit format */
- u32 prod_idx;
- u32 cons_idx;
-
- struct semaphore sem;
-
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
-
- dma_addr_t wb_status_paddr;
- struct hinic_api_cmd_status *wb_status;
-
- dma_addr_t head_cell_paddr;
- struct hinic_api_cmd_cell *head_node;
- struct hinic_api_cmd_cell *curr_node;
-};
-
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size);
-
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif);
-
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
deleted file mode 100644
index 4d09ea7..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/log2.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define CMDQ_CEQE_TYPE_SHIFT 0
-
-#define CMDQ_CEQE_TYPE_MASK 0x7
-
-#define CMDQ_CEQE_GET(val, member) \
- (((val) >> CMDQ_CEQE_##member##_SHIFT) \
- & CMDQ_CEQE_##member##_MASK)
-
-#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
-
-#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
-
-#define CMDQ_WQE_ERRCODE_GET(val, member) \
- (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
- & CMDQ_WQE_ERRCODE_##member##_MASK)
-
-#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
-
-#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
-
-#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
-
-#define CMDQ_WQE_COMPLETED(ctrl_info) \
- HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
-
-#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
-
-#define CMDQ_DB_OFF SZ_2K
-
-#define CMDQ_WQEBB_SIZE 64
-#define CMDQ_WQE_SIZE 64
-#define CMDQ_DEPTH SZ_4K
-
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
-
-#define WQE_LCMD_SIZE 64
-#define WQE_SCMD_SIZE 64
-
-#define COMPLETE_LEN 3
-
-#define CMDQ_TIMEOUT 1000
-
-#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
-
-#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
- struct hinic_cmdqs, cmdq[0])
-
-#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
- struct hinic_func_to_io, \
- cmdqs)
-
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
-enum completion_format {
- COMPLETE_DIRECT = 0,
- COMPLETE_SGE = 1,
-};
-
-enum data_format {
- DATA_SGE = 0,
- DATA_DIRECT = 1,
-};
-
-enum bufdesc_len {
- BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
- BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
-};
-
-enum ctrl_sect_len {
- CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
- CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
-};
-
-enum cmdq_scmd_type {
- CMDQ_SET_ARM_CMD = 2,
-};
-
-enum cmdq_cmd_type {
- CMDQ_CMD_SYNC_DIRECT_RESP = 0,
- CMDQ_CMD_SYNC_SGE_RESP = 1,
-};
-
-enum completion_request {
- NO_CEQ = 0,
- CEQ_SET = 1,
-};
-
-/**
- * hinic_alloc_cmdq_buf - alloc buffer for sending command
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer returned in this struct
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
- &cmdq_buf->dma_addr);
- if (!cmdq_buf->buf) {
- dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * hinic_free_cmdq_buf - free buffer
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer (in this struct) to free
- **/
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
-}
-
-static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
-{
- unsigned int wqe_size = 0;
-
- switch (len) {
- case BUFDESC_LCMD_LEN:
- wqe_size = WQE_LCMD_SIZE;
- break;
- case BUFDESC_SCMD_LEN:
- wqe_size = WQE_SCMD_SIZE;
- break;
- }
-
- return wqe_size;
-}
-
-static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
- struct hinic_cmdq_buf *buf_out)
-{
- struct hinic_sge_resp *sge_resp = &completion->sge_resp;
-
- hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
-}
-
-static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
- enum completion_format complete_format,
- enum data_format data_format,
- enum bufdesc_len buf_len)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- enum ctrl_sect_len ctrl_len;
- struct hinic_ctrl *ctrl;
- u32 saved_data;
-
- if (data_format == DATA_SGE) {
- wqe_lcmd = &wqe->wqe_lcmd;
-
- wqe_lcmd->status.status_info = 0;
- ctrl = &wqe_lcmd->ctrl;
- ctrl_len = CTRL_SECT_LEN;
- } else {
- wqe_scmd = &wqe->direct_wqe.wqe_scmd;
-
- wqe_scmd->status.status_info = 0;
- ctrl = &wqe_scmd->ctrl;
- ctrl_len = CTRL_DIRECT_SECT_LEN;
- }
-
- ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
- HINIC_CMDQ_CTRL_SET(cmd, CMD) |
- HINIC_CMDQ_CTRL_SET(mod, MOD) |
- HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
-
- CMDQ_WQE_HEADER(wqe)->header_info =
- HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
- HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
-
- saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
- saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
-
- if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
- CMDQ_WQE_HEADER(wqe)->saved_data |=
- HINIC_SAVED_DATA_SET(1, ARM);
- else
- CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
-}
-
-static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
- struct hinic_cmdq_buf *buf_in)
-{
- hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
-}
-
-static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
- void *buf_in, u32 in_size)
-{
- struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
-
- wqe_scmd->buf_desc.buf_len = in_size;
- memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
-}
-
-static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- struct hinic_cmdq_buf *buf_in,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
- enum completion_format complete_format;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_lcmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
- prod_idx, complete_format, DATA_SGE,
- BUFDESC_LCMD_LEN);
-
- cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
-}
-
-static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- void *buf_in, u16 in_size,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- enum completion_format complete_format;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_scmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
- complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
-
- cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
-}
-
-static void cmdq_wqe_fill(void *dst, void *src)
-{
- memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
- CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
-
- wmb(); /* The first 8 bytes should be written last */
-
- *(u64 *)dst = *(u64 *)src;
-}
-
-static void cmdq_fill_db(u32 *db_info,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
- HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
-}
-
-static void cmdq_set_db(struct hinic_cmdq *cmdq,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- u32 db_info;
-
- cmdq_fill_db(&db_info, cmdq_type, prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- db_info = cpu_to_be32(db_info);
-
- wmb(); /* write all before the doorbell */
-
- writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
-}
-
-static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in,
- u64 *resp)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- int errcode, wrapped, num_wqebbs;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- struct completion done;
-
- /* Keep doorbell index correct. bh - for tasklet(ceq). */
- spin_lock_bh(&cmdq->cmdq_lock);
-
-	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element, not the shadow */
- hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock_bh(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq->errcode[curr_prod_idx] = &errcode;
-
- init_completion(&done);
- cmdq->done[curr_prod_idx] = &done;
-
- cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
- wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
- curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
-
- /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
- spin_lock_bh(&cmdq->cmdq_lock);
-
- if (cmdq->errcode[curr_prod_idx] == &errcode)
- cmdq->errcode[curr_prod_idx] = NULL;
-
- if (cmdq->done[curr_prod_idx] == &done)
- cmdq->done[curr_prod_idx] = NULL;
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- return -ETIMEDOUT;
- }
-
- smp_rmb(); /* read error code after completion */
-
- if (resp) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
-
- *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
- }
-
- if (errcode != 0)
- return -EFAULT;
-
- return 0;
-}
-
-static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
- u16 in_size)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- int wrapped, num_wqebbs;
-
- /* Keep doorbell index correct */
- spin_lock(&cmdq->cmdq_lock);
-
-	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element, not the shadow */
- hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
- in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
- HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
-
- /* cmdq wqe is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock(&cmdq->cmdq_lock);
- return 0;
-}
-
-static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
-{
- if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * hinic_cmdq_direct_resp - send command with direct data as resp
- * @cmdqs: the cmdqs
- * @mod: module on the card that will handle the command
- * @cmd: the command
- * @buf_in: the buffer for the command
- * @resp: the response to return
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *resp)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = cmdq_params_valid(buf_in);
- if (err) {
- dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
- return err;
- }
-
- return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
- mod, cmd, buf_in, resp);
-}
-
-/**
- * hinic_set_arm_bit - set arm bit to enable the interrupt again
- * @cmdqs: the cmdqs
- * @q_type: type of queue to set the arm bit for
- * @q_id: the queue number
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
-{
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_arm_bit arm_bit;
- int err;
-
- arm_bit.q_type = q_type;
- arm_bit.q_id = q_id;
-
- err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
- if (err) {
- dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
- return err;
- }
-
- return 0;
-}
-
-static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
- unsigned int bufdesc_len, wqe_size;
- struct hinic_ctrl *ctrl;
-
- bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
- wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
- if (wqe_size == WQE_LCMD_SIZE) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
-
- ctrl = &wqe_lcmd->ctrl;
- } else {
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- }
-
- /* clear HW busy bit */
- ctrl->ctrl_info = 0;
-
- wmb(); /* verify wqe is clear */
-}
-
-/**
- * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
- * @cmdq: the cmdq of the arm command
- * @wqe: the wqe of the arm command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- struct hinic_ctrl *ctrl;
- u32 ctrl_info;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
-
- /* HW should toggle the HW BUSY BIT */
- if (!CMDQ_WQE_COMPLETED(ctrl_info))
- return -EBUSY;
-
- clear_wqe_complete_bit(cmdq, wqe);
-
- hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
- return 0;
-}
-
-static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
- int errcode)
-{
- if (cmdq->errcode[prod_idx])
- *cmdq->errcode[prod_idx] = errcode;
-}
-
-/**
- * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
- * @cmdq: the cmdq of the command
- * @cons_idx: the consumer index to update the error code for
- * @errcode: the error code
- **/
-static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
- int errcode)
-{
- u16 prod_idx = cons_idx;
-
- spin_lock(&cmdq->cmdq_lock);
- cmdq_update_errcode(cmdq, prod_idx, errcode);
-
- wmb(); /* write all before update for the command request */
-
- if (cmdq->done[prod_idx])
- complete(cmdq->done[prod_idx]);
- spin_unlock(&cmdq->cmdq_lock);
-}
-
-static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
- struct hinic_cmdq_wqe *cmdq_wqe)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
- struct hinic_status *status = &wqe_lcmd->status;
- struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
- int errcode;
-
- if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
- return -EBUSY;
-
- errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
-
- cmdq_sync_cmd_handler(cmdq, ci, errcode);
-
- clear_wqe_complete_bit(cmdq, cmdq_wqe);
- hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
- return 0;
-}
-
-/**
- * cmdq_ceq_handler - cmdq completion event handler
- * @handle: private data for the handler(cmdqs)
- * @ceqe_data: ceq element data
- **/
-static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
-{
- enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
- struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
- struct hinic_cmdq_header *header;
- struct hinic_hw_wqe *hw_wqe;
- int err, set_arm = 0;
- u32 saved_data;
- u16 ci;
-
-	/* read with the smallest wqe size to determine the actual wqe size */
- while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
- break;
-
- header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
- saved_data = be32_to_cpu(header->saved_data);
-
- if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
- /* arm_bit was set until here */
- set_arm = 0;
-
- if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
- break;
- } else {
- set_arm = 1;
-
- hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
- if (IS_ERR(hw_wqe))
- break;
-
- if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
- break;
- }
- }
-
- if (set_arm) {
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
- if (err)
- dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
- }
-}
-
-/**
- * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
- * @cmdq_ctxt: cmdq ctxt to initialize
- * @cmdq: the cmdq
- * @cmdq_pages: the memory of the queue
- **/
-static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
- struct hinic_cmdq *cmdq,
- struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
- u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
- struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
- struct hinic_wq *wq = cmdq->wq;
-
- /* The data in the HW is in Big Endian Format */
- wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
-
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
-
- ctxt_info->curr_wqe_page_pfn =
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
-
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
-
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
-
- ctxt_info->wq_block_pfn =
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
-
- cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
- cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
-}
-
-/**
- * init_cmdq - initialize cmdq
- * @cmdq: the cmdq
- * @wq: the wq attached to the cmdq
- * @q_type: the cmdq type of the cmdq
- * @db_area: doorbell area for the cmdq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
- enum hinic_cmdq_type q_type, void __iomem *db_area)
-{
- int err;
-
- cmdq->wq = wq;
- cmdq->cmdq_type = q_type;
- cmdq->wrapped = 1;
-
- spin_lock_init(&cmdq->cmdq_lock);
-
- cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
- if (!cmdq->done)
- return -ENOMEM;
-
- cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
- wq->q_depth));
- if (!cmdq->errcode) {
- err = -ENOMEM;
- goto err_errcode;
- }
-
- cmdq->db_base = db_area + CMDQ_DB_OFF;
- return 0;
-
-err_errcode:
- vfree(cmdq->done);
- return err;
-}
-
-/**
- * free_cmdq - Free cmdq
- * @cmdq: the cmdq to free
- **/
-static void free_cmdq(struct hinic_cmdq *cmdq)
-{
- vfree(cmdq->errcode);
- vfree(cmdq->done);
-}
-
-/**
- * init_cmdqs_ctxt - write the cmdq ctxt to HW after initializing all cmdqs
- * @hwdev: the NIC HW device
- * @cmdqs: cmdqs to write the ctxts for
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
- struct hinic_cmdqs *cmdqs, void __iomem **db_area)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- enum hinic_cmdq_type type, cmdq_type;
- struct hinic_cmdq_ctxt *cmdq_ctxts;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
- if (!cmdq_ctxts)
- return -ENOMEM;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = init_cmdq(&cmdqs->cmdq[cmdq_type],
- &cmdqs->saved_wqs[cmdq_type], cmdq_type,
- db_area[cmdq_type]);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdq\n");
- goto err_init_cmdq;
- }
-
- cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
- &cmdqs->cmdq[cmdq_type],
- &cmdqs->cmdq_pages);
- }
-
- /* Write the CMDQ ctxts */
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_CMDQ_CTXT_SET,
- &cmdq_ctxts[cmdq_type],
- sizeof(cmdq_ctxts[cmdq_type]),
- NULL, NULL, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
- cmdq_type);
- goto err_write_cmdq_ctxt;
- }
- }
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return 0;
-
-err_write_cmdq_ctxt:
- cmdq_type = HINIC_MAX_CMDQ_TYPES;
-
-err_init_cmdq:
- for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
- free_cmdq(&cmdqs->cmdq[type]);
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return err;
-}
-
-/**
- * hinic_init_cmdqs - init all cmdqs
- * @cmdqs: cmdqs to init
- * @hwif: HW interface for accessing cmdqs
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
- u16 max_wqe_size;
- int err;
-
- cmdqs->hwif = hwif;
- cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
- HINIC_CMDQ_BUF_SIZE,
- HINIC_CMDQ_BUF_SIZE, 0);
- if (!cmdqs->cmdq_buf_pool)
- return -ENOMEM;
-
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
- if (!cmdqs->saved_wqs) {
- err = -ENOMEM;
- goto err_saved_wqs;
- }
-
- max_wqe_size = WQE_LCMD_SIZE;
- err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
- HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
- CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
- goto err_cmdq_wqs;
- }
-
- hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
- err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
- goto err_cmdq_ctxt;
- }
-
- hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
- cmdq_ceq_handler);
- return 0;
-
-err_cmdq_ctxt:
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
-err_cmdq_wqs:
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
-err_saved_wqs:
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
- return err;
-}
-
-/**
- * hinic_free_cmdqs - free all cmdqs
- * @cmdqs: cmdqs to free
- **/
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq_type;
-
- hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
- free_cmdq(&cmdqs->cmdq[cmdq_type]);
-
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
deleted file mode 100644
index 23f8d39..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_CMDQ_H
-#define HINIC_CMDQ_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/pci.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wq.h"
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
-#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
-#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
-#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
-#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
-#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
-#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_CI_SHIFT 52
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_SAVED_DATA_ARM_SHIFT 31
-
-#define HINIC_SAVED_DATA_ARM_MASK 0x1
-
-#define HINIC_SAVED_DATA_SET(val, member) \
- (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \
- << HINIC_SAVED_DATA_##member##_SHIFT)
-
-#define HINIC_SAVED_DATA_GET(val, member) \
- (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \
- & HINIC_SAVED_DATA_##member##_MASK)
-
-#define HINIC_SAVED_DATA_CLEAR(val, member) \
- ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \
- << HINIC_SAVED_DATA_##member##_SHIFT)))
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
-#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF
-#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F
-
-#define HINIC_CMDQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \
- << HINIC_CMDQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_CMDQ_BUF_SIZE 2048
-
-#define HINIC_CMDQ_BUF_HW_RSVD 8
-#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \
- HINIC_CMDQ_BUF_HW_RSVD)
-
-enum hinic_cmdq_type {
- HINIC_CMDQ_SYNC,
-
- HINIC_MAX_CMDQ_TYPES,
-};
-
-enum hinic_set_arm_qtype {
- HINIC_SET_ARM_CMDQ,
-};
-
-enum hinic_cmd_ack_type {
- HINIC_CMD_ACK_TYPE_CMDQ,
-};
-
-struct hinic_cmdq_buf {
- void *buf;
- dma_addr_t dma_addr;
- size_t size;
-};
-
-struct hinic_cmdq_arm_bit {
- u32 q_type;
- u32 q_id;
-};
-
-struct hinic_cmdq_ctxt_info {
- u64 curr_wqe_page_pfn;
- u64 wq_block_pfn;
-};
-
-struct hinic_cmdq_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 cmdq_type;
- u8 rsvd1[1];
-
- u8 rsvd2[4];
-
- struct hinic_cmdq_ctxt_info ctxt_info;
-};
-
-struct hinic_cmdq {
- struct hinic_wq *wq;
-
- enum hinic_cmdq_type cmdq_type;
- int wrapped;
-
- /* Lock for keeping the doorbell order */
- spinlock_t cmdq_lock;
-
- struct completion **done;
- int **errcode;
-
- /* doorbell area */
- void __iomem *db_base;
-};
-
-struct hinic_cmdqs {
- struct hinic_hwif *hwif;
-
- struct dma_pool *cmdq_buf_pool;
-
- struct hinic_wq *saved_wqs;
-
- struct hinic_cmdq_pages cmdq_pages;
-
- struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
-};
-
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *out_param);
-
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area);
-
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
deleted file mode 100644
index f39b184..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_CSR_H
-#define HINIC_HW_CSR_H
-
-/* HW interface registers */
-#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
-#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
-#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
-#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
-
-#define HINIC_DMA_ATTR_BASE 0xC80
-#define HINIC_ELECTION_BASE 0x4200
-
-#define HINIC_DMA_ATTR_STRIDE 0x4
-#define HINIC_CSR_DMA_ATTR_ADDR(idx) \
- (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
-
-#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
-
-#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
- (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
-
-/* API CMD registers */
-#define HINIC_CSR_API_CMD_BASE 0xF000
-
-#define HINIC_CSR_API_CMD_STRIDE 0x100
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-/* MSI-X registers */
-#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
-#define HINIC_CSR_MSIX_CNT_BASE 0x2004
-
-#define HINIC_CSR_MSIX_STRIDE 0x8
-
-#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
- (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
- (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-/* EQ registers */
-#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
-#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
-
-#define HINIC_EQ_MTT_OFF_STRIDE 0x40
-
-#define HINIC_CSR_AEQ_MTT_OFF(id) \
- (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_MTT_OFF(id) \
- (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
-
-#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
-#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
-#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08
-#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C
-
-#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
-#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
-#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008
-#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C
-
-#define HINIC_EQ_OFF_STRIDE 0x80
-
-#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
- (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
- (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
- (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
- (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
- (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
- (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
- (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
- (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#endif
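
For orientation while reviewing the removal: the per-index CSR helpers above are plain base + index * stride expansions. A few worked examples follow (index values chosen arbitrarily; the addresses fall out of the bases and strides defined in this header):

/* Illustrative expansions (indices are arbitrary examples):
 *   HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(1) = 0xF000 + 0x0 + 1 * 0x100 = 0xF100
 *   HINIC_CSR_MSIX_CTRL_ADDR(2)             = 0x2000 + 2 * 0x8         = 0x2010
 *   HINIC_CSR_AEQ_CTRL_0_ADDR(3)            = 0xE00  + 3 * 0x80        = 0xF80
 */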
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
deleted file mode 100644
index 6b19607..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ /dev/null
@@ -1,1010 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/log2.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define IO_STATUS_TIMEOUT 100
-#define OUTBOUND_STATE_TIMEOUT 100
-#define DB_STATE_TIMEOUT 100
-
-#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \
- (2 * (max_qps) + (num_aeqs) + (num_ceqs))
-
-#define ADDR_IN_4BYTES(addr) ((addr) >> 2)
-
-enum intr_type {
- INTR_MSIX_TYPE,
-};
-
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
-/**
- * get_capability - convert device capabilities to NIC capabilities
- * @hwdev: the HW device to set and convert device capabilities for
- * @dev_cap: device capabilities from FW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- int num_aeqs, num_ceqs, num_irqs;
-
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
- return -EFAULT;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
- num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif);
-
- /* Each QP has its own (SQ + RQ) interrupts */
- nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;
-
- if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
- nic_cap->num_qps = HINIC_Q_CTXT_MAX;
-
- /* num_qps must be power of 2 */
- nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1);
-
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
-
- if (nic_cap->num_qps > nic_cap->max_qps)
- nic_cap->num_qps = nic_cap->max_qps;
-
- return 0;
-}
-
-/**
- * get_cap_from_fw - get device capabilities from FW
- * @pfhwdev: the PF HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
- int err;
-
- in_len = 0;
- out_len = sizeof(dev_cap);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
-
- return get_capability(hwdev, &dev_cap);
-}
-
-/**
- * get_dev_cap - get device capabilities
- * @hwdev: the NIC HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_dev_cap(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- switch (HINIC_FUNC_TYPE(hwif)) {
- case HINIC_PPF:
- case HINIC_PF:
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * init_msix - enable the msix and save the entries
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
- int i, err;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
- nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs);
- if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
- nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
-
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
- GFP_KERNEL);
- if (!hwdev->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nr_irqs; i++)
- hwdev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable pci msix\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * disable_msix - disable the msix
- * @hwdev: the NIC HW device
- **/
-static void disable_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- pci_disable_msix(pdev);
-}
-
-/**
- * hinic_port_msg_cmd - send port msg to mgmt
- * @hwdev: the NIC HW device
- * @cmd: the port command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
- buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * init_fw_ctxt - Init Firmware tables before network mgmt and IO operations
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_fw_ctxt(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
- &fw_ctxt, sizeof(fw_ctxt),
- &fw_ctxt, &out_size);
- if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) {
- dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n",
- fw_ctxt.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hw_ioctxt - set the shape of the IO queues in FW
- * @hwdev: the NIC HW device
- * @rq_depth: rq depth
- * @sq_depth: sq depth
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
- hw_ioctxt.cmdq_depth = 0;
-
- hw_ioctxt.rq_depth = ilog2(rq_depth);
-
- hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
-
- hw_ioctxt.sq_depth = ilog2(sq_depth);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_HWCTXT_SET,
- &hw_ioctxt, sizeof(hw_ioctxt), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-static int wait_for_outbound_state(struct hinic_hwdev *hwdev)
-{
- enum hinic_outbound_state outbound_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT);
- do {
- outbound_state = hinic_outbound_state_get(hwif);
-
- if (outbound_state == HINIC_OUTBOUND_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_db_state(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_db_state db_state;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT);
- do {
- db_state = hinic_db_state_get(hwif);
-
- if (db_state == HINIC_DB_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for DB - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_io_status cmd_io_status;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- unsigned long end;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
- do {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_STATUS_GET,
- &cmd_io_status, sizeof(cmd_io_status),
- &cmd_io_status, &out_size,
- HINIC_MGMT_MSG_SYNC);
- if ((err) || (out_size != sizeof(cmd_io_status))) {
- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
- err);
- return err;
- }
-
- if (cmd_io_status.status == IO_STOPPED) {
- dev_info(&pdev->dev, "IO stopped\n");
- return 0;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
- return -ETIMEDOUT;
-}
-
-/**
- * clear_io_resources - set the IO resources as not active in the NIC
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int clear_io_resources(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_clear_io_res cmd_clear_io_res;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- err = wait_for_io_stopped(hwdev);
- if (err) {
- dev_err(&pdev->dev, "IO has not stopped yet\n");
- return err;
- }
-
- cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res,
- sizeof(cmd_clear_io_res), NULL, NULL,
- HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to clear IO resources\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * set_resources_state - set the state of the resources in the NIC
- * @hwdev: the NIC HW device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_resources_state(struct hinic_hwdev *hwdev,
- enum hinic_res_state state)
-{
- struct hinic_cmd_set_res_state res_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- res_state.state = state;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_RES_STATE_SET,
- &res_state, sizeof(res_state), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * get_base_qpn - get the first qp number
- * @hwdev: the NIC HW device
- * @base_qpn: returned qp number
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
-{
- struct hinic_cmd_base_qpn cmd_base_qpn;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
- &cmd_base_qpn, sizeof(cmd_base_qpn),
- &cmd_base_qpn, &out_size);
- if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) {
- dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n",
- cmd_base_qpn.status);
- return -EFAULT;
- }
-
- *base_qpn = cmd_base_qpn.qpn;
- return 0;
-}
-
-/**
- * hinic_hwdev_ifup - Preparing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- struct hinic_hwif *hwif = hwdev->hwif;
- int err, num_aeqs, num_ceqs, num_qps;
- struct msix_entry *ceq_msix_entries;
- struct msix_entry *sq_msix_entries;
- struct msix_entry *rq_msix_entries;
- struct pci_dev *pdev = hwif->pdev;
- u16 base_qpn;
-
- err = get_base_qpn(hwdev, &base_qpn);
- if (err) {
- dev_err(&pdev->dev, "Failed to get global base qp number\n");
- return err;
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
-
- ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
-
- err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init IO channel\n");
- return err;
- }
-
- num_qps = nic_cap->num_qps;
- sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs];
- rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];
-
- err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,
- sq_msix_entries, rq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QPs\n");
- goto err_create_qps;
- }
-
- err = wait_for_db_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "db - disabled, try again\n");
- hinic_db_state_set(hwif, HINIC_DB_ENABLE);
- }
-
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
- if (err) {
- dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
- goto err_hw_ioctxt;
- }
-
- return 0;
-
-err_hw_ioctxt:
- hinic_io_destroy_qps(func_to_io, num_qps);
-
-err_create_qps:
- hinic_io_free(func_to_io);
- return err;
-}
-
-/**
- * hinic_hwdev_ifdown - Closing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- **/
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- clear_io_resources(hwdev);
-
- hinic_io_destroy_qps(func_to_io, nic_cap->num_qps);
- hinic_io_free(func_to_io);
-}
-
-/**
- * hinic_hwdev_cb_register - register callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- * @handle: private data for the handler
- * @handler: event handler
- **/
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->handler = handler;
- nic_cb->handle = handle;
- nic_cb->cb_state = HINIC_CB_ENABLED;
-}
-
-/**
- * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- **/
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->cb_state &= ~HINIC_CB_ENABLED;
-
- while (nic_cb->cb_state & HINIC_CB_RUNNING)
- schedule();
-
- nic_cb->handler = NULL;
-}
-
-/**
- * nic_mgmt_msg_handler - nic mgmt event handler
- * @handle: private data for the handler
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- **/
-static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev = handle;
- enum hinic_cb_state cb_state;
- struct hinic_nic_cb *nic_cb;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u8 cmd_cb;
-
- hwdev = &pfhwdev->hwdev;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- if ((cmd < HINIC_MGMT_MSG_CMD_BASE) ||
- (cmd >= HINIC_MGMT_MSG_CMD_MAX)) {
- dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
- return;
- }
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
-
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- cb_state = cmpxchg(&nic_cb->cb_state,
- HINIC_CB_ENABLED,
- HINIC_CB_ENABLED | HINIC_CB_RUNNING);
-
- if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler))
- nic_cb->handler(nic_cb->handle, buf_in,
- in_size, buf_out, out_size);
- else
- dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd);
-
- nic_cb->cb_state &= ~HINIC_CB_RUNNING;
-}
-
-/**
- * init_pfhwdev - Initialize the extended components of PF
- * @pfhwdev: the HW device for PF
- *
- * Return 0 - success, negative - failure
- **/
-static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n");
- return err;
- }
-
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
-
- hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
- return 0;
-}
-
-/**
- * free_pfhwdev - Free the extended components of PF
- * @pfhwdev: the HW device for PF
- **/
-static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
-
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
-
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
-}
-
-/**
- * hinic_init_hwdev - Initialize the NIC HW
- * @pdev: the NIC pci device
- *
- * Return initialized NIC HW device
- *
- * Initialize the NIC HW device and return a pointer to it
- **/
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
-{
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- int err, num_aeqs;
-
- hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL);
- if (!hwif)
- return ERR_PTR(-ENOMEM);
-
- err = hinic_init_hwif(hwif, pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init HW interface\n");
- return ERR_PTR(err);
- }
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
- pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
- if (!pfhwdev) {
- err = -ENOMEM;
- goto err_pfhwdev_alloc;
- }
-
- hwdev = &pfhwdev->hwdev;
- hwdev->hwif = hwif;
-
- err = init_msix(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init msix\n");
- goto err_init_msix;
- }
-
- err = wait_for_outbound_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "outbound - disabled, try again\n");
- hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE);
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
-
- err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs,
- HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
- hwdev->msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init async event queues\n");
- goto err_aeqs_init;
- }
-
- err = init_pfhwdev(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init PF HW device\n");
- goto err_init_pfhwdev;
- }
-
- err = get_dev_cap(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get device capabilities\n");
- goto err_dev_cap;
- }
-
- err = init_fw_ctxt(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init function table\n");
- goto err_init_fw_ctxt;
- }
-
- err = set_resources_state(hwdev, HINIC_RES_ACTIVE);
- if (err) {
- dev_err(&pdev->dev, "Failed to set resources state\n");
- goto err_resources_state;
- }
-
- return hwdev;
-
-err_resources_state:
-err_init_fw_ctxt:
-err_dev_cap:
- free_pfhwdev(pfhwdev);
-
-err_init_pfhwdev:
- hinic_aeqs_free(&hwdev->aeqs);
-
-err_aeqs_init:
- disable_msix(hwdev);
-
-err_init_msix:
-err_pfhwdev_alloc:
-err_func_type:
- hinic_free_hwif(hwif);
- return ERR_PTR(err);
-}
-
-/**
- * hinic_free_hwdev - Free the NIC HW device
- * @hwdev: the NIC HW device
- **/
-void hinic_free_hwdev(struct hinic_hwdev *hwdev)
-{
- struct hinic_pfhwdev *pfhwdev = container_of(hwdev,
- struct hinic_pfhwdev,
- hwdev);
-
- set_resources_state(hwdev, HINIC_RES_CLEAN);
-
- free_pfhwdev(pfhwdev);
-
- hinic_aeqs_free(&hwdev->aeqs);
-
- disable_msix(hwdev);
-
- hinic_free_hwif(hwdev->hwif);
-}
-
-/**
- * hinic_hwdev_num_qps - return the number of QPs available for use
- * @hwdev: the NIC HW device
- *
- * Return number of QPs available for use
- **/
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->num_qps;
-}
-
-/**
- * hinic_hwdev_get_sq - get SQ
- * @hwdev: the NIC HW device
- * @i: the position of the SQ
- *
- * Return: the SQ in the i position
- **/
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->sq;
-}
-
-/**
- * hinic_hwdev_get_rq - get RQ
- * @hwdev: the NIC HW device
- * @i: the position of the RQ
- *
- * Return: the RQ in the i position
- **/
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->rq;
-}
-
-/**
- * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
-{
- return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index);
-}
-
-/**
- * hinic_hwdev_msix_set - set message attribute for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer)
-{
- return hinic_msix_attr_set(hwdev->hwif, msix_index,
- pending_limit, coalesc_timer,
- lli_timer_cfg, lli_credit_limit,
- resend_timer);
-}
-
-/**
- * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq
- * @hwdev: the NIC HW device
- * @sq: send queue
- * @pending_limit: the maximum pending update ci events (unit 8)
- * @coalesc_timer: coalesc period for update ci (unit 8 us)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_cmd_hw_ci hw_ci;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ci.dma_attr_off = 0;
- hw_ci.pending_limit = pending_limit;
- hw_ci.coalesc_timer = coalesc_timer;
-
- hw_ci.msix_en = 1;
- hw_ci.msix_entry_idx = sq->msix_entry;
-
- hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ci.sq_id = qp->q_id;
-
- hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_SQ_HI_CI_SET,
- &hw_ci, sizeof(hw_ci), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
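
To keep the call ordering of the removed hw_dev layer in view, a minimal lifecycle sketch is given below; hinic_example_probe() is a made-up caller with abbreviated error handling, and only the hinic_* calls are taken from the interface removed here:

#include <linux/err.h>
#include <linux/pci.h>
#include "hinic_hw_dev.h"

/* Minimal lifecycle sketch; hinic_example_probe() is a hypothetical caller. */
static int hinic_example_probe(struct pci_dev *pdev)
{
	struct hinic_hwdev *hwdev;
	int err;

	hwdev = hinic_init_hwdev(pdev);	/* hwif, MSI-X, AEQs, mgmt channel, FW ctxt */
	if (IS_ERR(hwdev))
		return PTR_ERR(hwdev);

	err = hinic_hwdev_ifup(hwdev);	/* IO channel, QPs, HW IO context */
	if (err) {
		hinic_free_hwdev(hwdev);
		return err;
	}

	/* Datapath setup then uses hinic_hwdev_num_qps() and
	 * hinic_hwdev_get_sq()/hinic_hwdev_get_rq(); the remove path calls
	 * hinic_hwdev_ifdown() followed by hinic_free_hwdev().
	 */
	return 0;
}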
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
deleted file mode 100644
index 0f5563f..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_DEV_H
-#define HINIC_HW_DEV_H
-
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define HINIC_MAX_QPS 32
-
-#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
- HINIC_MGMT_MSG_CMD_BASE)
-
-struct hinic_cap {
- u16 max_qps;
- u16 num_qps;
-};
-
-enum hinic_port_cmd {
- HINIC_PORT_CMD_CHANGE_MTU = 2,
-
- HINIC_PORT_CMD_ADD_VLAN = 3,
- HINIC_PORT_CMD_DEL_VLAN = 4,
-
- HINIC_PORT_CMD_SET_MAC = 9,
- HINIC_PORT_CMD_GET_MAC = 10,
- HINIC_PORT_CMD_DEL_MAC = 11,
-
- HINIC_PORT_CMD_SET_RX_MODE = 12,
-
- HINIC_PORT_CMD_GET_LINK_STATE = 24,
-
- HINIC_PORT_CMD_SET_PORT_STATE = 41,
-
- HINIC_PORT_CMD_FWCTXT_INIT = 69,
-
- HINIC_PORT_CMD_SET_FUNC_STATE = 93,
-
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
-
- HINIC_PORT_CMD_GET_CAP = 170,
-};
-
-enum hinic_mgmt_msg_cmd {
- HINIC_MGMT_MSG_CMD_BASE = 160,
-
- HINIC_MGMT_MSG_CMD_LINK_STATUS = 160,
-
- HINIC_MGMT_MSG_CMD_MAX,
-};
-
-enum hinic_cb_state {
- HINIC_CB_ENABLED = BIT(0),
- HINIC_CB_RUNNING = BIT(1),
-};
-
-enum hinic_res_state {
- HINIC_RES_CLEAN = 0,
- HINIC_RES_ACTIVE = 1,
-};
-
-struct hinic_cmd_fw_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rx_buf_sz;
-
- u32 rsvd1;
-};
-
-struct hinic_cmd_hw_ioctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u16 rsvd1;
-
- u8 set_cmdq_depth;
- u8 cmdq_depth;
-
- u8 rsvd2;
- u8 rsvd3;
- u8 rsvd4;
- u8 rsvd5;
-
- u16 rq_depth;
- u16 rx_buf_sz_idx;
- u16 sq_depth;
-};
-
-struct hinic_cmd_io_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
- u32 io_status;
-};
-
-struct hinic_cmd_clear_io_res {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
-};
-
-struct hinic_cmd_set_res_state {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
- u32 rsvd2;
-};
-
-struct hinic_cmd_base_qpn {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 qpn;
-};
-
-struct hinic_cmd_hw_ci {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u8 dma_attr_off;
- u8 pending_limit;
- u8 coalesc_timer;
-
- u8 msix_en;
- u16 msix_entry_idx;
-
- u32 sq_id;
- u32 rsvd1;
- u64 ci_addr;
-};
-
-struct hinic_hwdev {
- struct hinic_hwif *hwif;
- struct msix_entry *msix_entries;
-
- struct hinic_aeqs aeqs;
- struct hinic_func_to_io func_to_io;
-
- struct hinic_cap nic_cap;
-};
-
-struct hinic_nic_cb {
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size);
-
- void *handle;
- unsigned long cb_state;
-};
-
-struct hinic_pfhwdev {
- struct hinic_hwdev hwdev;
-
- struct hinic_pf_to_mgmt pf_to_mgmt;
-
- struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
-};
-
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
-
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd);
-
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size);
-
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
-
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
-
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev);
-
-void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
-
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
-
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i);
-
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index);
-
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer);
-
-#endif
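
The callback registration declared above is how the NIC layer consumed MGMT events such as link status. A hedged sketch of its use follows; the handler name and body are invented, and only the register/unregister calls and HINIC_MGMT_MSG_CMD_LINK_STATUS come from this header:

/* Hypothetical handler; the prototype matches hinic_hwdev_cb_register(). */
static void example_link_status_handler(void *handle, void *buf_in, u16 in_size,
					void *buf_out, u16 *out_size)
{
	/* decode the link status message carried in buf_in */
}

static void example_enable_link_events(struct hinic_hwdev *hwdev, void *priv)
{
	hinic_hwdev_cb_register(hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
				priv, example_link_status_handler);
}

static void example_disable_link_events(struct hinic_hwdev *hwdev)
{
	hinic_hwdev_cb_unregister(hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS);
}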
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
deleted file mode 100644
index 7cb8b9b9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/log2.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-
-#define HINIC_EQS_WQ_NAME "hinic_eqs"
-
-#define GET_EQ_NUM_PAGES(eq, pg_size) \
- (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
-
-#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
-
-#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
-
-#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
-
-#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define GET_EQ_ELEMENT(eq, idx) \
- ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
- (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
-
-#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CEQ_ELEM(eq, idx) ((u32 *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
-
-#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
-
-#define PAGE_IN_4K(page_size) ((page_size) >> 12)
-#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
-
-#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
-#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
-
-#define EQ_MAX_PAGES 8
-
-#define CEQE_TYPE_SHIFT 23
-#define CEQE_TYPE_MASK 0x7
-
-#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \
- CEQE_TYPE_MASK)
-
-#define CEQE_DATA_MASK 0x3FFFFFF
-#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK)
-
-#define aeq_to_aeqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
-
-#define ceq_to_ceqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
-
-#define work_to_aeq_work(work) \
- container_of(work, struct hinic_eq_work, work)
-
-#define DMA_ATTR_AEQ_DEFAULT 0
-#define DMA_ATTR_CEQ_DEFAULT 0
-
-/* No coalescence */
-#define THRESH_CEQ_DEFAULT 0
-
-enum eq_int_mode {
- EQ_INT_MODE_ARMED,
- EQ_INT_MODE_ALWAYS
-};
-
-enum eq_arm_state {
- EQ_NOT_ARMED,
- EQ_ARMED
-};
-
-/**
- * hinic_aeq_register_hw_cb - register AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to register the callback for
- * @handle: private data to be used by the callback
- * @hwe_handler: callback function
- **/
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size))
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_handler = hwe_handler;
- hwe_cb->handle = handle;
- hwe_cb->hwe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to unregister the callback for
- **/
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event)
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
-
- while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
- schedule();
-
- hwe_cb->hwe_handler = NULL;
-}
-
-/**
- * hinic_ceq_register_cb - register CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to register the callback for
- * @handle: private data to be used by the callback
- * @handler: callback function
- **/
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*handler)(void *handle, u32 ceqe_data))
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->handler = handler;
- ceq_cb->handle = handle;
- ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to unregister callback for it
- **/
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event)
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
-
- while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
- schedule();
-
- ceq_cb->handler = NULL;
-}
-
-static u8 eq_cons_idx_checksum_set(u32 val)
-{
- u8 checksum = 0;
- int idx;
-
- for (idx = 0; idx < 32; idx += 4)
- checksum ^= ((val >> idx) & 0xF);
-
- return (checksum & 0xF);
-}
-
-/**
- * eq_update_ci - update the HW cons idx of event queue
- * @eq: the event queue to update the cons idx for
- **/
-static void eq_update_ci(struct hinic_eq *eq)
-{
- u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
-
- /* Read Modify Write */
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_EQ_CI_CLEAR(val, IDX) &
- HINIC_EQ_CI_CLEAR(val, WRAPPED) &
- HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
- HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);
-
- val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
- HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
- HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
-
- val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-/**
- * aeq_irq_handler - handler for the AEQ event
- * @eq: the Async Event Queue that received the event
- **/
-static void aeq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
- struct hinic_hwif *hwif = aeqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_aeq_elem *aeqe_curr;
- struct hinic_hw_event_cb *hwe_cb;
- enum hinic_aeq_type event;
- unsigned long eqe_state;
- u32 aeqe_desc;
- int i, size;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe_curr = GET_CURR_AEQ_ELEM(eq);
-
- /* Data in HW is in Big endian Format */
- aeqe_desc = be32_to_cpu(aeqe_curr->desc);
-
- /* HW toggles the wrapped bit, when it adds eq element */
- if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
- break;
-
- event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
- if (event >= HINIC_MAX_AEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
- return;
- }
-
- if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
- hwe_cb = &aeqs->hwe_cb[event];
-
- size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
-
- eqe_state = cmpxchg(&hwe_cb->hwe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED |
- HINIC_EQE_RUNNING);
- if ((eqe_state == HINIC_EQE_ENABLED) &&
- (hwe_cb->hwe_handler))
- hwe_cb->hwe_handler(hwe_cb->handle,
- aeqe_curr->data, size);
- else
- dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
- event);
-
- hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
- }
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * ceq_event_handler - handler for the ceq events
- * @ceqs: ceqs part of the chip
- * @ceqe: ceq element that describes the event
- **/
-static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
-{
- struct hinic_hwif *hwif = ceqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_ceq_cb *ceq_cb;
- enum hinic_ceq_type event;
- unsigned long eqe_state;
-
- event = CEQE_TYPE(ceqe);
- if (event >= HINIC_MAX_CEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
- return;
- }
-
- ceq_cb = &ceqs->ceq_cb[event];
-
- eqe_state = cmpxchg(&ceq_cb->ceqe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
-
- if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
- ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
- else
- dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
-}
-
-/**
- * ceq_irq_handler - handler for the CEQ event
- * @eq: the Completion Event Queue that received the event
- **/
-static void ceq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
- u32 ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = *(GET_CURR_CEQ_ELEM(eq));
-
- /* Data in HW is in Big endian Format */
- ceqe = be32_to_cpu(ceqe);
-
- /* HW toggles the wrapped bit, when it adds eq element event */
- if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
- break;
-
- ceq_event_handler(ceqs, ceqe);
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * eq_irq_handler - handler for the EQ event
- * @data: the Event Queue that received the event
- **/
-static void eq_irq_handler(void *data)
-{
- struct hinic_eq *eq = data;
-
- if (eq->type == HINIC_AEQ)
- aeq_irq_handler(eq);
- else if (eq->type == HINIC_CEQ)
- ceq_irq_handler(eq);
-
- eq_update_ci(eq);
-}
-
-/**
- * eq_irq_work - the work of the EQ that received the event
- * @work: the work struct that is associated with the EQ
- **/
-static void eq_irq_work(struct work_struct *work)
-{
- struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
- struct hinic_eq *aeq;
-
- aeq = aeq_work->data;
- eq_irq_handler(aeq);
-}
-
-/**
- * ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
- **/
-static void ceq_tasklet(unsigned long ceq_data)
-{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
-
- eq_irq_handler(ceq);
-}
-
-/**
- * aeq_interrupt - aeq interrupt handler
- * @irq: irq number
- * @data: the Async Event Queue that collected the event
- **/
-static irqreturn_t aeq_interrupt(int irq, void *data)
-{
- struct hinic_eq_work *aeq_work;
- struct hinic_eq *aeq = data;
- struct hinic_aeqs *aeqs;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
-
- aeq_work = &aeq->aeq_work;
- aeq_work->data = aeq;
-
- aeqs = aeq_to_aeqs(aeq);
- queue_work(aeqs->workq, &aeq_work->work);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ceq_interrupt - ceq interrupt handler
- * @irq: irq number
- * @data: the Completion Event Queue that collected the event
- **/
-static irqreturn_t ceq_interrupt(int irq, void *data)
-{
- struct hinic_eq *ceq = data;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
-
- tasklet_schedule(&ceq->ceq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void set_ctrl0(struct hinic_eq *eq)
-{
- struct msix_entry *msix_entry = &eq->msix_entry;
- enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);
-
- ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
- HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
- HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
- HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);
-
- ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
- HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
- HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
- HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-static void set_ctrl1(struct hinic_eq *eq)
-{
- enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
- elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
- HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
- HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-/**
- * set_eq_ctrls - setting eq's ctrl registers
- * @eq: the Event Queue for setting
- **/
-static void set_eq_ctrls(struct hinic_eq *eq)
-{
- set_ctrl0(eq);
- set_ctrl1(eq);
-}
-
-/**
- * aeq_elements_init - initialize all the elements in the aeq
- * @eq: the Async Event Queue
- * @init_val: value to initialize the elements with
- **/
-static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- struct hinic_aeq_elem *aeqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe = GET_AEQ_ELEM(eq, i);
- aeqe->desc = cpu_to_be32(init_val);
- }
-
- wmb(); /* Write the initialization values */
-}
-
-/**
- * ceq_elements_init - Initialize all the elements in the ceq
- * @eq: the event queue
- * @init_val: value to initialize the elements with
- **/
-static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- u32 *ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = GET_CEQ_ELEM(eq, i);
- *(ceqe) = cpu_to_be32(init_val);
- }
-
- wmb(); /* Write the initialization values */
-}
-
-/**
- * alloc_eq_pages - allocate the pages for the queue
- * @eq: the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int alloc_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 init_val, addr, val;
- size_t addr_size;
- int err, pg;
-
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->dma_addr)
- return -ENOMEM;
-
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->virt_addr) {
- err = -ENOMEM;
- goto err_virt_addr_alloc;
- }
-
- for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
- if (!eq->virt_addr[pg]) {
- err = -ENOMEM;
- goto err_dma_alloc;
- }
-
- addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
- val = upper_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
- val = lower_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
- }
-
- init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
-
- if (eq->type == HINIC_AEQ)
- aeq_elements_init(eq, init_val);
- else if (eq->type == HINIC_CEQ)
- ceq_elements_init(eq, init_val);
-
- return 0;
-
-err_dma_alloc:
- while (--pg >= 0)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
-
-err_virt_addr_alloc:
- devm_kfree(&pdev->dev, eq->dma_addr);
- return err;
-}
-
-/**
- * free_eq_pages - free the pages of the queue
- * @eq: the Event Queue
- **/
-static void free_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int pg;
-
- for (pg = 0; pg < eq->num_pages; pg++)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
- devm_kfree(&pdev->dev, eq->dma_addr);
-}
-
-/**
- * init_eq - initialize Event Queue
- * @eq: the event queue
- * @hwif: the HW interface of a PCI function device
- * @type: the type of the event queue, aeq or ceq
- * @q_id: Queue id number
- * @q_len: the number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @entry: msix entry associated with the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
- enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
- struct msix_entry entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- eq->hwif = hwif;
- eq->type = type;
- eq->q_id = q_id;
- eq->q_len = q_len;
- eq->page_size = page_size;
-
- /* Clear PI and CI, also clear the ARM bit */
- hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
- hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
-
- eq->cons_idx = 0;
- eq->wrapped = 0;
-
- if (type == HINIC_AEQ) {
- eq->elem_size = HINIC_AEQE_SIZE;
- } else if (type == HINIC_CEQ) {
- eq->elem_size = HINIC_CEQE_SIZE;
- } else {
- dev_err(&pdev->dev, "Invalid EQ type\n");
- return -EINVAL;
- }
-
- eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
- eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
-
- eq->msix_entry = entry;
-
- if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
- dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
- return -EINVAL;
- }
-
- if (eq->num_pages > EQ_MAX_PAGES) {
- dev_err(&pdev->dev, "too many pages for eq\n");
- return -EINVAL;
- }
-
- set_eq_ctrls(eq);
- eq_update_ci(eq);
-
- err = alloc_eq_pages(eq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
- return err;
- }
-
- if (type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- INIT_WORK(&aeq_work->work, eq_irq_work);
- } else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
- }
-
- /* set the attributes of the msix entry */
- hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
- HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
-
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
-
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
- goto err_req_irq;
- }
-
- return 0;
-
-err_req_irq:
- free_eq_pages(eq);
- return err;
-}
-
-/**
- * remove_eq - remove Event Queue
- * @eq: the event queue
- **/
-static void remove_eq(struct hinic_eq *eq)
-{
- struct msix_entry *entry = &eq->msix_entry;
-
- free_irq(entry->vector, eq);
-
- if (eq->type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- cancel_work_sync(&aeq_work->work);
- } else if (eq->type == HINIC_CEQ) {
- tasklet_kill(&eq->ceq_tasklet);
- }
-
- free_eq_pages(eq);
-}
-
-/**
- * hinic_aeqs_init - initialize all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- * @hwif: the HW interface of a PCI function device
- * @num_aeqs: number of AEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, q_id;
-
- aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
- if (!aeqs->workq)
- return -ENOMEM;
-
- aeqs->hwif = hwif;
- aeqs->num_aeqs = num_aeqs;
-
- for (q_id = 0; q_id < num_aeqs; q_id++) {
- err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
- goto err_init_aeq;
- }
- }
-
- return 0;
-
-err_init_aeq:
- for (i = 0; i < q_id; i++)
- remove_eq(&aeqs->aeq[i]);
-
- destroy_workqueue(aeqs->workq);
- return err;
-}
-
-/**
- * hinic_aeqs_free - free all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- **/
-void hinic_aeqs_free(struct hinic_aeqs *aeqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
- remove_eq(&aeqs->aeq[q_id]);
-
- destroy_workqueue(aeqs->workq);
-}
-
-/**
- * hinic_ceqs_init - init all the ceqs
- * @ceqs: ceqs part of the chip
- * @hwif: the hardware interface of a pci function device
- * @num_ceqs: number of CEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, Negative - Failure
- **/
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, q_id, err;
-
- ceqs->hwif = hwif;
- ceqs->num_ceqs = num_ceqs;
-
- for (q_id = 0; q_id < num_ceqs; q_id++) {
- err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
- goto err_init_ceq;
- }
- }
-
- return 0;
-
-err_init_ceq:
- for (i = 0; i < q_id; i++)
- remove_eq(&ceqs->ceq[i]);
-
- return err;
-}
-
-/**
- * hinic_ceqs_free - free all the ceqs
- * @ceqs: ceqs part of the chip
- **/
-void hinic_ceqs_free(struct hinic_ceqs *ceqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
- remove_eq(&ceqs->ceq[q_id]);
-}
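
The completion path above boils down to pulling 32-bit CEQEs out of the ring and splitting them with the CEQE_TYPE()/CEQE_DATA() helpers. As a rough sketch (decode_ceqe() is an invented helper; the bit layout is the one defined in this file):

/* Hypothetical decode helper using the CEQE_* masks defined above. */
static void decode_ceqe(__be32 raw, u32 *type, u32 *data)
{
	u32 ceqe = be32_to_cpu(raw);	/* HW writes CEQEs in big endian */

	*type = CEQE_TYPE(ceqe);	/* (ceqe >> 23) & 0x7, e.g. HINIC_CEQ_CMDQ */
	*data = CEQE_DATA(ceqe);	/* ceqe & 0x3FFFFFF */
}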
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
deleted file mode 100644
index ecb9c2b..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_EQS_H
-#define HINIC_HW_EQS_H
-
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/sizes.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
-#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1
-
-#define HINIC_AEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \
- HINIC_AEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \
- << HINIC_AEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_AEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \
- HINIC_AEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \
- << HINIC_AEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
-#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1
-
-#define HINIC_CEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \
- HINIC_CEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \
- << HINIC_CEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_CEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \
- HINIC_CEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \
- << HINIC_CEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0
-#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7
-#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8
-#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31
-
-#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F
-#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1
-#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF
-#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1
-
-#define HINIC_EQ_ELEM_DESC_SET(val, member) \
- (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \
- HINIC_EQ_ELEM_DESC_##member##_SHIFT)
-
-#define HINIC_EQ_ELEM_DESC_GET(val, member) \
- (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \
- HINIC_EQ_ELEM_DESC_##member##_MASK)
-
-#define HINIC_EQ_CI_IDX_SHIFT 0
-#define HINIC_EQ_CI_WRAPPED_SHIFT 20
-#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24
-#define HINIC_EQ_CI_INT_ARMED_SHIFT 31
-
-#define HINIC_EQ_CI_IDX_MASK 0xFFFFF
-#define HINIC_EQ_CI_WRAPPED_MASK 0x1
-#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF
-#define HINIC_EQ_CI_INT_ARMED_MASK 0x1
-
-#define HINIC_EQ_CI_SET(val, member) \
- (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \
- HINIC_EQ_CI_##member##_SHIFT)
-
-#define HINIC_EQ_CI_CLEAR(val, member) \
- ((val) & (~(HINIC_EQ_CI_##member##_MASK \
- << HINIC_EQ_CI_##member##_SHIFT)))
-
-#define HINIC_MAX_AEQS 4
-#define HINIC_MAX_CEQS 32
-
-#define HINIC_AEQE_SIZE 64
-#define HINIC_CEQE_SIZE 4
-
-#define HINIC_AEQE_DESC_SIZE 4
-#define HINIC_AEQE_DATA_SIZE \
- (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
-
-#define HINIC_DEFAULT_AEQ_LEN 64
-#define HINIC_DEFAULT_CEQ_LEN 1024
-
-#define HINIC_EQ_PAGE_SIZE SZ_4K
-
-#define HINIC_CEQ_ID_CMDQ 0
-
-enum hinic_eq_type {
- HINIC_AEQ,
- HINIC_CEQ,
-};
-
-enum hinic_aeq_type {
- HINIC_MSG_FROM_MGMT_CPU = 2,
-
- HINIC_MAX_AEQ_EVENTS,
-};
-
-enum hinic_ceq_type {
- HINIC_CEQ_CMDQ = 3,
-
- HINIC_MAX_CEQ_EVENTS,
-};
-
-enum hinic_eqe_state {
- HINIC_EQE_ENABLED = BIT(0),
- HINIC_EQE_RUNNING = BIT(1),
-};
-
-struct hinic_aeq_elem {
- u8 data[HINIC_AEQE_DATA_SIZE];
- u32 desc;
-};
-
-struct hinic_eq_work {
- struct work_struct work;
- void *data;
-};
-
-struct hinic_eq {
- struct hinic_hwif *hwif;
-
- enum hinic_eq_type type;
- int q_id;
- u32 q_len;
- u32 page_size;
-
- u32 cons_idx;
- int wrapped;
-
- size_t elem_size;
- int num_pages;
- int num_elem_in_pg;
-
- struct msix_entry msix_entry;
-
- dma_addr_t *dma_addr;
- void **virt_addr;
-
- struct hinic_eq_work aeq_work;
-
- struct tasklet_struct ceq_tasklet;
-};
-
-struct hinic_hw_event_cb {
- void (*hwe_handler)(void *handle, void *data, u8 size);
- void *handle;
- unsigned long hwe_state;
-};
-
-struct hinic_aeqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq aeq[HINIC_MAX_AEQS];
- int num_aeqs;
-
- struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS];
-
- struct workqueue_struct *workq;
-};
-
-struct hinic_ceq_cb {
- void (*handler)(void *handle, u32 ceqe_data);
- void *handle;
- enum hinic_eqe_state ceqe_state;
-};
-
-struct hinic_ceqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq ceq[HINIC_MAX_CEQS];
- int num_ceqs;
-
- struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
-};
-
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size));
-
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event);
-
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*ceq_cb)(void *handle, u32 ceqe_data));
-
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event);
-
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_aeqs_free(struct hinic_aeqs *aeqs);
-
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_ceqs_free(struct hinic_ceqs *ceqs);
-
-#endif
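
All of the *_SET/*_CLEAR/*_GET macros above share one pattern: each field is described by a MASK and a SHIFT, SET() masks a value and shifts it into position, CLEAR() zeroes the field inside an existing word, and GET() extracts it. A small self-contained illustration of that pattern; the CTRL_* field names below are made up for the example, not taken from the hardware spec:

#include <stdint.h>
#include <stdio.h>

/* hypothetical 32-bit control word with a LEN field in bits [20:0] and a
 * PAGE_SIZE field in bits [31:28], mirroring the macro style used above */
#define CTRL_LEN_SHIFT        0
#define CTRL_LEN_MASK         0x1FFFFF
#define CTRL_PAGE_SIZE_SHIFT  28
#define CTRL_PAGE_SIZE_MASK   0xF

#define CTRL_SET(val, member) \
        (((uint32_t)(val) & CTRL_##member##_MASK) << CTRL_##member##_SHIFT)

#define CTRL_CLEAR(val, member) \
        ((val) & ~((uint32_t)CTRL_##member##_MASK << CTRL_##member##_SHIFT))

#define CTRL_GET(val, member) \
        (((val) >> CTRL_##member##_SHIFT) & CTRL_##member##_MASK)

int main(void)
{
        uint32_t ctrl = 0xFFFFFFFF;

        /* read-modify-write of the LEN field only */
        ctrl = CTRL_CLEAR(ctrl, LEN);
        ctrl |= CTRL_SET(1024, LEN);

        printf("len=%u page_size=%u\n",
               CTRL_GET(ctrl, LEN), CTRL_GET(ctrl, PAGE_SIZE));
        return 0;
}

The CLEAR-then-OR sequence is what keeps a read-modify-write from disturbing neighbouring fields in the same 32-bit register.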
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
deleted file mode 100644
index 823a170..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-
-#define PCIE_ATTR_ENTRY 0
-
-#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
-
-/**
- * hinic_msix_attr_set - set message attribute for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer, u8 lli_credit_limit,
- u8 resend_timer)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
- HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
- HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
- * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER);
- addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_set_pf_action - set action on pf channel
- * @hwif: the HW interface of a pci function device
- * @action: action on pf channel
- **/
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
-{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
-
- attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
- attr5 |= HINIC_FA5_SET(action, PF_ACTION);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5);
-}
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, OUTBOUND_STATE);
-}
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE);
- attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, DB_STATE);
-}
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE);
- attr4 |= HINIC_FA4_SET(db_state, DB_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-/**
- * hwif_ready - test if the HW is ready for use
- * @hwif: the HW interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int hwif_ready(struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- u32 addr, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hwif_attr - set the attributes in the relevant members in hwif
- * @hwif: the HW interface of a pci function device
- * @attr0: the first attribute that was read from the hw
- * @attr1: the second attribute that was read from the hw
- **/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
-{
- hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
- hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
- hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
- hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
-
- hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
- hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
- hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
- hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
-}
-
-/**
- * read_hwif_attr - read the attributes and set members in hwif
- * @hwif: the HW interface of a pci function device
- **/
-static void read_hwif_attr(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- set_hwif_attr(hwif, attr0, attr1);
-}
-
-/**
- * set_ppf - try to set the hwif as ppf and set its function type on success
- * @hwif: the HW interface of a pci function device
- **/
-static void set_ppf(struct hinic_hwif *hwif)
-{
- struct hinic_func_attr *attr = &hwif->attr;
- u32 addr, val, ppf_election;
-
- /* Read Modify Write */
- addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
-
- ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
-
- val |= ppf_election;
- hinic_hwif_write_reg(hwif, addr, val);
-
- /* check PPF */
- val = hinic_hwif_read_reg(hwif, addr);
-
- attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
- if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
- attr->func_type = HINIC_PPF;
-}
-
-/**
- * set_dma_attr - set the dma attributes in the HW
- * @hwif: the HW interface of a pci function device
- * @entry_idx: the entry index in the dma table
- * @st: PCIE TLP steering tag
- * @at: PCIE TLP AT field
- * @ph: PCIE TLP Processing Hint field
- * @no_snooping: PCIE TLP No snooping
- * @tph_en: PCIE TLP Processing Hint Enable
- **/
-static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
- u8 st, u8 at, u8 ph,
- enum hinic_pcie_nosnoop no_snooping,
- enum hinic_pcie_tph tph_en)
-{
- u32 addr, val, dma_attr_entry;
-
- /* Read Modify Write */
- addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_DMA_ATTR_CLEAR(val, ST) &
- HINIC_DMA_ATTR_CLEAR(val, AT) &
- HINIC_DMA_ATTR_CLEAR(val, PH) &
- HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
- HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
-
- dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
- HINIC_DMA_ATTR_SET(at, AT) |
- HINIC_DMA_ATTR_SET(ph, PH) |
- HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
- HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
-
- val |= dma_attr_entry;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * dma_attr_init - initialize the default dma attributes
- * @hwif: the HW interface of a pci function device
- **/
-static void dma_attr_init(struct hinic_hwif *hwif)
-{
- set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
- HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
- HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
-}
-
-/**
- * hinic_init_hwif - initialize the hw interface
- * @hwif: the HW interface of a pci function device
- * @pdev: the pci device for accessing PCI resources
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
-{
- int err;
-
- hwif->pdev = pdev;
-
- hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
- if (!hwif->cfg_regs_bar) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- return -ENOMEM;
- }
-
- err = hwif_ready(hwif);
- if (err) {
- dev_err(&pdev->dev, "HW interface is not ready\n");
- goto err_hwif_ready;
- }
-
- read_hwif_attr(hwif);
-
- if (HINIC_IS_PF(hwif))
- set_ppf(hwif);
-
- /* No transactions before DMA is initialized */
- dma_attr_init(hwif);
- return 0;
-
-err_hwif_ready:
- iounmap(hwif->cfg_regs_bar);
- return err;
-}
-
-/**
- * hinic_free_hwif - free the HW interface
- * @hwif: the HW interface of a pci function device
- **/
-void hinic_free_hwif(struct hinic_hwif *hwif)
-{
- iounmap(hwif->cfg_regs_bar);
-}
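
set_ppf() above is effectively an election: each PF writes its own function index into the shared PPF election register and then reads the register back; whichever index survives the read-back identifies the elected PPF. The sketch below restates that write-then-verify flow against an in-memory stand-in for the register, so, unlike real hardware where only one writer's value sticks, the caller here always appears to win:

#include <stdint.h>
#include <stdio.h>

#define ELECTION_IDX_SHIFT 0
#define ELECTION_IDX_MASK  0x1F

/* in-memory stand-in for the shared hardware election register */
static uint32_t election_reg;

static uint32_t reg_read(void)      { return election_reg; }
static void reg_write(uint32_t v)   { election_reg = v; }

/* returns 1 if this function index ends up as the elected PPF */
static int try_elect(uint32_t my_idx)
{
        uint32_t val = reg_read();

        /* clear the IDX field, then propose our own index */
        val &= ~((uint32_t)ELECTION_IDX_MASK << ELECTION_IDX_SHIFT);
        val |= (my_idx & ELECTION_IDX_MASK) << ELECTION_IDX_SHIFT;
        reg_write(val);

        /* read back and compare: on real hardware only one writer "wins" */
        val = reg_read();
        return ((val >> ELECTION_IDX_SHIFT) & ELECTION_IDX_MASK) == my_idx;
}

int main(void)
{
        printf("function 3 elected: %d\n", try_elect(3));
        return 0;
}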
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
deleted file mode 100644
index 5b4760c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IF_H
-#define HINIC_HW_IF_H
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define HINIC_DMA_ATTR_ST_SHIFT 0
-#define HINIC_DMA_ATTR_AT_SHIFT 8
-#define HINIC_DMA_ATTR_PH_SHIFT 10
-#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
-#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
-
-#define HINIC_DMA_ATTR_ST_MASK 0xFF
-#define HINIC_DMA_ATTR_AT_MASK 0x3
-#define HINIC_DMA_ATTR_PH_MASK 0x3
-#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
-#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
-
-#define HINIC_DMA_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
- HINIC_DMA_ATTR_##member##_SHIFT)
-
-#define HINIC_DMA_ATTR_CLEAR(val, member) \
- ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
- << HINIC_DMA_ATTR_##member##_SHIFT)))
-
-#define HINIC_FA0_FUNC_IDX_SHIFT 0
-#define HINIC_FA0_PF_IDX_SHIFT 10
-#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
-/* reserved members - off 16 */
-#define HINIC_FA0_FUNC_TYPE_SHIFT 24
-
-#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
-#define HINIC_FA0_PF_IDX_MASK 0xF
-#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_FA0_FUNC_TYPE_MASK 0x1
-
-#define HINIC_FA0_GET(val, member) \
- (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
-
-#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
-/* reserved members - off 10 */
-#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
-/* reserved members - off 15 */
-#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
-/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
-
-#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
-#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
-#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
-
-#define HINIC_FA1_GET(val, member) \
- (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
-
-#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
-#define HINIC_FA4_DB_STATE_SHIFT 1
-
-#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1
-#define HINIC_FA4_DB_STATE_MASK 0x1
-
-#define HINIC_FA4_GET(val, member) \
- (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK)
-
-#define HINIC_FA4_SET(val, member) \
- ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT)
-
-#define HINIC_FA4_CLEAR(val, member) \
- ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT)))
-
-#define HINIC_FA5_PF_ACTION_SHIFT 0
-#define HINIC_FA5_PF_ACTION_MASK 0xFFFF
-
-#define HINIC_FA5_SET(val, member) \
- (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT)
-
-#define HINIC_FA5_CLEAR(val, member) \
- ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT)))
-
-#define HINIC_PPF_ELECTION_IDX_SHIFT 0
-#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
-
-#define HINIC_PPF_ELECTION_SET(val, member) \
- (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \
- HINIC_PPF_ELECTION_##member##_SHIFT)
-
-#define HINIC_PPF_ELECTION_GET(val, member) \
- (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
- HINIC_PPF_ELECTION_##member##_MASK)
-
-#define HINIC_PPF_ELECTION_CLEAR(val, member) \
- ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
- << HINIC_PPF_ELECTION_##member##_SHIFT)))
-
-#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
-#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
-#define HINIC_MSIX_LLI_TIMER_SHIFT 16
-#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
-#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF
-#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F
-#define HINIC_MSIX_RESEND_TIMER_MASK 0x7
-
-#define HINIC_MSIX_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_##member##_MASK) << \
- HINIC_MSIX_##member##_SHIFT)
-
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
-
-#define HINIC_MSIX_CNT_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \
- HINIC_MSIX_CNT_##member##_SHIFT)
-
-#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
-#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
-#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
-#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
-#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
-#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
-
-#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
-#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
-#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
-
-#define HINIC_PCI_CFG_REGS_BAR 0
-#define HINIC_PCI_DB_BAR 4
-
-#define HINIC_PCIE_ST_DISABLE 0
-#define HINIC_PCIE_AT_DISABLE 0
-#define HINIC_PCIE_PH_DISABLE 0
-
-#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */
-#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
-
-enum hinic_pcie_nosnoop {
- HINIC_PCIE_SNOOP = 0,
- HINIC_PCIE_NO_SNOOP = 1,
-};
-
-enum hinic_pcie_tph {
- HINIC_PCIE_TPH_DISABLE = 0,
- HINIC_PCIE_TPH_ENABLE = 1,
-};
-
-enum hinic_func_type {
- HINIC_PF = 0,
- HINIC_PPF = 2,
-};
-
-enum hinic_mod_type {
- HINIC_MOD_COMM = 0, /* HW communication module */
- HINIC_MOD_L2NIC = 1, /* L2NIC module */
- HINIC_MOD_CFGM = 7, /* Configuration module */
-
- HINIC_MOD_MAX = 15
-};
-
-enum hinic_node_id {
- HINIC_NODE_ID_MGMT = 21,
-};
-
-enum hinic_pf_action {
- HINIC_PF_MGMT_INIT = 0x0,
-
- HINIC_PF_MGMT_ACTIVE = 0x11,
-};
-
-enum hinic_outbound_state {
- HINIC_OUTBOUND_ENABLE = 0,
- HINIC_OUTBOUND_DISABLE = 1,
-};
-
-enum hinic_db_state {
- HINIC_DB_ENABLE = 0,
- HINIC_DB_DISABLE = 1,
-};
-
-struct hinic_func_attr {
- u16 func_idx;
- u8 pf_idx;
- u8 pci_intf_idx;
-
- enum hinic_func_type func_type;
-
- u8 ppf_idx;
-
- u16 num_irqs;
- u8 num_aeqs;
- u8 num_ceqs;
-
- u8 num_dma_attr;
-};
-
-struct hinic_hwif {
- struct pci_dev *pdev;
- void __iomem *cfg_regs_bar;
-
- struct hinic_func_attr attr;
-};
-
-static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
-{
- return be32_to_cpu(readl(hwif->cfg_regs_bar + reg));
-}
-
-static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
- u32 val)
-{
- writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg);
-}
-
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
-
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif);
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state);
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state);
-
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
-
-void hinic_free_hwif(struct hinic_hwif *hwif);
-
-#endif
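
set_hwif_attr() in hinic_hw_if.c above expands the ATTR1 fields with BIT(): the device reports resource counts as base-2 exponents rather than raw counts, so the 2-bit AEQS_PER_FUNC field can describe 1, 2, 4 or 8 AEQs. A short worked example of that decoding (the attr1 value here is illustrative, not read from a device):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

#define FA1_AEQS_PER_FUNC_SHIFT 8
#define FA1_AEQS_PER_FUNC_MASK  0x3

int main(void)
{
        /* suppose the device reports log2(num_aeqs) = 2 in attr1 */
        uint32_t attr1 = 2U << FA1_AEQS_PER_FUNC_SHIFT;
        uint32_t num_aeqs;

        num_aeqs = BIT((attr1 >> FA1_AEQS_PER_FUNC_SHIFT) &
                       FA1_AEQS_PER_FUNC_MASK);

        /* a 2-bit field therefore covers 1, 2, 4 or 8 AEQs */
        printf("num_aeqs = %u\n", num_aeqs);    /* prints 4 */
        return 0;
}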
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
deleted file mode 100644
index 8e58976..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/semaphore.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define CI_Q_ADDR_SIZE sizeof(u32)
-
-#define CI_ADDR(base_addr, q_id) ((base_addr) + \
- (q_id) * CI_Q_ADDR_SIZE)
-
-#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)
-
-#define DB_IDX(db, db_base) \
- (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
-
-enum io_cmd {
- IO_CMD_MODIFY_QUEUE_CTXT = 0,
-};
-
-static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
-{
- int i;
-
- for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
- free_db_area->db_idx[i] = i;
-
- free_db_area->alloc_pos = 0;
- free_db_area->return_pos = HINIC_DB_MAX_AREAS;
-
- free_db_area->num_free = HINIC_DB_MAX_AREAS;
-
- sema_init(&free_db_area->idx_lock, 1);
-}
-
-static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx;
-
- down(&free_db_area->idx_lock);
-
- free_db_area->num_free--;
-
- if (free_db_area->num_free < 0) {
- free_db_area->num_free++;
- up(&free_db_area->idx_lock);
- return ERR_PTR(-ENOMEM);
- }
-
- pos = free_db_area->alloc_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- idx = free_db_area->db_idx[pos];
-
- free_db_area->db_idx[pos] = -1;
-
- up(&free_db_area->idx_lock);
-
- return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
-}
-
-static void return_db_area(struct hinic_func_to_io *func_to_io,
- void __iomem *db_base)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx = DB_IDX(db_base, func_to_io->db_base);
-
- down(&free_db_area->idx_lock);
-
- pos = free_db_area->return_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- free_db_area->db_idx[pos] = idx;
-
- free_db_area->num_free++;
-
- up(&free_db_area->idx_lock);
-}
-
-static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_sqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_sq_ctxt_block *sq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_sq_ctxt *sq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- sq_ctxt_block = cmdq_buf.buf;
- sq_ctxt = sq_ctxt_block->sq_ctxt;
-
- hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
- num_sqs, func_to_io->max_qps);
- for (i = 0; i < num_sqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_rqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_rq_ctxt_block *rq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_rq_ctxt *rq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- rq_ctxt_block = cmdq_buf.buf;
- rq_ctxt = rq_ctxt_block->rq_ctxt;
-
- hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
- num_rqs, func_to_io->max_qps);
- for (i = 0; i < num_rqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-/**
- * write_qp_ctxts - write the qp ctxt to HW
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: first qp number
- * @num_qps: number of qps to write
- *
- * Return 0 - Success, negative - Failure
- **/
-static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_qps)
-{
- return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
- write_rq_ctxts(func_to_io, base_qpn, num_qps));
-}
-
-/**
- * init_qp - Initialize a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to initialize
- * @q_id: the id of the qp
- * @sq_msix_entry: msix entry for sq
- * @rq_msix_entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp, int q_id,
- struct msix_entry *sq_msix_entry,
- struct msix_entry *rq_msix_entry)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- void __iomem *db_base;
- int err;
-
- qp->q_id = q_id;
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
- return err;
- }
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
- HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
- goto err_rq_alloc;
- }
-
- db_base = get_db_area(func_to_io);
- if (IS_ERR(db_base)) {
- dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
- err = PTR_ERR(db_base);
- goto err_get_db;
- }
-
- func_to_io->sq_db[q_id] = db_base;
-
- err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
- sq_msix_entry,
- CI_ADDR(func_to_io->ci_addr_base, q_id),
- CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
- if (err) {
- dev_err(&pdev->dev, "Failed to init SQ\n");
- goto err_sq_init;
- }
-
- err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
- rq_msix_entry);
- if (err) {
- dev_err(&pdev->dev, "Failed to init RQ\n");
- goto err_rq_init;
- }
-
- return 0;
-
-err_rq_init:
- hinic_clean_sq(&qp->sq);
-
-err_sq_init:
- return_db_area(func_to_io, db_base);
-
-err_get_db:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
-
-err_rq_alloc:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
- return err;
-}
-
-/**
- * destroy_qp - Clean the resources of a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to clean
- **/
-static void destroy_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp)
-{
- int q_id = qp->q_id;
-
- hinic_clean_rq(&qp->rq);
- hinic_clean_sq(&qp->sq);
-
- return_db_area(func_to_io, func_to_io->sq_db[q_id]);
-
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
-}
-
-/**
- * hinic_io_create_qps - Create Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: base qp number
- * @num_qps: number of queue pairs to create
- * @sq_msix_entries: msix entries for sq
- * @rq_msix_entries: msix entries for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
- void *ci_addr_base;
- int i, j, err;
-
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
- if (!func_to_io->qps)
- return -ENOMEM;
-
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->sq_wq) {
- err = -ENOMEM;
- goto err_sq_wq;
- }
-
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->rq_wq) {
- err = -ENOMEM;
- goto err_rq_wq;
- }
-
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
- if (!func_to_io->sq_db) {
- err = -ENOMEM;
- goto err_sq_db;
- }
-
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
- if (!ci_addr_base) {
- dev_err(&pdev->dev, "Failed to allocate CI area\n");
- err = -ENOMEM;
- goto err_ci_base;
- }
-
- func_to_io->ci_addr_base = ci_addr_base;
-
- for (i = 0; i < num_qps; i++) {
- err = init_qp(func_to_io, &func_to_io->qps[i], i,
- &sq_msix_entries[i], &rq_msix_entries[i]);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QP %d\n", i);
- goto err_init_qp;
- }
- }
-
- err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
- if (err) {
- dev_err(&pdev->dev, "Failed to init QP ctxts\n");
- goto err_write_qp_ctxts;
- }
-
- return 0;
-
-err_write_qp_ctxts:
-err_init_qp:
- for (j = 0; j < i; j++)
- destroy_qp(func_to_io, &func_to_io->qps[j]);
-
- dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- func_to_io->ci_addr_base, func_to_io->ci_dma_base);
-
-err_ci_base:
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
-err_sq_db:
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
-
-err_rq_wq:
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
-err_sq_wq:
- devm_kfree(&pdev->dev, func_to_io->qps);
- return err;
-}
-
-/**
- * hinic_io_destroy_qps - Destroy the IO Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @num_qps: number of queue pairs to destroy
- **/
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t ci_table_size;
- int i;
-
- ci_table_size = CI_TABLE_SIZE(num_qps);
-
- for (i = 0; i < num_qps; i++)
- destroy_qp(func_to_io, &func_to_io->qps[i]);
-
- dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
- func_to_io->ci_dma_base);
-
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
- devm_kfree(&pdev->dev, func_to_io->qps);
-}
-
-/**
- * hinic_io_init - Initialize the IO components
- * @func_to_io: func to io channel that holds the IO components
- * @hwif: HW interface for accessing IO
- * @max_qps: maximum QPs in HW
- * @num_ceqs: number of completion event queues
- * @ceq_msix_entries: msix entries for ceqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq, type;
- void __iomem *db_area;
- int err;
-
- func_to_io->hwif = hwif;
- func_to_io->qps = NULL;
- func_to_io->max_qps = max_qps;
-
- err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
- HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init CEQs\n");
- return err;
- }
-
- err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
- goto err_wqs_alloc;
- }
-
- func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
- if (!func_to_io->db_base) {
- dev_err(&pdev->dev, "Failed to remap IO DB area\n");
- err = -ENOMEM;
- goto err_db_ioremap;
- }
-
- init_db_area_idx(&func_to_io->free_db_area);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
- db_area = get_db_area(func_to_io);
- if (IS_ERR(db_area)) {
- dev_err(&pdev->dev, "Failed to get cmdq db area\n");
- err = PTR_ERR(db_area);
- goto err_db_area;
- }
-
- func_to_io->cmdq_db_area[cmdq] = db_area;
- }
-
- err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
- func_to_io->cmdq_db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
- goto err_init_cmdqs;
- }
-
- return 0;
-
-err_init_cmdqs:
-err_db_area:
- for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
-
- iounmap(func_to_io->db_base);
-
-err_db_ioremap:
- hinic_wqs_free(&func_to_io->wqs);
-
-err_wqs_alloc:
- hinic_ceqs_free(&func_to_io->ceqs);
- return err;
-}
-
-/**
- * hinic_io_free - Free the IO components
- * @func_to_io: func to io channel that holds the IO components
- **/
-void hinic_io_free(struct hinic_func_to_io *func_to_io)
-{
- enum hinic_cmdq_type cmdq;
-
- hinic_free_cmdqs(&func_to_io->cmdqs);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
-
- iounmap(func_to_io->db_base);
- hinic_wqs_free(&func_to_io->wqs);
- hinic_ceqs_free(&func_to_io->ceqs);
-}
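
get_db_area()/return_db_area() above manage the doorbell pages with a fixed-size circular index pool: alloc_pos and return_pos advance monotonically and are masked by the power-of-two pool size, while num_free (guarded by a semaphore in the real code) detects exhaustion. A user-space sketch of the same allocator, without the locking and without the index-to-address conversion:

#include <stdio.h>

#define MAX_AREAS 8    /* must be a power of two for the mask trick */

static int db_idx[MAX_AREAS];
static int alloc_pos, return_pos, num_free;

static void pool_init(void)
{
        int i;

        for (i = 0; i < MAX_AREAS; i++)
                db_idx[i] = i;

        alloc_pos = 0;
        return_pos = MAX_AREAS;
        num_free = MAX_AREAS;
}

/* returns a free index, or -1 when the pool is exhausted
 * (the real driver serializes callers with a semaphore) */
static int pool_get(void)
{
        int pos;

        if (--num_free < 0) {
                num_free++;
                return -1;
        }
        pos = alloc_pos++ & (MAX_AREAS - 1);
        return db_idx[pos];
}

static void pool_put(int idx)
{
        int pos = return_pos++ & (MAX_AREAS - 1);

        db_idx[pos] = idx;
        num_free++;
}

int main(void)
{
        int a, b;

        pool_init();
        a = pool_get();
        b = pool_get();
        printf("got %d and %d\n", a, b);
        pool_put(a);
        pool_put(b);
        return 0;
}

Starting return_pos at MAX_AREAS keeps the two cursors from colliding: an index is only re-read from a slot after it has been written back by pool_put().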
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
deleted file mode 100644
index adb6417..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IO_H
-#define HINIC_HW_IO_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/semaphore.h>
-#include <linux/sizes.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp.h"
-
-#define HINIC_DB_PAGE_SIZE SZ_4K
-#define HINIC_DB_SIZE SZ_4M
-
-#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
-
-enum hinic_db_type {
- HINIC_DB_CMDQ_TYPE,
- HINIC_DB_SQ_TYPE,
-};
-
-enum hinic_io_path {
- HINIC_CTRL_PATH,
- HINIC_DATA_PATH,
-};
-
-struct hinic_free_db_area {
- int db_idx[HINIC_DB_MAX_AREAS];
-
- int alloc_pos;
- int return_pos;
-
- int num_free;
-
- /* Lock for getting db area */
- struct semaphore idx_lock;
-};
-
-struct hinic_func_to_io {
- struct hinic_hwif *hwif;
-
- struct hinic_ceqs ceqs;
-
- struct hinic_wqs wqs;
-
- struct hinic_wq *sq_wq;
- struct hinic_wq *rq_wq;
-
- struct hinic_qp *qps;
- u16 max_qps;
-
- void __iomem **sq_db;
- void __iomem *db_base;
-
- void *ci_addr_base;
- dma_addr_t ci_dma_base;
-
- struct hinic_free_db_area free_db_area;
-
- void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
-
- struct hinic_cmdqs cmdqs;
-};
-
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries);
-
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io,
- int num_qps);
-
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries);
-
-void hinic_io_free(struct hinic_func_to_io *func_to_io);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
deleted file mode 100644
index 278dc13..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_api_cmd.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_dev.h"
-
-#define SYNC_MSG_ID_MASK 0x1FF
-
-#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
-
-#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
- ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
- SYNC_MSG_ID_MASK))
-
-#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN)
-
-#define MGMT_MSG_LEN_MIN 20
-#define MGMT_MSG_LEN_STEP 16
-#define MGMT_MSG_RSVD_FOR_DEV 8
-
-#define SEGMENT_LEN 48
-
-#define MAX_PF_MGMT_BUF_SIZE 2048
-
-/* Data should be SEG LEN size aligned */
-#define MAX_MSG_LEN 2016
-
-#define MSG_NOT_RESP 0xFFFF
-
-#define MGMT_MSG_TIMEOUT 1000
-
-#define mgmt_to_pfhwdev(pf_mgmt) \
- container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
-
-enum msg_segment_type {
- NOT_LAST_SEGMENT = 0,
- LAST_SEGMENT = 1,
-};
-
-enum mgmt_direction_type {
- MGMT_DIRECT_SEND = 0,
- MGMT_RESP = 1,
-};
-
-enum msg_ack_type {
- MSG_ACK = 0,
- MSG_NO_ACK = 1,
-};
-
-/**
- * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler will handle its messages
- * @handle: private data for the callback
- * @callback: the handler that will handle messages
- **/
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->cb = callback;
- mgmt_cb->handle = handle;
- mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
-}
-
-/**
- * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler handles its messages
- **/
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod)
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;
-
- while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
- schedule();
-
- mgmt_cb->cb = NULL;
-}
-
-/**
- * prepare_header - prepare the header of the message
- * @pf_to_mgmt: PF to MGMT channel
- * @msg_len: the length of the message
- * @mod: module in the chip that will get the message
- * @ack_type: ask for response
- * @direction: the direction of the message
- * @cmd: command of the message
- * @msg_id: message id
- *
- * Return the prepared header value
- **/
-static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u16 msg_len, enum hinic_mod_type mod,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 cmd, u16 msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-
- return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
- HINIC_MSG_HEADER_SET(mod, MODULE) |
- HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
- HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
- HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
- HINIC_MSG_HEADER_SET(0, SEQID) |
- HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
- HINIC_MSG_HEADER_SET(direction, DIRECTION) |
- HINIC_MSG_HEADER_SET(cmd, CMD) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
- HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
-}
-
-/**
- * prepare_mgmt_cmd - prepare the mgmt command
- * @mgmt_cmd: pointer to the command to prepare
- * @header: pointer of the header for the message
- * @msg: the data of the message
- * @msg_len: the length of the message
- **/
-static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
-{
- memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
-
- mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
- memcpy(mgmt_cmd, header, sizeof(*header));
-
- mgmt_cmd += sizeof(*header);
- memcpy(mgmt_cmd, msg, msg_len);
-}
-
-/**
- * mgmt_msg_len - calculate the total message length
- * @msg_data_len: the length of the message data
- *
- * Return the total message length
- **/
-static u16 mgmt_msg_len(u16 msg_data_len)
-{
- /* RSVD + HEADER_SIZE + DATA_LEN */
- u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;
-
- if (msg_len > MGMT_MSG_LEN_MIN)
- msg_len = MGMT_MSG_LEN_MIN +
- ALIGN((msg_len - MGMT_MSG_LEN_MIN),
- MGMT_MSG_LEN_STEP);
- else
- msg_len = MGMT_MSG_LEN_MIN;
-
- return msg_len;
-}
-
-/**
- * send_msg_to_mgmt - send message to mgmt by API CMD
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @data: the msg data
- * @data_len: the msg data length
- * @ack_type: ask for response
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *data, u16 data_len,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_api_cmd_chain *chain;
- u64 header;
- u16 msg_id;
-
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
-
- if (direction == MGMT_RESP) {
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, resp_msg_id);
- } else {
- SYNC_MSG_ID_INC(pf_to_mgmt);
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, msg_id);
- }
-
- prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);
-
- chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
- return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
- pf_to_mgmt->sync_msg_buf,
- mgmt_msg_len(data_len));
-}
-
-/**
- * msg_to_mgmt_sync - send sync message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: response length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- u8 *buf_out, u16 *out_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_recv_msg *recv_msg;
- struct completion *recv_done;
- u16 msg_id;
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
- recv_done = &recv_msg->recv_done;
-
- if (resp_msg_id == MSG_NOT_RESP)
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
- else
- msg_id = resp_msg_id;
-
- init_completion(recv_done);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_ACK, direction, resp_msg_id);
- if (err) {
- dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
- goto unlock_sync_msg;
- }
-
- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
- dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
- err = -ETIMEDOUT;
- goto unlock_sync_msg;
- }
-
- smp_rmb(); /* verify reading after completion */
-
- if (recv_msg->msg_id != msg_id) {
- dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
- err = -EFAULT;
- goto unlock_sync_msg;
- }
-
- if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) {
- memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
- *out_size = recv_msg->msg_len;
- }
-
-unlock_sync_msg:
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * msg_to_mgmt_async - send message to mgmt without response
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to response for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_NO_ACK, direction, resp_msg_id);
-
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * hinic_msg_to_mgmt - send message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: returned response length
- * @sync: sync msg or async msg
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- if (sync != HINIC_MGMT_MSG_SYNC) {
- dev_err(&pdev->dev, "Invalid MGMT msg type\n");
- return -EINVAL;
- }
-
- if (!MSG_SZ_IS_VALID(in_size)) {
- dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
- return -EINVAL;
- }
-
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
- buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
-}
-
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u8 *buf_out = recv_msg->buf_out;
- struct hinic_mgmt_cb *mgmt_cb;
- unsigned long cb_state;
- u16 out_size = 0;
-
- if (recv_msg->mod >= HINIC_MOD_MAX) {
- dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- recv_msg->mod);
- return;
- }
-
- mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
-
- cb_state = cmpxchg(&mgmt_cb->state,
- HINIC_MGMT_CB_ENABLED,
- HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
-
- if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
- mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
- recv_msg->msg, recv_msg->msg_len,
- buf_out, &out_size);
- else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
-
- if (!recv_msg->async_mgmt_to_pf)
- /* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
- buf_out, out_size, MGMT_RESP,
- recv_msg->msg_id);
-}
-
-/**
- * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- wmb(); /* verify writing all, before reading */
-
- complete(&recv_msg->recv_done);
-}
-
-/**
- * recv_mgmt_msg_handler - handler for a message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @header: the header of the message
- * @recv_msg: received message details
- **/
-static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u64 *header, struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int seq_id, seg_len;
- u8 *msg_body;
-
- seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
- seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);
-
- if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
- dev_err(&pdev->dev, "recv big mgmt msg\n");
- return;
- }
-
- msg_body = (u8 *)header + sizeof(*header);
- memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);
-
- if (!HINIC_MSG_HEADER_GET(*header, LAST))
- return;
-
- recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
- recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
- recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
- ASYNC_MGMT_TO_PF);
- recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
- recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);
-
- if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
- mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
- else
- mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
-}
-
-/**
- * mgmt_msg_aeqe_handler - handler for a mgmt message event
- * @handle: PF to MGMT channel
- * @data: the header of the message
- * @size: unused
- **/
-static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
-{
- struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
- struct hinic_recv_msg *recv_msg;
- u64 *header = (u64 *)data;
-
- recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
- MGMT_DIRECT_SEND ?
- &pf_to_mgmt->recv_msg_from_mgmt :
- &pf_to_mgmt->recv_resp_msg_from_mgmt;
-
- recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
-}
-
-/**
- * alloc_recv_msg - allocate receive message memory
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: pointer that will hold the allocated data
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->msg)
- return -ENOMEM;
-
- recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->buf_out)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate recv msg\n");
- return err;
- }
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_resp_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
- return err;
- }
-
- pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
- MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!pf_to_mgmt->sync_msg_buf)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- * @hwif: HW interface the PF to MGMT will use for accessing HW
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- pf_to_mgmt->hwif = hwif;
-
- sema_init(&pf_to_mgmt->sync_msg_lock, 1);
- pf_to_mgmt->sync_msg_id = 0;
-
- err = alloc_msg_buf(pf_to_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
- return err;
- }
-
- err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
- return err;
- }
-
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
- pf_to_mgmt,
- mgmt_msg_aeqe_handler);
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_free - free PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- **/
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
- hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
deleted file mode 100644
index cb23962..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/skbuff.h>
-#include <linux/io.h>
-#include <asm/barrier.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define SQ_DB_OFF SZ_2K
-
-/* The number of cache line to prefetch Until threshold state */
-#define WQ_PREFETCH_MAX 2
-/* The number of cache line to prefetch After threshold state */
-#define WQ_PREFETCH_MIN 1
-/* Threshold state */
-#define WQ_PREFETCH_THRESHOLD 256
-
-/* sizes of the SQ/RQ ctxt */
-#define Q_CTXT_SIZE 48
-#define CTXT_RSVD 240
-
-#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
-
-#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
- (max_sqs + (q_id)) * Q_CTXT_SIZE)
-
-#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4)
-#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3)
-#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3)
-
-#define SQ_DB_PI_HI_SHIFT 8
-#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT)
-
-#define SQ_DB_PI_LOW_MASK 0xFF
-#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK)
-
-#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
-
-#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
-#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-
-#define TX_MAX_MSS_DEFAULT 0x3E00
-
-enum sq_wqe_type {
- SQ_NORMAL_WQE = 0,
-};
-
-enum rq_completion_fmt {
- RQ_COMPLETE_SGE = 1
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues)
-{
- u16 max_sqs = max_queues;
- u16 max_rqs = max_queues;
-
- qp_ctxt_hdr->num_queues = num_queues;
- qp_ctxt_hdr->queue_type = ctxt_type;
-
- if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
- qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
- else
- qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
-
- qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
-
- hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
-}
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = sq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
- GLOBAL_SQ_ID) |
- HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);
-
- sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
- HINIC_SQ_CTXT_CI_SET(1, WRAPPED);
-
- sq_ctxt->wq_hi_pfn_pi =
- HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
- HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
-
- sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->pref_cache =
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- sq_ctxt->pref_wrapped = 1;
-
- sq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
- HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);
-
- sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->wq_block_hi_pfn =
- HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
-}
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = rq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
- HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);
-
- rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
- HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
-
- rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
- HI_PFN) |
- HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
-
- rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pref_cache =
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- rq_ctxt->pref_wrapped = 1;
-
- rq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
- HINIC_RQ_CTXT_PREF_SET(ci_start, CI);
-
- rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
- rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
-
- rq_ctxt->wq_block_hi_pfn =
- HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
-}
-
-/**
- * alloc_sq_skb_arr - allocate sq array for saved skb
- * @sq: HW Send Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_sq_skb_arr(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
- sq->saved_skb = vzalloc(skb_arr_size);
- if (!sq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_sq_skb_arr - free sq array for saved skb
- * @sq: HW Send Queue
- **/
-static void free_sq_skb_arr(struct hinic_sq *sq)
-{
- vfree(sq->saved_skb);
-}
-
-/**
- * alloc_rq_skb_arr - allocate rq array for saved skb
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_skb_arr(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
- rq->saved_skb = vzalloc(skb_arr_size);
- if (!rq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_rq_skb_arr - free rq array for saved skb
- * @rq: HW Receive Queue
- **/
-static void free_rq_skb_arr(struct hinic_rq *rq)
-{
- vfree(rq->saved_skb);
-}
-
-/**
- * hinic_init_sq - Initialize HW Send Queue
- * @sq: HW Send Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the SQ
- * @entry: msix entry for sq
- * @ci_addr: address for reading the current HW consumer index
- * @ci_dma_addr: dma address for reading the current HW consumer index
- * @db_base: doorbell base address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry,
- void *ci_addr, dma_addr_t ci_dma_addr,
- void __iomem *db_base)
-{
- sq->hwif = hwif;
-
- sq->wq = wq;
-
- sq->irq = entry->vector;
- sq->msix_entry = entry->entry;
-
- sq->hw_ci_addr = ci_addr;
- sq->hw_ci_dma_addr = ci_dma_addr;
-
- sq->db_base = db_base + SQ_DB_OFF;
-
- return alloc_sq_skb_arr(sq);
-}
-
-/**
- * hinic_clean_sq - Clean HW Send Queue's Resources
- * @sq: Send Queue
- **/
-void hinic_clean_sq(struct hinic_sq *sq)
-{
- free_sq_skb_arr(sq);
-}
-
-/**
- * alloc_rq_cqe - allocate rq completion queue elements
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cqe_dma_size, cqe_size;
- struct hinic_wq *wq = rq->wq;
- int j, i;
-
- cqe_size = wq->q_depth * sizeof(*rq->cqe);
- rq->cqe = vzalloc(cqe_size);
- if (!rq->cqe)
- return -ENOMEM;
-
- cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
- rq->cqe_dma = vzalloc(cqe_dma_size);
- if (!rq->cqe_dma)
- goto err_cqe_dma_arr_alloc;
-
- for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
- if (!rq->cqe[i])
- goto err_cqe_alloc;
- }
-
- return 0;
-
-err_cqe_alloc:
- for (j = 0; j < i; j++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
- rq->cqe_dma[j]);
-
- vfree(rq->cqe_dma);
-
-err_cqe_dma_arr_alloc:
- vfree(rq->cqe);
- return -ENOMEM;
-}
-
-/**
- * free_rq_cqe - free rq completion queue elements
- * @rq: HW Receive Queue
- **/
-static void free_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_wq *wq = rq->wq;
- int i;
-
- for (i = 0; i < wq->q_depth; i++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
- rq->cqe_dma[i]);
-
- vfree(rq->cqe_dma);
- vfree(rq->cqe);
-}
-
-/**
- * hinic_init_rq - Initialize HW Receive Queue
- * @rq: HW Receive Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the RQ
- * @entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
- int err;
-
- rq->hwif = hwif;
-
- rq->wq = wq;
-
- rq->irq = entry->vector;
- rq->msix_entry = entry->entry;
-
- rq->buf_sz = HINIC_RX_BUF_SZ;
-
- err = alloc_rq_skb_arr(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
- return err;
- }
-
- err = alloc_rq_cqe(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
- goto err_alloc_rq_cqe;
- }
-
- /* HW requirements: Must be at least 32 bit */
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
- if (!rq->pi_virt_addr) {
- dev_err(&pdev->dev, "Failed to allocate PI address\n");
- err = -ENOMEM;
- goto err_pi_virt;
- }
-
- return 0;
-
-err_pi_virt:
- free_rq_cqe(rq);
-
-err_alloc_rq_cqe:
- free_rq_skb_arr(rq);
- return err;
-}
-
-/**
- * hinic_clean_rq - Clean HW Receive Queue's Resources
- * @rq: HW Receive Queue
- **/
-void hinic_clean_rq(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
-
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
- rq->pi_dma_addr);
-
- free_rq_cqe(rq);
- free_rq_skb_arr(rq);
-}
-
-/**
- * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
- * @sq: send queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-/**
- * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
- * @rq: recv queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
-{
- u32 ctrl_size, task_size, bufdesc_size;
-
- ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
- task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
- bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
- bufdesc_size = SIZE_8BYTES(bufdesc_size);
-
- ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
- HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
- HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- HINIC_SQ_CTRL_SET(ctrl_size, LEN);
-
- ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
- QUEUE_INFO_MSS);
-}
-
-static void sq_prepare_task(struct hinic_sq_task *task)
-{
- task->pkt_info0 =
- HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- INNER_L3TYPE) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
- VLAN_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
- task->pkt_info1 =
- HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
- task->pkt_info2 =
- HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
- HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
- TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- OUTER_L3TYPE);
-
- task->ufo_v6_identify = 0;
-
- task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
-
- task->zero_pad = 0;
-}
-
-/**
- * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
- * @sq: send queue
- * @prod_idx: pi value
- * @sq_wqe: wqe to prepare
- * @sges: sges for use by the wqe for send for buf addresses
- * @nr_sges: number of sges
- **/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
-
- sq_prepare_task(&sq_wqe->task);
-
- for (i = 0; i < nr_sges; i++)
- sq_wqe->buf_descs[i].sge = sges[i];
-}
-
-/**
- * sq_prepare_db - prepare doorbell to write
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @cos: cos of the doorbell
- *
- * Return db value
- **/
-static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
-
- /* Data should be written to HW in Big Endian Format */
- return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
- HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
- HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
- HINIC_SQ_DB_INFO_SET(cos, COS) |
- HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
-}
-
-/**
- * hinic_sq_write_db - write doorbell
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @wqe_size: wqe size
- * @cos: cos of the wqe
- **/
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos)
-{
- struct hinic_wq *wq = sq->wq;
-
- /* increment prod_idx to the next */
- prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- wmb(); /* Write all before the doorbell */
-
- writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
-}
-
-/**
- * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
- * @sq: sq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_write_wqe - write the wqe to the sq
- * @sq: send queue
- * @prod_idx: pi of the wqe
- * @sq_wqe: the wqe to write
- * @skb: skb to save
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe,
- struct sk_buff *skb, unsigned int wqe_size)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
-
- sq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(sq_wqe, wqe_size);
-
- hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
-}
-
-/**
- * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the
- * wqe only has one wqebb
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the wqe size ptr
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sq_wqe *sq_wqe;
- struct hinic_sq_ctrl *ctrl;
- unsigned int buf_sect_len;
- u32 ctrl_info;
-
- /* read the ctrl section for getting wqe size */
- hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- *skb = sq->saved_skb[*cons_idx];
-
- sq_wqe = &hw_wqe->sq_wqe;
- ctrl = &sq_wqe->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
- buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
-
- *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
- *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
- *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the size of the wqe
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
-
- hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
- *skb = sq->saved_skb[*cons_idx];
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_put_wqe - release the ci for new wqes
- * @sq: send queue
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
-{
- hinic_put_wqe(sq->wq, wqe_size);
-}
-
-/**
- * hinic_sq_get_sges - get sges from the wqe
- * @sq_wqe: wqe to get the sges from its buffer addresses
- * @sges: returned sges
- * @nr_sges: number sges to return
- **/
-void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
- sges[i] = sq_wqe->buf_descs[i].sge;
- hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
- }
-}
-
-/**
- * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
- * @rq: rq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_write_wqe - write the wqe to the rq
- * @rq: recv queue
- * @prod_idx: pi of the wqe
- * @rq_wqe: the wqe to write
- * @skb: skb to save
- **/
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
-
- rq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
-
- hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
-}
-
-/**
- * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_rq_cqe *cqe;
- int rx_done;
- u32 status;
-
- hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- cqe = rq->cqe[*cons_idx];
-
- status = be32_to_cpu(cqe->status);
-
- rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
- if (!rx_done)
- return NULL;
-
- *skb = rq->saved_skb[*cons_idx];
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index in the wq
- *
- * Return wqe in incremented ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx)
-{
- struct hinic_wq *wq = rq->wq;
- struct hinic_hw_wqe *hw_wqe;
- unsigned int num_wqebbs;
-
- wqe_size = ALIGN(wqe_size, wq->wqebb_size);
- num_wqebbs = wqe_size / wq->wqebb_size;
-
- *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
-
- *skb = rq->saved_skb[*cons_idx];
-
- hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_put_wqe - release the ci for new wqes
- * @rq: recv queue
- * @cons_idx: consumer index of the wqe
- * @wqe_size: the size of the wqe
- **/
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 status = be32_to_cpu(cqe->status);
-
- status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);
-
- /* Rx WQE size is 1 WQEBB, no wq shadow */
- cqe->status = cpu_to_be32(status);
-
- wmb(); /* clear done flag */
-
- hinic_put_wqe(rq->wq, wqe_size);
-}
-
-/**
- * hinic_rq_get_sge - get sge from the wqe
- * @rq: recv queue
- * @rq_wqe: wqe to get the sge from its buf address
- * @cons_idx: consumer index
- * @sge: returned sge
- **/
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
- u16 cons_idx, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 len = be32_to_cpu(cqe->len);
-
- sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
- sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
- sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
-}
-
-/**
- * hinic_rq_prepare_wqe - prepare wqe before insert to the queue
- * @rq: recv queue
- * @prod_idx: pi value
- * @rq_wqe: the wqe
- * @sge: sge for use by the wqe for recv buf address
- **/
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
- struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
- struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
- struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
- dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
-
- ctrl->ctrl_info =
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
- COMPLETE_LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
- BUFDESC_SECT_LEN) |
- HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
-
- hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
-
- buf_desc->hi_addr = sge->hi_addr;
- buf_desc->lo_addr = sge->lo_addr;
-}
-
-/**
- * hinic_rq_update - update pi of the rq
- * @rq: recv queue
- * @prod_idx: pi value
- **/
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
-{
- *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
-}
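[ Not part of the patch: the send path in the removed hinic_hw_qp.c above
  rounds every WQE up to whole WQEBBs and wraps the producer index with a
  power-of-two mask (SQ_MASKED_IDX).  The standalone C sketch below mirrors
  that arithmetic with hypothetical sizes (64-byte WQEBBs, depth 4096); it is
  illustrative only. ]

#include <stdio.h>

#define WQEBB_SIZE 64u                 /* e.g. HINIC_SQ_WQEBB_SIZE */
#define Q_DEPTH    4096u               /* power of two, e.g. HINIC_SQ_DEPTH */
#define Q_MASK     (Q_DEPTH - 1)

/* Round a WQE size up to a whole number of WQEBBs. */
static unsigned int wqe_to_wqebbs(unsigned int wqe_size)
{
	return (wqe_size + WQEBB_SIZE - 1) / WQEBB_SIZE;
}

int main(void)
{
	unsigned int prod_idx = 4094;      /* near the end of the ring */
	unsigned int wqe_size = 176;       /* hypothetical: ctrl + task + buf descs */
	unsigned int num_wqebbs = wqe_to_wqebbs(wqe_size);

	/* Advance and wrap, as hinic_sq_write_db() does before ringing the doorbell. */
	prod_idx = (prod_idx + num_wqebbs) & Q_MASK;

	printf("num_wqebbs=%u new_pi=%u\n", num_wqebbs, prod_idx);   /* 3, 1 */
	return 0;
}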
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
deleted file mode 100644
index 6c84f83..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_H
-#define HINIC_HW_QP_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-
-#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0
-#define HINIC_SQ_DB_INFO_QID_SHIFT 8
-#define HINIC_SQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_SQ_DB_INFO_COS_SHIFT 24
-#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27
-
-#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF
-#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF
-#define HINIC_SQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_SQ_DB_INFO_COS_MASK 0x7
-#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F
-
-#define HINIC_SQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \
- << HINIC_SQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_SQ_WQEBB_SIZE 64
-#define HINIC_RQ_WQEBB_SIZE 32
-
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
-
-#define HINIC_SQ_DEPTH SZ_4K
-#define HINIC_RQ_DEPTH SZ_4K
-
-/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
-#define HINIC_RX_BUF_SZ 2048
-#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
-
-#define HINIC_MIN_TX_WQE_SIZE(wq) \
- ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
-
-#define HINIC_MIN_TX_NUM_WQEBBS(sq) \
- (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
-
-enum hinic_rx_buf_sz_idx {
- HINIC_RX_BUF_SZ_32_IDX,
- HINIC_RX_BUF_SZ_64_IDX,
- HINIC_RX_BUF_SZ_96_IDX,
- HINIC_RX_BUF_SZ_128_IDX,
- HINIC_RX_BUF_SZ_192_IDX,
- HINIC_RX_BUF_SZ_256_IDX,
- HINIC_RX_BUF_SZ_384_IDX,
- HINIC_RX_BUF_SZ_512_IDX,
- HINIC_RX_BUF_SZ_768_IDX,
- HINIC_RX_BUF_SZ_1024_IDX,
- HINIC_RX_BUF_SZ_1536_IDX,
- HINIC_RX_BUF_SZ_2048_IDX,
- HINIC_RX_BUF_SZ_3072_IDX,
- HINIC_RX_BUF_SZ_4096_IDX,
- HINIC_RX_BUF_SZ_8192_IDX,
- HINIC_RX_BUF_SZ_16384_IDX,
-};
-
-struct hinic_sq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- void *hw_ci_addr;
- dma_addr_t hw_ci_dma_addr;
-
- void __iomem *db_base;
-
- struct sk_buff **saved_skb;
-};
-
-struct hinic_rq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- size_t buf_sz;
-
- struct sk_buff **saved_skb;
-
- struct hinic_rq_cqe **cqe;
- dma_addr_t *cqe_dma;
-
- u16 *pi_virt_addr;
- dma_addr_t pi_dma_addr;
-};
-
-struct hinic_qp {
- struct hinic_sq sq;
- struct hinic_rq rq;
-
- u16 q_id;
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues);
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid);
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid);
-
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
- dma_addr_t ci_dma_addr, void __iomem *db_base);
-
-void hinic_clean_sq(struct hinic_sq *sq);
-
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry);
-
-void hinic_clean_rq(struct hinic_rq *rq);
-
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
-
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
-
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos);
-
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct sk_buff *skb,
- unsigned int wqe_size);
-
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx);
-
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
-
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
-
-void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct sk_buff *skb);
-
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx);
-
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx);
-
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size);
-
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
- u16 cons_idx, struct hinic_sge *sge);
-
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
-
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
-
-#endif
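[ Not part of the patch: the HINIC_SQ_DB_INFO_* macros in the header above
  pack several fields into one 32-bit doorbell word using shift/mask pairs.
  A minimal sketch of that packing, with hypothetical field values: ]

#include <stdint.h>
#include <stdio.h>

/* Same pattern as HINIC_SQ_DB_INFO_SET(val, member). */
#define DB_SET(val, shift, mask)	(((uint32_t)(val) & (mask)) << (shift))

int main(void)
{
	uint32_t db_info =
		DB_SET(0x12, 0, 0xFF)  |	/* PI_HI */
		DB_SET(5,    8, 0x3FF) |	/* QID   */
		DB_SET(0,   23, 0x1)   |	/* PATH  */
		DB_SET(0,   24, 0x7)   |	/* COS   */
		DB_SET(0,   27, 0x1F);		/* TYPE  */

	printf("db_info = 0x%08x\n", db_info);	/* 0x00000512 */
	return 0;
}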
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
deleted file mode 100644
index 376abf0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_CTXT_H
-#define HINIC_HW_QP_CTXT_H
-
-#include <linux/types.h>
-
-#include "hinic_hw_cmdq.h"
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11
-#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23
-
-#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF
-#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1
-
-#define HINIC_SQ_CTXT_CI_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \
- << HINIC_SQ_CTXT_CI_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \
- << HINIC_SQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0
-#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22
-
-#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF
-#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF
-
-#define HINIC_RQ_CTXT_PI_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \
- HINIC_RQ_CTXT_PI_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \
- HINIC_RQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_sqs) * sizeof(struct hinic_sq_ctxt))
-
-#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_rqs) * sizeof(struct hinic_rq_ctxt))
-
-#define HINIC_WQ_PAGE_PFN_SHIFT 12
-#define HINIC_WQ_BLOCK_PFN_SHIFT 9
-
-#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT)
-#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \
- HINIC_WQ_BLOCK_PFN_SHIFT)
-
-#define HINIC_Q_CTXT_MAX \
- ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \
- / sizeof(struct hinic_sq_ctxt))
-
-enum hinic_qp_ctxt_type {
- HINIC_QP_CTXT_TYPE_SQ,
- HINIC_QP_CTXT_TYPE_RQ
-};
-
-struct hinic_qp_ctxt_header {
- u16 num_queues;
- u16 queue_type;
- u32 addr_offset;
-};
-
-struct hinic_sq_ctxt {
- u32 ceq_attr;
-
- u32 ci_wrapped;
-
- u32 wq_hi_pfn_pi;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 rsvd0;
- u32 rsvd1;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_rq_ctxt {
- u32 ceq_attr;
-
- u32 pi_intr_attr;
-
- u32 wq_hi_pfn_ci;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
-
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 pi_paddr_hi;
- u32 pi_paddr_lo;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_sq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-struct hinic_rq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-#endif
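[ Not part of the patch: hinic_sq_prepare_ctxt()/hinic_rq_prepare_ctxt()
  earlier turn a WQ page DMA address into a page frame number
  (HINIC_WQ_PAGE_PFN_SHIFT == 12) and split it into the hi/lo 32-bit words
  stored in the queue context.  A short sketch with a hypothetical address: ]

#include <stdint.h>
#include <stdio.h>

#define WQ_PAGE_PFN_SHIFT 12

int main(void)
{
	uint64_t wq_page_addr = 0x0000001234567000ULL;	/* hypothetical DMA address */
	uint64_t pfn = wq_page_addr >> WQ_PAGE_PFN_SHIFT;
	uint32_t pfn_hi = (uint32_t)(pfn >> 32);
	uint32_t pfn_lo = (uint32_t)pfn;

	printf("pfn_hi=0x%08x pfn_lo=0x%08x\n", pfn_hi, pfn_lo);
	return 0;
}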
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
deleted file mode 100644
index 3e3181c08..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/semaphore.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-
-#define WQS_BLOCKS_PER_PAGE 4
-
-#define WQ_BLOCK_SIZE 4096
-#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
-
-#define WQS_MAX_NUM_BLOCKS 128
-#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
- sizeof((wqs)->free_blocks[0]))
-
-#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
-
-#define WQ_PAGE_ADDR_SIZE sizeof(u64)
-#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define CMDQ_BLOCK_SIZE 512
-#define CMDQ_PAGE_SIZE 4096
-
-#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define WQ_BASE_VADDR(wqs, wq) \
- ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_PADDR(wqs, wq) \
- ((wqs)->page_paddr[(wq)->page_idx] \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_ADDR(wqs, wq) \
- ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
- ((cmdq_pages)->page_paddr \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->shadow_page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
-#define WQ_PAGE_ADDR(wq, idx) \
- ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
-
-#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
-
-#define WQE_IN_RANGE(wqe, start, end) \
- (((unsigned long)(wqe) >= (unsigned long)(start)) && \
- ((unsigned long)(wqe) < (unsigned long)(end)))
-
-#define WQE_SHADOW_PAGE(wq, wqe) \
- (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
- / (wq)->max_wqe_size)
-
-/**
- * queue_alloc_page - allocate page for Queue
- * @hwif: HW interface for allocating DMA
- * @vaddr: virtual address will be returned in this address
- * @paddr: physical address will be returned in this address
- * @shadow_vaddr: VM area will be return here for holding WQ page addresses
- * @page_sz: page size of each WQ page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
- void ***shadow_vaddr, size_t page_sz)
-{
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
- return -ENOMEM;
- }
-
- *paddr = (u64)dma_addr;
-
- /* use vzalloc for big mem */
- *shadow_vaddr = vzalloc(page_sz);
- if (!*shadow_vaddr)
- goto err_shadow_vaddr;
-
- return 0;
-
-err_shadow_vaddr:
- dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
- return -ENOMEM;
-}
-
-/**
- * wqs_allocate_page - allocate page for WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be allocated
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
-{
- return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
- &wqs->page_paddr[page_idx],
- &wqs->shadow_page_vaddr[page_idx],
- WQS_PAGE_SIZE);
-}
-
-/**
- * wqs_free_page - free page of WQ set
- * @wqs: Work Queue Set
- * @page_idx: the page index of the page will be freed
- **/
-static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
- wqs->page_vaddr[page_idx],
- (dma_addr_t)wqs->page_paddr[page_idx]);
- vfree(wqs->shadow_page_vaddr[page_idx]);
-}
-
-/**
- * cmdq_allocate_page - allocate page for cmdq
- * @cmdq_pages: the pages of the cmdq queue struct to hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
- &cmdq_pages->page_paddr,
- &cmdq_pages->shadow_page_vaddr,
- CMDQ_PAGE_SIZE);
-}
-
-/**
- * cmdq_free_page - free page from cmdq
- * @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_hwif *hwif = cmdq_pages->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
- cmdq_pages->page_vaddr,
- (dma_addr_t)cmdq_pages->page_paddr);
- vfree(cmdq_pages->shadow_page_vaddr);
-}
-
-static int alloc_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_paddr)
- return -ENOMEM;
-
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_vaddr)
- goto err_page_vaddr;
-
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->shadow_page_vaddr)
- goto err_page_shadow_vaddr;
-
- return 0;
-
-err_page_shadow_vaddr:
- devm_kfree(&pdev->dev, wqs->page_vaddr);
-
-err_page_vaddr:
- devm_kfree(&pdev->dev, wqs->page_paddr);
- return -ENOMEM;
-}
-
-static void free_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_paddr);
-}
-
-static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
- int *block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- wqs->num_free_blks--;
-
- if (wqs->num_free_blks < 0) {
- wqs->num_free_blks++;
- up(&wqs->alloc_blocks_lock);
- return -ENOMEM;
- }
-
- pos = wqs->alloc_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- *page_idx = wqs->free_blocks[pos].page_idx;
- *block_idx = wqs->free_blocks[pos].block_idx;
-
- wqs->free_blocks[pos].page_idx = -1;
- wqs->free_blocks[pos].block_idx = -1;
-
- up(&wqs->alloc_blocks_lock);
- return 0;
-}
-
-static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
- int block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- pos = wqs->return_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = block_idx;
-
- wqs->num_free_blks++;
-
- up(&wqs->alloc_blocks_lock);
-}
-
-static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
-{
- int page_idx, blk_idx, pos = 0;
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = blk_idx;
- pos++;
- }
- }
-
- wqs->alloc_blk_pos = 0;
- wqs->return_blk_pos = pos;
- wqs->num_free_blks = pos;
-
- sema_init(&wqs->alloc_blocks_lock, 1);
-}
-
-/**
- * hinic_wqs_alloc - allocate Work Queues set
- * @wqs: Work Queue Set
- * @max_wqs: maximum wqs to allocate
- * @hwif: HW interface for use for the allocation
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
- struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, page_idx;
-
- max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
- if (max_wqs > WQS_MAX_NUM_BLOCKS) {
- dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
- return -EINVAL;
- }
-
- wqs->hwif = hwif;
- wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
-
- if (alloc_page_arrays(wqs)) {
- dev_err(&pdev->dev,
- "Failed to allocate mem for page addresses\n");
- return -ENOMEM;
- }
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- err = wqs_allocate_page(wqs, page_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed wq page allocation\n");
- goto err_wq_allocate_page;
- }
- }
-
- wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
- GFP_KERNEL);
- if (!wqs->free_blocks) {
- err = -ENOMEM;
- goto err_alloc_blocks;
- }
-
- init_wqs_blocks_arr(wqs);
- return 0;
-
-err_alloc_blocks:
-err_wq_allocate_page:
- for (i = 0; i < page_idx; i++)
- wqs_free_page(wqs, i);
-
- free_page_arrays(wqs);
- return err;
-}
-
-/**
- * hinic_wqs_free - free Work Queues set
- * @wqs: Work Queue Set
- **/
-void hinic_wqs_free(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int page_idx;
-
- devm_kfree(&pdev->dev, wqs->free_blocks);
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
- wqs_free_page(wqs, page_idx);
-
- free_page_arrays(wqs);
-}
-
-/**
- * alloc_wqes_shadow - allocate WQE shadows for WQ
- * @wq: WQ to allocate shadows for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_wqe)
- return -ENOMEM;
-
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_idx)
- goto err_shadow_idx;
-
- return 0;
-
-err_shadow_idx:
- devm_kfree(&pdev->dev, wq->shadow_wqe);
- return -ENOMEM;
-}
-
-/**
- * free_wqes_shadow - free WQE shadows of WQ
- * @wq: WQ to free shadows from
- **/
-static void free_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wq->shadow_idx);
- devm_kfree(&pdev->dev, wq->shadow_wqe);
-}
-
-/**
- * free_wq_pages - free pages of WQ
- * @hwif: HW interface for releasing dma addresses
- * @wq: WQ to free pages from
- * @num_q_pages: number pages to free
- **/
-static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int num_q_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i;
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
- dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
- dma_addr);
- }
-
- free_wqes_shadow(wq);
-}
-
-/**
- * alloc_wq_pages - alloc pages for WQ
- * @hwif: HW interface for allocating dma addresses
- * @wq: WQ to allocate pages for
- * @max_pages: maximum pages allowed
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int max_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, err, num_q_pages;
-
- num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
- if (num_q_pages > max_pages) {
- dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
- return -EINVAL;
- }
-
- if (num_q_pages & (num_q_pages - 1)) {
- dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
- return -EINVAL;
- }
-
- wq->num_q_pages = num_q_pages;
-
- err = alloc_wqes_shadow(wq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
- return err;
- }
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate wq page\n");
- goto err_alloc_wq_pages;
- }
-
- /* HW uses Big Endian Format */
- *paddr = cpu_to_be64(dma_addr);
- }
-
- return 0;
-
-err_alloc_wq_pages:
- free_wq_pages(wq, hwif, i);
- return -ENOMEM;
-}
-
-/**
- * hinic_wq_allocate - Allocate the WQ resources from the WQS
- * @wqs: WQ set from which to allocate the WQ resources
- * @wq: WQ to allocate resources for it from the WQ set
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int err;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- wq->hwif = hwif;
-
- err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to get free wqs next block\n");
- return err;
- }
-
- wq->wqebb_size = wqebb_size;
- wq->wq_page_size = wq_page_size;
- wq->q_depth = q_depth;
- wq->max_wqe_size = max_wqe_size;
- wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
- wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
- wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
-
- err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wq pages\n");
- goto err_alloc_wq_pages;
- }
-
- atomic_set(&wq->cons_idx, 0);
- atomic_set(&wq->prod_idx, 0);
- atomic_set(&wq->delta, q_depth);
- wq->mask = q_depth - 1;
-
- return 0;
-
-err_alloc_wq_pages:
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
- return err;
-}
-
-/**
- * hinic_wq_free - Free the WQ resources to the WQS
- * @wqs: WQ set to free the WQ resources to it
- * @wq: WQ to free its resources to the WQ set resources
- **/
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
-{
- free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
-
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
-}
-
-/**
- * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
- * @cmdq_pages: will hold the pages of the cmdq
- * @wq: returned wqs
- * @hwif: HW interface
- * @cmdq_blocks: number of cmdq blocks/wq to allocate
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size)
-{
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int i, j, err = -ENOMEM;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- cmdq_pages->hwif = hwif;
-
- err = cmdq_allocate_page(cmdq_pages);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
- return err;
- }
-
- for (i = 0; i < cmdq_blocks; i++) {
- wq[i].hwif = hwif;
- wq[i].page_idx = 0;
- wq[i].block_idx = i;
-
- wq[i].wqebb_size = wqebb_size;
- wq[i].wq_page_size = wq_page_size;
- wq[i].q_depth = q_depth;
- wq[i].max_wqe_size = max_wqe_size;
- wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
- wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
- wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
-
- err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
- CMDQ_WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
- goto err_cmdq_block;
- }
-
- atomic_set(&wq[i].cons_idx, 0);
- atomic_set(&wq[i].prod_idx, 0);
- atomic_set(&wq[i].delta, q_depth);
- wq[i].mask = q_depth - 1;
- }
-
- return 0;
-
-err_cmdq_block:
- for (j = 0; j < i; j++)
- free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
- return err;
-}
-
-/**
- * hinic_wqs_cmdq_free - Free wqs from cmdqs
- * @cmdq_pages: hold the pages of the cmdq
- * @wq: wqs to free
- * @cmdq_blocks: number of wqs to free
- **/
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks)
-{
- int i;
-
- for (i = 0; i < cmdq_blocks; i++)
- free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
-}
-
-static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
-
- shadow_addr += wq->wqebb_size;
- }
-}
-
-static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
- shadow_addr += wq->wqebb_size;
- }
-}
-
-/**
- * hinic_get_wqe - get wqe ptr in the current pi and update the pi
- * @wq: wq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx)
-{
- int curr_pg, end_pg, num_wqebbs;
- u16 curr_prod_idx, end_prod_idx;
-
- *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
-
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
- atomic_add(num_wqebbs, &wq->delta);
- return ERR_PTR(-EBUSY);
- }
-
- end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
-
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
- curr_prod_idx = end_prod_idx - num_wqebbs;
- curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
-
- /* end prod index points to the next wqebb, therefore minus 1 */
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
- end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
-
- *prod_idx = curr_prod_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
-
- wq->shadow_idx[curr_pg] = *prod_idx;
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
-}
-
-/**
- * hinic_put_wqe - return the wqe place to use for a new wqe
- * @wq: wq to return wqe
- * @wqe_size: wqe size
- **/
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- atomic_add(num_wqebbs, &wq->cons_idx);
-
- atomic_add(num_wqebbs, &wq->delta);
-}
-
-/**
- * hinic_read_wqe - read wqe ptr in the current ci
- * @wq: wq to get read from
- * @wqe_size: wqe size
- * @cons_idx: returned ci
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- u16 curr_cons_idx, end_cons_idx;
- int curr_pg, end_pg;
-
- if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
- return ERR_PTR(-EBUSY);
-
- curr_cons_idx = atomic_read(&wq->cons_idx);
-
- curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
- end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
- end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
-
- *cons_idx = curr_cons_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
-}
-
-/**
- * hinic_read_wqe_direct - read wqe directly from ci position
- * @wq: wq
- * @cons_idx: ci position
- *
- * Return wqe
- **/
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
-{
- return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
-}
-
-/**
- * wqe_shadow - check if a wqe is shadow
- * @wq: wq of the wqe
- * @wqe: the wqe for shadow checking
- *
- * Return true - shadow, false - Not shadow
- **/
-static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
-{
- size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
-
- return WQE_IN_RANGE(wqe, wq->shadow_wqe,
- &wq->shadow_wqe[wqe_shadow_size]);
-}
-
-/**
- * hinic_write_wqe - write the wqe to the wq
- * @wq: wq to write wqe to
- * @wqe: wqe to write
- * @wqe_size: wqe size
- **/
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size)
-{
- int curr_pg, num_wqebbs;
- void *shadow_addr;
- u16 prod_idx;
-
- if (wqe_shadow(wq, wqe)) {
- curr_pg = WQE_SHADOW_PAGE(wq, wqe);
-
- prod_idx = wq->shadow_idx[curr_pg];
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
- }
-}
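
For reference, the producer-index handling in hinic_get_wqe() above reduces to a few lines of masked ring arithmetic. The sketch below is standalone and illustrative only: wq_sketch and num_wqebbs() are simplified stand-ins for the driver's struct hinic_wq and its ALIGN()-based WQEBB count, not the real definitions.

/*
 * Illustrative sketch of the index arithmetic in hinic_get_wqe() above.
 */
#include <stdio.h>

struct wq_sketch {
	unsigned int wqebb_size;	/* size of one WQ element block */
	unsigned int q_depth;		/* number of WQEBBs, a power of 2 */
	unsigned int mask;		/* q_depth - 1 */
	unsigned int prod_idx;		/* free-running producer index */
};

/* round the WQE size up to whole WQEBBs */
static unsigned int num_wqebbs(const struct wq_sketch *wq, unsigned int wqe_size)
{
	return (wqe_size + wq->wqebb_size - 1) / wq->wqebb_size;
}

int main(void)
{
	struct wq_sketch wq = { .wqebb_size = 64, .q_depth = 256,
				.mask = 255, .prod_idx = 254 };
	unsigned int n = num_wqebbs(&wq, 192);			/* 3 WQEBBs */
	unsigned int start = wq.prod_idx & wq.mask;		/* 254 */
	unsigned int end = (wq.prod_idx + n - 1) & wq.mask;	/* wraps to 0 */

	/* when start and end fall on different WQ pages, the driver builds
	 * the WQE in the shadow buffer instead of the ring itself
	 */
	printf("wqebbs=%u start=%u end=%u\n", n, start, end);
	return 0;
}
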
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
deleted file mode 100644
index 9c030a0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQ_H
-#define HINIC_HW_WQ_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/atomic.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-
-struct hinic_free_block {
- int page_idx;
- int block_idx;
-};
-
-struct hinic_wq {
- struct hinic_hwif *hwif;
-
- int page_idx;
- int block_idx;
-
- u16 wqebb_size;
- u16 wq_page_size;
- u16 q_depth;
- u16 max_wqe_size;
- u16 num_wqebbs_per_page;
-
- /* The addresses are 64 bit in the HW */
- u64 block_paddr;
- void **shadow_block_vaddr;
- u64 *block_vaddr;
-
- int num_q_pages;
- u8 *shadow_wqe;
- u16 *shadow_idx;
-
- atomic_t cons_idx;
- atomic_t prod_idx;
- atomic_t delta;
- u16 mask;
-};
-
-struct hinic_wqs {
- struct hinic_hwif *hwif;
- int num_pages;
-
- /* The addresses are 64 bit in the HW */
- u64 *page_paddr;
- u64 **page_vaddr;
- void ***shadow_page_vaddr;
-
- struct hinic_free_block *free_blocks;
- int alloc_blk_pos;
- int return_blk_pos;
- int num_free_blks;
-
- /* Lock for getting a free block from the WQ set */
- struct semaphore alloc_blocks_lock;
-};
-
-struct hinic_cmdq_pages {
- /* The addresses are 64 bit in the HW */
- u64 page_paddr;
- u64 *page_vaddr;
- void **shadow_page_vaddr;
-
- struct hinic_hwif *hwif;
-};
-
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size);
-
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks);
-
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
- struct hinic_hwif *hwif);
-
-void hinic_wqs_free(struct hinic_wqs *wqs);
-
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size);
-
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
-
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx);
-
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
-
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx);
-
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
-
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size);
-
-#endif
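
The cons_idx/prod_idx/delta triple declared in struct hinic_wq above is what hinic_get_wqe()/hinic_put_wqe() manipulate: delta tracks free WQEBBs, a post fails once it would drop to zero, and completion returns the space. Below is a simplified, single-threaded model of that accounting; ring_sketch, post() and complete() are illustrative names, not driver code.

#include <stdio.h>

struct ring_sketch {
	int q_depth;	/* total WQEBBs */
	int delta;	/* free WQEBBs, starts at q_depth */
	int prod_idx;
	int cons_idx;
};

static int post(struct ring_sketch *r, int wqebbs)
{
	if (r->delta - wqebbs <= 0)
		return -1;	/* ring full, as hinic_get_wqe() reports -EBUSY */
	r->delta -= wqebbs;
	r->prod_idx += wqebbs;
	return 0;
}

static void complete(struct ring_sketch *r, int wqebbs)
{
	r->cons_idx += wqebbs;
	r->delta += wqebbs;	/* mirrors hinic_put_wqe() */
}

int main(void)
{
	struct ring_sketch r = { .q_depth = 8, .delta = 8 };

	post(&r, 3);
	post(&r, 3);
	printf("free=%d, next post of 3 %s\n", r.delta,
	       post(&r, 3) ? "rejected" : "accepted");	/* rejected: 2 - 3 <= 0 */
	complete(&r, 3);
	printf("after completion free=%d\n", r.delta);
	return 0;
}
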
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
deleted file mode 100644
index bc73485..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQE_H
-#define HINIC_HW_WQE_H
-
-#include "hinic_common.h"
-
-#define HINIC_CMDQ_CTRL_PI_SHIFT 0
-#define HINIC_CMDQ_CTRL_CMD_SHIFT 16
-#define HINIC_CMDQ_CTRL_MOD_SHIFT 24
-#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
-
-#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF
-#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF
-#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F
-#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1
-
-#define HINIC_CMDQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \
- << HINIC_CMDQ_CTRL_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTRL_GET(val, member) \
- (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \
- & HINIC_CMDQ_CTRL_##member##_MASK)
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \
- << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT)
-
-#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \
- (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
- & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
-
-#define HINIC_SQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
- << HINIC_SQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_CTRL_GET(val, member) \
- (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
- & HINIC_SQ_CTRL_##member##_MASK)
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF
-
-#define HINIC_SQ_TASK_INFO0_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \
- HINIC_SQ_TASK_INFO0_##member##_SHIFT)
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF
-
-#define HINIC_SQ_TASK_INFO1_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
- HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \
- HINIC_SQ_TASK_INFO2_##member##_SHIFT)
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1
-
-#define HINIC_SQ_TASK_INFO4_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \
- HINIC_SQ_TASK_INFO4_##member##_SHIFT)
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
-
-#define HINIC_RQ_CQE_STATUS_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
- HINIC_RQ_CQE_STATUS_##member##_MASK)
-
-#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \
- ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \
- HINIC_RQ_CQE_STATUS_##member##_SHIFT)))
-
-#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16
-
-#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF
-
-#define HINIC_RQ_CQE_SGE_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \
- HINIC_RQ_CQE_SGE_##member##_MASK)
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
-#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27
-#define HINIC_RQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1
-#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3
-#define HINIC_RQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_RQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \
- HINIC_RQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_WQE_SIZE(nr_sges) \
- (sizeof(struct hinic_sq_ctrl) + \
- sizeof(struct hinic_sq_task) + \
- (nr_sges) * sizeof(struct hinic_sq_bufdesc))
-
-#define HINIC_SCMD_DATA_LEN 16
-
-#define HINIC_MAX_SQ_BUFDESCS 17
-
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
-
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
-enum hinic_outer_l3type {
- HINIC_OUTER_L3TYPE_UNKNOWN = 0,
- HINIC_OUTER_L3TYPE_IPV6 = 1,
- HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2,
- HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
-};
-
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
-enum hinic_l2type {
- HINIC_L2TYPE_ETH = 0,
-};
-
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
-struct hinic_cmdq_header {
- u32 header_info;
- u32 saved_data;
-};
-
-struct hinic_status {
- u32 status_info;
-};
-
-struct hinic_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_sge_resp {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_cmdq_completion {
- /* HW Format */
- union {
- struct hinic_sge_resp sge_resp;
- u64 direct_resp;
- };
-};
-
-struct hinic_scmd_bufdesc {
- u32 buf_len;
- u32 rsvd;
- u8 data[HINIC_SCMD_DATA_LEN];
-};
-
-struct hinic_lcmd_bufdesc {
- struct hinic_sge sge;
- u32 rsvd1;
- u64 rsvd2;
- u64 rsvd3;
-};
-
-struct hinic_cmdq_wqe_scmd {
- struct hinic_cmdq_header header;
- u64 rsvd;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_scmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_wqe_lcmd {
- struct hinic_cmdq_header header;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_lcmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_direct_wqe {
- struct hinic_cmdq_wqe_scmd wqe_scmd;
-};
-
-struct hinic_cmdq_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_direct_wqe direct_wqe;
- struct hinic_cmdq_wqe_lcmd wqe_lcmd;
- };
-};
-
-struct hinic_sq_ctrl {
- u32 ctrl_info;
- u32 queue_info;
-};
-
-struct hinic_sq_task {
- u32 pkt_info0;
- u32 pkt_info1;
- u32 pkt_info2;
- u32 ufo_v6_identify;
- u32 pkt_info4;
- u32 zero_pad;
-};
-
-struct hinic_sq_bufdesc {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_sq_wqe {
- struct hinic_sq_ctrl ctrl;
- struct hinic_sq_task task;
- struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
-};
-
-struct hinic_rq_cqe {
- u32 status;
- u32 len;
-
- u32 rsvd2;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 rsvd7;
-};
-
-struct hinic_rq_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_rq_cqe_sect {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_rq_bufdesc {
- u32 hi_addr;
- u32 lo_addr;
-};
-
-struct hinic_rq_wqe {
- struct hinic_rq_ctrl ctrl;
- u32 rsvd;
- struct hinic_rq_cqe_sect cqe_sect;
- struct hinic_rq_bufdesc buf_desc;
-};
-
-struct hinic_hw_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_wqe cmdq_wqe;
- struct hinic_sq_wqe sq_wqe;
- struct hinic_rq_wqe rq_wqe;
- };
-};
-
-#endif
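
All of the register-style words above are built and parsed with the same SHIFT/MASK pattern. A minimal standalone sketch of that pattern, using two fields of the CMDQ control word and arbitrary test values:

#include <assert.h>

#define CTRL_PI_SHIFT	0
#define CTRL_CMD_SHIFT	16
#define CTRL_PI_MASK	0xFFFFU
#define CTRL_CMD_MASK	0xFFU

#define CTRL_SET(val, member) \
	(((unsigned int)(val) & CTRL_##member##_MASK) << CTRL_##member##_SHIFT)
#define CTRL_GET(val, member) \
	(((val) >> CTRL_##member##_SHIFT) & CTRL_##member##_MASK)

int main(void)
{
	unsigned int ctrl = CTRL_SET(0x1234, PI) | CTRL_SET(0x5A, CMD);

	assert(CTRL_GET(ctrl, PI) == 0x1234);
	assert(CTRL_GET(ctrl, CMD) == 0x5A);
	return 0;
}
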
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
deleted file mode 100644
index 4d4e3f0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
-#include "hinic_dev.h"
-
-#define HINIC_MIN_MTU_SIZE 256
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-
-enum mac_op {
- MAC_DEL,
- MAC_SET,
-};
-
-/**
- * change_mac - change(add or delete) mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- * @op: add or delete the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id, enum mac_op op)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_port_cmd cmd;
- u16 out_size;
- int err;
-
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
- if (op == MAC_SET)
- cmd = HINIC_PORT_CMD_SET_MAC;
- else
- cmd = HINIC_PORT_CMD_DEL_MAC;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mac_cmd.vlan_id = vlan_id;
- memcpy(port_mac_cmd.mac, addr, ETH_ALEN);
-
- err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
- sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_mac - add mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_mac(struct hinic_dev *nic_dev,
- const u8 *addr, u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_SET);
-}
-
-/**
- * hinic_port_del_mac - remove mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number that is connected to the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_DEL);
-}
-
-/**
- * hinic_port_get_mac - get the mac address of the nic device
- * @nic_dev: nic device
- * @addr: returned mac address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
- &port_mac_cmd, sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to get mac, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- memcpy(addr, port_mac_cmd.mac, ETH_ALEN);
- return 0;
-}
-
-/**
- * hinic_port_set_mtu - set mtu
- * @nic_dev: nic device
- * @new_mtu: new mtu
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mtu_cmd port_mtu_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, max_frame;
- u16 out_size;
-
- if (new_mtu < HINIC_MIN_MTU_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
- return -EINVAL;
- }
-
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size");
- return -EINVAL;
- }
-
- port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mtu_cmd.mtu = new_mtu;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
- &port_mtu_cmd, sizeof(port_mtu_cmd),
- &port_mtu_cmd, &out_size);
- if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) {
- dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n",
- port_mtu_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_vlan - add vlan to the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to add
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_del_vlan - delete vlan from the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to delete
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_set_rx_mode - set rx mode in the nic device
- * @nic_dev: nic device
- * @rx_mode: the rx mode to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_rx_mode_cmd rx_mode_cmd;
-
- rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- rx_mode_cmd.rx_mode = rx_mode;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
- &rx_mode_cmd, sizeof(rx_mode_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_link_state - get the link state
- * @nic_dev: nic device
- * @link_state: the returned link state
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_port_link_cmd link_cmd;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
- &link_cmd, sizeof(link_cmd),
- &link_cmd, &out_size);
- if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) {
- dev_err(&pdev->dev, "Failed to get link state, ret = %d\n",
- link_cmd.status);
- return -EINVAL;
- }
-
- *link_state = link_cmd.state;
- return 0;
-}
-
-/**
- * hinic_port_set_state - set port state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_state_cmd port_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- port_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
- &port_state, sizeof(port_state),
- &port_state, &out_size);
- if (err || (out_size != sizeof(port_state)) || port_state.status) {
- dev_err(&pdev->dev, "Failed to set port state, ret = %d\n",
- port_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_set_func_state- set func device state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state)
-{
- struct hinic_port_func_state_cmd func_state;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- func_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
- &func_state, sizeof(func_state),
- &func_state, &out_size);
- if (err || (out_size != sizeof(func_state)) || func_state.status) {
- dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n",
- func_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_get_cap - get port capabilities
- * @nic_dev: nic device
- * @port_cap: returned port capabilities
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
- port_cap, sizeof(*port_cap),
- port_cap, &out_size);
- if (err || (out_size != sizeof(*port_cap)) || port_cap->status) {
- dev_err(&pdev->dev,
- "Failed to get port capabilities, ret = %d\n",
- port_cap->status);
- return -EINVAL;
- }
-
- return 0;
-}
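
Every helper in the file above follows the same management-command pattern: fill a fixed-layout request with the function index, send it through hinic_port_msg_cmd(), and treat a transport error, a short reply, or a non-zero status byte as failure. A standalone sketch of that pattern, with send_cmd() as an assumed stand-in for the real mailbox transport:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct port_mtu_cmd {			/* mirrors struct hinic_port_mtu_cmd */
	uint8_t  status;
	uint8_t  version;
	uint8_t  rsvd0[6];
	uint16_t func_idx;
	uint16_t rsvd1;
	uint32_t mtu;
};

/* stub transport: echo the request back as the reply (in and out may alias) */
static int send_cmd(const void *in, size_t in_size, void *out, uint16_t *out_size)
{
	memmove(out, in, in_size);
	*out_size = (uint16_t)in_size;
	return 0;
}

int main(void)
{
	struct port_mtu_cmd cmd = { .func_idx = 3, .mtu = 1500 };
	uint16_t out_size = 0;
	int err = send_cmd(&cmd, sizeof(cmd), &cmd, &out_size);

	if (err || out_size != sizeof(cmd) || cmd.status) {
		fprintf(stderr, "set mtu failed, status %u\n",
			(unsigned int)cmd.status);
		return 1;
	}
	printf("mtu %u accepted for function %u\n",
	       (unsigned int)cmd.mtu, (unsigned int)cmd.func_idx);
	return 0;
}
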
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
deleted file mode 100644
index 9404365..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_PORT_H
-#define HINIC_PORT_H
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/bitops.h>
-
-#include "hinic_dev.h"
-
-enum hinic_rx_mode {
- HINIC_RX_MODE_UC = BIT(0),
- HINIC_RX_MODE_MC = BIT(1),
- HINIC_RX_MODE_BC = BIT(2),
- HINIC_RX_MODE_MC_ALL = BIT(3),
- HINIC_RX_MODE_PROMISC = BIT(4),
-};
-
-enum hinic_port_link_state {
- HINIC_LINK_STATE_DOWN,
- HINIC_LINK_STATE_UP,
-};
-
-enum hinic_port_state {
- HINIC_PORT_DISABLE = 0,
- HINIC_PORT_ENABLE = 3,
-};
-
-enum hinic_func_port_state {
- HINIC_FUNC_PORT_DISABLE = 0,
- HINIC_FUNC_PORT_ENABLE = 2,
-};
-
-enum hinic_autoneg_cap {
- HINIC_AUTONEG_UNSUPPORTED,
- HINIC_AUTONEG_SUPPORTED,
-};
-
-enum hinic_autoneg_state {
- HINIC_AUTONEG_DISABLED,
- HINIC_AUTONEG_ACTIVE,
-};
-
-enum hinic_duplex {
- HINIC_DUPLEX_HALF,
- HINIC_DUPLEX_FULL,
-};
-
-enum hinic_speed {
- HINIC_SPEED_10MB_LINK = 0,
- HINIC_SPEED_100MB_LINK,
- HINIC_SPEED_1000MB_LINK,
- HINIC_SPEED_10GB_LINK,
- HINIC_SPEED_25GB_LINK,
- HINIC_SPEED_40GB_LINK,
- HINIC_SPEED_100GB_LINK,
-
- HINIC_SPEED_UNKNOWN = 0xFF,
-};
-
-struct hinic_port_mac_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
- u16 rsvd1;
- unsigned char mac[ETH_ALEN];
-};
-
-struct hinic_port_mtu_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u32 mtu;
-};
-
-struct hinic_port_vlan_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
-};
-
-struct hinic_port_rx_mode_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd;
- u32 rx_mode;
-};
-
-struct hinic_port_link_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
-};
-
-struct hinic_port_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 state;
- u8 rsvd1[3];
-};
-
-struct hinic_port_link_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 rsvd1;
- u8 link;
- u8 rsvd2;
-};
-
-struct hinic_port_func_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 state;
- u8 rsvd2[3];
-};
-
-struct hinic_port_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 port_type;
- u8 autoneg_cap;
- u8 autoneg_state;
- u8 duplex;
- u8 speed;
- u8 rsvd2[3];
-};
-
-int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr);
-
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu);
-
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode);
-
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state);
-
-int hinic_port_set_state(struct hinic_dev *nic_dev,
- enum hinic_port_state state);
-
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state);
-
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap);
-
-#endif
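
The hinic_rx_mode values above are bit flags that are OR-ed into the u32 passed to hinic_port_set_rx_mode(); the combination in this short sketch is an arbitrary example, not a driver default:

#include <stdio.h>

#define RX_MODE_UC	(1u << 0)
#define RX_MODE_MC	(1u << 1)
#define RX_MODE_BC	(1u << 2)
#define RX_MODE_MC_ALL	(1u << 3)
#define RX_MODE_PROMISC	(1u << 4)

int main(void)
{
	unsigned int rx_mode = RX_MODE_UC | RX_MODE_MC | RX_MODE_BC;

	rx_mode |= RX_MODE_PROMISC;	/* e.g. when promiscuous mode is requested */

	printf("rx_mode mask: 0x%x\n", rx_mode);	/* prints 0x17 */
	return 0;
}
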
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
deleted file mode 100644
index b837dab..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
+++ /dev/null
@@ -1,2728 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0*/
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_H__
-#define __SML_TABLE_H__
-
-#include "hinic_sml_table_pub.h"
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-#define TBL_ID_CTR_DFX_S32_SM_NODE 11
-#define TBL_ID_CTR_DFX_S32_SM_INST 20
-
-#define TBL_ID_CTR_DFX_PAIR_SM_NODE 10
-#define TBL_ID_CTR_DFX_PAIR_SM_INST 24
-
-#define TBL_ID_CTR_DFX_S64_SM_NODE 11
-#define TBL_ID_CTR_DFX_S64_SM_INST 21
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)) && \
- (!defined(__FPGA__)))
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 21
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 22
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 23
-
-#define TBL_ID_FUNC_CFG_SM_NODE 11
-#define TBL_ID_FUNC_CFG_SM_INST 1
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 11
-#define TBL_ID_TRUNK_FWD_SM_INST 2
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 11
-#define TBL_ID_VLAN_FILTER_SM_INST 3
-
-#define TBL_ID_ELB_SM_NODE 11
-#define TBL_ID_ELB_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 11
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 5
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 11
-#define TBL_ID_RSS_CONTEXT_SM_INST 6
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 11
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 7
-
-#define TBL_ID_VTEP_IP_SM_NODE 11
-#define TBL_ID_VTEP_IP_SM_INST 8
-
-#define TBL_ID_NAT_SM_NODE 11
-#define TBL_ID_NAT_SM_INST 9
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 10
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 11
-#define TBL_ID_MISC_LRO_AGING_SM_INST 11
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 12
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 11
-#define TBL_ID_MISC_CQE_AGING_SM_INST 13
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 11
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 14
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 15
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 16
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 17
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 41
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 43
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 44
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 45
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 46
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 47
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 51
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 11
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 11
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#else
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 13
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 14
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 15
-
-#define TBL_ID_FUNC_CFG_SM_NODE 10
-#define TBL_ID_FUNC_CFG_SM_INST 16
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 10
-#define TBL_ID_TRUNK_FWD_SM_INST 17
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 10
-#define TBL_ID_VLAN_FILTER_SM_INST 18
-
-#define TBL_ID_ELB_SM_NODE 10
-#define TBL_ID_ELB_SM_INST 19
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 20
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 10
-#define TBL_ID_RSS_CONTEXT_SM_INST 21
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 10
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 22
-
-#define TBL_ID_VTEP_IP_SM_NODE 10
-#define TBL_ID_VTEP_IP_SM_INST 23
-
-#define TBL_ID_NAT_SM_NODE 10
-#define TBL_ID_NAT_SM_INST 24
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 25
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 10
-#define TBL_ID_MISC_LRO_AGING_SM_INST 26
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 27
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 10
-#define TBL_ID_MISC_CQE_AGING_SM_INST 28
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 10
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 29
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 40
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 41
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 43
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 44
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 45
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 46
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 47
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 51
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 52
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 53
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 10
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 10
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#endif
-
-#define TBL_ID_MISC_RSS_HASH_SM_NODE TBL_ID_MISC_RSS_HASH0_SM_NODE
-#define TBL_ID_MISC_RSS_HASH_SM_INST TBL_ID_MISC_RSS_HASH0_SM_INST
-
-/*rx cqe checksum err*/
-#define NIC_RX_CSUM_IP_CSUM_ERR BIT(0)
-#define NIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
-#define NIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
-#define NIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
-#define NIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
-#define NIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
-#define NIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
-#define NIC_RX_CSUM_HW_BYPASS_ERR BIT(7)
-
-typedef struct tag_log_ctrl {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mod_name:4;
- u32 log_level:4;
- u32 rsvd:8;
- u32 line_num:16;
-#else
- u32 line_num:16;
- u32 rsvd:8;
- u32 log_level:4;
- u32 mod_name:4;
-#endif
-} log_ctrl;
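
The byte-order conditional in log_ctrl (and in the structs that follow) exists because bit-fields are typically allocated from the least significant bit upward on little-endian ABIs and from the most significant bit downward on big-endian ones, so the declaration order has to be reversed to keep each field at the same position in the 32-bit word the hardware sees. A quick, illustrative check of the little-endian layout; bit-field packing is ultimately compiler-defined, so this is only an assumption about the usual ABI behaviour:

#include <stdio.h>
#include <stdint.h>

/* little-endian layout from the header above: first member = lowest bits */
struct log_ctrl_le {
	uint32_t line_num:16;
	uint32_t rsvd:8;
	uint32_t log_level:4;
	uint32_t mod_name:4;
};

int main(void)
{
	union { struct log_ctrl_le bs; uint32_t value; } u = { .bs = {
		.line_num = 0x1234, .log_level = 0x5, .mod_name = 0xA } };

	/* expect 0xa5001234 on a typical little-endian ABI */
	printf("0x%08x\n", (unsigned int)u.value);
	return 0;
}
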
-
-/**
- * 1. bank GPA address is HOST-based, every host has 4 bank GPA,
- * total size 4*32B
- * 2. Allocated space for storing
- * Two global entry are allocated for storing bank GPA,
- * which are index5 and index6. (Note index start value is 0)
- * The index5 top 32B store the bank GPA of host 0;
- * Remain 32B store the bank GPA of host 1.
- * The index6 top 32B store the bank GPA of host 2,
- * the remain 32B store the bank GPA of host 3.
- * Bank GPA corresponding to the each host is based on the following format)
- */
-typedef struct tag_sml_global_bank_gpa {
- u32 bank0_gpa_h32;
- u32 bank0_gpa_l32;
-
- u32 bank1_gpa_h32;
- u32 bank1_gpa_l32;
-
- u32 bank2_gpa_h32;
- u32 bank2_gpa_l32;
-
- u32 bank3_gpa_h32;
- u32 bank3_gpa_l32;
-} global_bank_gpa_s;
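
Put as arithmetic, the layout described in the comment above places host N's 32-byte bank-GPA block (four 8-byte GPAs) at global entry 5 + N/2, byte offset (N % 2) * 32, assuming 64-byte global entries as the index5/index6 split implies. A tiny sketch of that mapping:

#include <stdio.h>

#define BANK_GPA_BASE_ENTRY	5	/* first global entry used */
#define HOST_BLOCK_SIZE		32	/* bytes per host: 4 x 8-byte GPA */

int main(void)
{
	for (int host = 0; host < 4; host++) {
		int entry  = BANK_GPA_BASE_ENTRY + host / 2;
		int offset = (host % 2) * HOST_BLOCK_SIZE;

		printf("host %d -> global entry %d, byte offset %d\n",
		       host, entry, offset);
	}
	return 0;
}
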
-
-/**
- * Struct name: sml_global_table_s
- * @brief: global_table structure
- * Description: global configuration table
- */
-typedef struct tag_sml_global_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 port_mode:1; /*portmode:0-eth;1-fic */
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* unicast/multicastmode:0-drop;
- * 1-broadcastinvlandomain
- */
- u32 un_mc_mode:1;
- /* maclearnenable:1-enable */
- u32 mac_learn_en:1;
- u32 qcn_en:1;
- u32 esl_run_flag:1;
- /* 1-special protocal pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 vf_mask:4;
- u32 dif_ser_type:2;
- u32 rsvd0:1;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd0:1;
- u32 dif_ser_type:2;
- u32 vf_mask:4;
- /*1-special protocal pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 esl_run_flag:1;
- u32 qcn_en:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- /*unicast/multicastmode:0-drop;1-broadcastinvlandomain*/
- u32 un_mc_mode:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- u32 port_mode:1; /*portmode:0-eth;1-fic */
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
- u32 xrc_pl_dec:1;
- u32 sq_cqn:20;
- u32 qpc_stg:1;
- u32 qpc_state_err:1;
- u32 qpc_wb_flag:1;
-#else
- u32 qpc_wb_flag:1;
- u32 qpc_state_err:1;
- u32 qpc_stg:1;
- u32 sq_cqn:20;
- u32 xrc_pl_dec:1;
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop_cause_id:16;
- u32 pkt_len:16;
-#else
- u32 pkt_len:16;
- u32 drop_cause_id:16;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- u8 fcoe_vf_table[12];
-
- union {
- struct {
- /* [31:30]Pipeline number mode. */
- u32 cfg_mode_pn:2;
- /* [29:28]initial default fq mode for traffic
- * from rx side
- */
- u32 cfg_mode_init_def_fq:2;
- /* [27:16]base fqid for initial default fqs
- * (for packest from rx side only).
- */
- u32 cfg_base_init_def_fq:12;
- /* [15:15]push doorbell as new packet to tile
- * via command path enable.
- */
- u32 cfg_psh_msg_en:1;
- /* [14:14]1,enable asc for scanning
- * active fq.0,disable.
- */
- u32 enable_asc:1;
- /* [13:13]1,enable pro for commands process.0,disable.*/
- u32 enable_pro:1;
- /* [12:12]1,ngsf mode.0,ethernet mode. */
- u32 cfg_ngsf_mod:1;
- /* [11:11]Stateful process enable. */
- u32 enable_stf:1;
- /* [10:9]initial default fq mode for
- * traffic from tx side.
- */
- u32 cfg_mode_init_def_fq_tx:2;
- /* [8:0]maximum allocable oeid configuration. */
- u32 cfg_max_oeid:9;
- } bs;
- u32 value;
- } fq_mode;
-
- u32 rsvd2[8];
-} sml_global_table_s;
-
-/**
- * Struct name: sml_fic_config_table_s
- * @brief: global_table structure
- * Description: global configuration table
- */
-typedef struct tag_sml_fic_config_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- u32 rsvd:12;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd:12;
- u32 mac_learn_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd2[14];
-} sml_fic_config_table_s;
-
-/**
- * Struct name: sml_ucode_version_info_table_s
- * @brief: microcode version information structure
- * Description: global configuration table entry data structure of index 1
- */
-typedef struct tag_sml_ucode_version_info_table {
- u32 ucode_version[4];
- u32 ucode_compile_time[5];
- u32 rsvd[7];
-} sml_ucode_version_info_table_s;
-
-/**
- * Struct name: sml_funcfg_tbl_s
- * @brief: Function Configuration Table
- * Description: Function Configuration attribute table
- */
-typedef struct tag_sml_funcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* function valid: 0-invalid; 1-valid */
- u32 valid:1;
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- /* lli enable: 0-disable; 1-enable */
- u32 lli_en:1;
- /* rss enable: 0-disable; 1-enable */
- u32 rss_en:1;
- /* rx vlan offload enable: 0-disable; 1-enable */
- u32 rxvlan_offload_en:1;
- /* tso local coalesce enable: 0-disable; 1-enable */
- u32 tso_local_coalesce:1;
- u32 rsvd1:1;
- u32 rsvd2:1;
- /* qos rx car enable: 0-disable; 1-enable */
- u32 qos_rx_car_en:1;
- /* mac filter enable: 0-disable; 1-enable */
- u32 mac_filter_en:1;
- /* ipmac filter enable: 0-disable; 1-enable */
- u32 ipmac_filter_en:1;
- /* ethtype filter enable: 0-disable; 1-enable */
- u32 ethtype_filter_en:1;
- /* mc bc limit enable: 0-disable; 1-enable */
- u32 mc_bc_limit_en:1;
- /* acl tx enable: 0-disable; 1-enable */
- u32 acl_tx_en:1;
- /* acl rx enable: 0-disable; 1-enable */
- u32 acl_rx_en:1;
- /* ovs function enable: 0-disable; 1-enable */
- u32 ovs_func_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- /* fic car enable: 0-disable; 1-enable */
- u32 fic_car_en:1;
- u32 tso_en:1;
- u32 nic_rx_mode:5; /* nic_rx_mode:
- * 0b00001: unicast mode
- * 0b00010: multicast mode
- * 0b00100: broadcast mode
- * 0b01000: all multicast mode
- * 0b10000: promisc mod
- */
- u32 rsvd4:3;
- u32 def_pri:3; /* default priority */
- /* host id: [0~3]. support up to 4 Host. */
- u32 host_id:2;
-#else
- u32 host_id:2;
- u32 def_pri:3;
- u32 rsvd4:3;
- u32 nic_rx_mode:5;
- u32 tso_en:1;
- u32 fic_car_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_func_en:1;
- u32 acl_rx_en:1;
- u32 acl_tx_en:1;
- u32 mc_bc_limit_en:1;
- u32 ethtype_filter_en:1;
- u32 ipmac_filter_en:1;
- u32 mac_filter_en:1;
- u32 qos_rx_car_en:1;
- u32 rsvd2:1;
- u32 rsvd1:1;
- u32 tso_local_coalesce:1;
- u32 rxvlan_offload_en:1;
- u32 rss_en:1;
- u32 lli_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mtu:16; /* mtu value: [64-15500] */
- u32 rsvd1:1;
- /* vlan mode: 0-all; 1-access; 2-trunk;
- * 3-hybrid(unsupport); 4-qinq port;
- */
- u32 vlan_mode:3;
- u32 vlan_id:12; /* vlan id: [0~4095] */
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd1:1;
- u32 mtu:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1; /* lli mode */
- /* er forward trunk type: 0-ethernet type, 1-fic type */
- u32 er_fwd_trunk_type:1;
- /* er forward trunk mode:
- * 0-standby; 1-smac; 2-dmac; 3-smacdmac; 4-sip; 5-dip;
- * 6-sipdip; 7-5tuples; 8-lacp
- */
- u32 er_fwd_trunk_mode:4;
- /* edge relay mode: 0-VEB; 1-VEPA(unsupport);
- * 2-Multi-Channel(unsupport)
- */
- u32 er_mode:2;
- /* edge relay id: [0~15]. support up to 16 er. */
- u32 er_id:4;
- /* er forward type: 2-port; 3-fic;
- * 4-trunk; other-unsupport
- */
- u32 er_fwd_type:4;
- /* er forward id:
- * fwd_type=2: forward ethernet port id
- * fwd_type=3: forward fic id(tb+tp)
- * fwd_type=4: forward trunk id
- */
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 pfc_en:1;
- u32 rsvd1:7;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 rsvd2:2;
- u32 roce_en:1;
- u32 iwarp_en:1;
- u32 fcoe_en:1;
- u32 toe_en:1;
- u32 rsvd3:8;
- u32 ethtype_group_id:8;
-#else
- u32 ethtype_group_id:8;
- u32 rsvd3:8;
- u32 toe_en:1;
- u32 fcoe_en:1;
- u32 iwarp_en:1;
- u32 roce_en:1;
- u32 rsvd2:2;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 rsvd1:7;
- u32 pfc_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 vni:24;
-#else
- u32 vni:24;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 rq_thd:13;
- u32 host_car_id:11; /* host car id */
-#else
- u32 host_car_id:11;
- u32 rq_thd:13;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw6;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:5;
- u32 fic_uc_car_id:11; /* fic unicast car id */
- u32 rsvd2:5;
- u32 fic_mc_car_id:11; /* fic multicast car id */
-#else
- u32 fic_mc_car_id:11;
- u32 rsvd2:5;
- u32 fic_uc_car_id:11;
- u32 rsvd1:5;
-#endif
- } fic_bs;
-
- u32 value;
- } dw7;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* safe group identifier valid: 0-invalid; 1-valid */
- u32 sg_id_valid:1;
- u32 sg_id:10; /* safe group identifier */
- u32 rsvd9:1;
- /* rq priority enable: 0-disable; 1-enable */
- u32 rq_pri_en:1;
- /* rq priority num: 0-1pri; 1-2pri; 2-4pri; 3-8pri */
- u32 rq_pri_num:3;
- /* one wqe buffer size, default is 2K bytes */
- u32 rx_wqe_buffer_size:16;
-#else
- u32 rx_wqe_buffer_size:16;
- u32 rq_pri_num:3;
- u32 rq_pri_en:1;
- u32 rsvd9:1;
- u32 sg_id:10;
- u32 sg_id_valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw8;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* IPv4 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv4_en:1;
- /* IPv6 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv6_en:1;
- /* LRO pkt max wqe buffer number */
- u32 lro_max_wqe_num:6;
- /* Each group occupies 3bits,
- * 8 group share allocation 24bits,
- * group 0 corresponds to the low 3bits
- */
- u32 vlan_pri_map_group:24;
-#else
- u32 vlan_pri_map_group:24;
- u32 lro_max_wqe_num:6;
- u32 lro_ipv6_en:1;
- u32 lro_ipv4_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw9;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_group_id:4;
- u32 lli_frame_size:12;
- u32 smac_h16:16;
-#else
- u32 smac_h16:16;
- u32 lli_frame_size:12;
- u32 rss_group_id:4;
-#endif
- } bs;
-
- u32 value;
- } dw10;
-
- u32 smac_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 oqid:16;
- u32 vf_map_pf_id:4;
- /*lro change; 0:changing 1:change done */
- u32 lro_change_flag:1;
- u32 rsvd11:1;
- u32 base_qid:10;
-#else
- u32 base_qid:10;
- u32 rsvd11:1;
- u32 lro_change_flag:1;
- u32 vf_map_pf_id:4;
- u32 oqid:16;
-#endif
- } bs;
-
- u32 value;
- } dw12;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:2;
- u32 cfg_rq_depth:6;
- u32 cfg_q_num:6;
- u32 fc_port_id:4;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 fc_port_id:4;
- u32 cfg_q_num:6;
- u32 cfg_rq_depth:6;
- u32 rsvd1:2;
-#endif
- } bs;
-
- u32 value;
- } dw13;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
-
- } dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:2;
- u32 bond3_hash_policy:3;
- u32 bond3_mode:3;
- u32 rsvd2:2;
- u32 bond2_hash_policy:3;
- u32 bond2_mode:3;
- u32 rsvd1:2;
- u32 bond1_hash_policy:3;
- u32 bond1_mode:3;
- u32 rsvd0:2;
- u32 bond0_hash_policy:3;
- u32 bond0_mode:3;
-#else
- u32 bond0_mode:3;
- u32 bond0_hash_policy:3;
- u32 rsvd0:2;
- u32 bond1_mode:3;
- u32 bond1_hash_policy:3;
- u32 rsvd1:2;
- u32 bond2_mode:3;
- u32 bond2_hash_policy:3;
- u32 rsvd2:2;
- u32 bond3_mode:3;
- u32 bond3_hash_policy:3;
- u32 rsvd3:2;
-#endif
- } bs;
-
- u32 value;
-
- } dw15;
-} sml_funcfg_tbl_s;
-
-/**
- * Struct name: sml_portcfg_tbl_s
- * @brief: Port Configuration Table
- * Description: Port Configuration attribute table
- */
-typedef struct tag_sml_portcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1; /* valid:0-invalid; 1-valid */
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- u32 trunk_en:1; /* trunk enable: 0-disable; 1-enable */
- /* broadcast suppression enable: 0-disable; 1-enable */
- u32 bc_sups_en:1;
- /* unknown multicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_mc_sups_en:1;
- /* unknown unicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_uc_sups_en:1;
- u32 ovs_mirror_tx_en:1;
- /* ovs port enable: 0-disable; 1-enable */
- u32 ovs_port_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 qcn_en:1; /* qcn enable: 0-disable; 1-enable */
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 def_pri:3; /* default priority */
- u32 rsvd3:2;
- /* edge relay mode: 0-VEB; 1-VEPA(unsupport);
- * 2-Multi-Channel(unsupport)
- */
- u32 er_mode:2;
- /* edge relay identifier: [0~15]. support up to 16 er */
- u32 er_id:4;
- u32 trunk_id:8; /* trunk identifier: [0~255] */
-#else
- u32 trunk_id:8;
- u32 er_id:4;
- u32 er_mode:2;
- u32 rsvd3:2;
- u32 def_pri:3;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 ucapture_en:1;
- u32 qcn_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 ovs_port_en:1;
- u32 ovs_mirror_tx_en:1;
- u32 un_uc_sups_en:1;
- u32 un_mc_sups_en:1;
- u32 bc_sups_en:1;
- u32 trunk_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd2:2;
- u32 mtu:14;
- u32 rsvd3:1;
- u32 vlan_mode:3;
- u32 vlan_id:12;
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd3:1;
- u32 mtu:14;
- u32 rsvd2:2;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* q7_cos : ... : q0_cos = 4bits : ... : 4bits */
- u32 ovs_queue_cos;
-#else
- u32 ovs_queue_cos;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:10;
- u32 un_mc_car_id:11;
- u32 un_uc_car_id:11;
-#else
- u32 un_uc_car_id:11;
- u32 un_mc_car_id:11;
- u32 rsvd1:10;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd6:5;
- u32 bc_car_id:11;
- u32 pf_promiscuous_bitmap:16;
-#else
- u32 pf_promiscuous_bitmap:16;
- u32 bc_car_id:11;
- u32 rsvd6:5;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 fc_map;
-
- } fcoe_bs;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 start_queue:8;
- u32 queue_size:8;
- u32 mirror_func_id:16;
-#else
- u32 mirror_func_id:16;
- u32 queue_size:8;
- u32 start_queue:8;
-#endif
- } ovs_mirror_bs;
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 vlan;
- u16 dmac_h16;
-#else
- u16 dmac_h16;
- u16 vlan;
-#endif
- } fcoe_bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 dmac_l32;
-
- } fcoe_bs;
- u32 value;
- } dw7;
-
-} sml_portcfg_tbl_s;
-
-/**
- * Struct name: sml_taggedlist_tbl_s
- * @brief: Tagged List Table
- * Description: VLAN filtering Trunk/Hybrid type tagged list table
- */
-typedef struct tag_sml_taggedlist_tbl {
- u32 bitmap[TBL_ID_TAGGEDLIST_BITMAP32_NUM];
-} sml_taggedlist_tbl_s;
-
-/**
- * Struct name: sml_untaggedlist_tbl_s
- * @brief: Untagged List Table
- * Description: VLAN filtering Hybrid type Untagged list table
- */
-typedef struct tag_sml_untaggedlist_tbl {
- u32 bitmap[TBL_ID_UNTAGGEDLIST_BITMAP32_NUM];
-} sml_untaggedlist_tbl_s;
-
-/**
- * Struct name: sml_trunkfwd_tbl_s
- * @brief: Trunk Forward Table
- * Description: port aggregation Eth-Trunk forwarding table
- */
-typedef struct tag_sml_trunkfwd_tbl {
- u16 fwd_id[TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM]; /* dw0-dw15 */
-} sml_trunkfwd_tbl_s;
-
-/**
- * Struct name: sml_mac_tbl_head_u
- * @brief: Mac table request/response head
- * Description: MAC table, Hash API header
- */
-typedef union tag_sml_mac_tbl_head {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- u32 instance_id:6;
- u32 opid:5;
- u32 A:1;
- u32 S:1;
- u32 rsvd:14;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:14;
- u32 S:1;
- u32 A:1;
- u32 opid:5;
- u32 instance_id:6;
- u32 src:5;
-#endif
- } req_bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 code:2;
- u32 subcode:2;
- u32 node_index:28;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 node_index:28;
- u32 subcode:2;
- u32 code:2;
-#endif
- } rsp_bs;
-
- u32 value;
-} sml_mac_tbl_head_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_key_u
- * @brief: Mac Table Key
- * Description: MAC table key
- */
-typedef union tag_sml_mac_tbl_8_4_key {
- struct {
- u32 val0;
- u32 val1;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac_h16:16;
-
- u32 mac_m16:16;
- u32 mac_l16:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac_h16:16;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
- } bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac0:8;
- u32 mac1:8;
-
- u32 mac2:8;
- u32 mac3:8;
- u32 mac4:8;
- u32 mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac1:8;
- u32 mac0:8;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac5:8;
- u32 mac4:8;
- u32 mac3:8;
- u32 mac2:8;
-#endif
- } mac_bs;
-} sml_mac_tbl_8_4_key_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_item_u
- * @brief: Mac Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_mac_tbl_8_4_item {
- u32 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd:10;
- u32 host_id:2;
- u32 fwd_type:4;
- u32 fwd_id:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 host_id:2;
- u32 rsvd:10;
-#endif
- } bs;
-} sml_mac_tbl_8_4_item_u;
-
-/**
- * Struct name: sml_mac_tbl_key_item_s
- * @brief: Mac Table( 8 + 4 )
- * Description: MAC table Key + Item
- */
-typedef struct tag_sml_mac_tbl_8_4 {
- sml_mac_tbl_head_u head;
- sml_mac_tbl_8_4_key_u key;
- sml_mac_tbl_8_4_item_u item;
-} sml_mac_tbl_8_4_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_key_s
- * @brief: Vtep Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_key {
- u32 vtep_remote_ip;
- u32 rsvd;
-} sml_vtep_tbl_8_20_key_s;
-
-/**
- * Struct name: dmac_smac_u
- * @brief: Dmac & Smac for VxLAN encapsulation
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_dmac_smac {
- u16 mac_addr[6];
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 d_mac0:8;
- u16 d_mac1:8;
- u16 d_mac2:8;
- u16 d_mac3:8;
-
- u16 d_mac4:8;
- u16 d_mac5:8;
- u16 s_mac0:8;
- u16 s_mac1:8;
-
- u16 s_mac2:8;
- u16 s_mac3:8;
- u16 s_mac4:8;
- u16 s_mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 d_mac1:8;
- u16 d_mac0:8;
- u16 d_mac3:8;
- u16 d_mac2:8;
-
- u16 d_mac5:8;
- u16 d_mac4:8;
- u16 s_mac1:8;
- u16 s_mac0:8;
-
- u16 s_mac3:8;
- u16 s_mac2:8;
- u16 s_mac5:8;
- u16 s_mac4:8;
-#endif
- } bs;
-} dmac_smac_u;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_item_u
- * @brief: Vtep Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_item {
- dmac_smac_u dmac_smac;
- u32 source_ip;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 rsvd:12;
- u32 vlan:16; /* The PRI*/
-#else
- u32 vlan:16; /* The PRI*/
- u32 rsvd:12;
- u32 er_id:4;
-#endif
- } bs;
-
- u32 value;
- } misc;
-} sml_vtep_tbl_8_20_item_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_s
- * @brief: Vtep Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vtep_tbl_8_20_key_s key;
- sml_vtep_tbl_8_20_item_s item;
-} sml_vtep_tbl_8_20_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_key_s
- * @brief: Vtep Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_key {
- u32 udp_dest_port;
- u32 rsvd;
-} sml_vxlan_udp_portcfg_4_8_key_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_item_u
- * @brief: Vtep Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_item {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 odp_port:12;
- u32 dp_id:2;
- u32 resvd:20;
-#else
- u32 resvd:20;
- u32 dp_id:2;
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vxlan_udp_portcfg_4_8_item_s;
-
-/**
- * Struct name: sml_vxlan_udp_portcfg_4_8_s
- * @brief: Vxlan Dest Udp Port Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vxlan_udp_portcfg_4_8_key_s key;
- sml_vxlan_udp_portcfg_4_8_item_s item;
-} sml_vxlan_udp_portcfg_4_8_s;
-
-/**
- * Struct name: sml_vtep_er_info_s
- * @brief: Vtep Er Info Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_er_info {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- /* er_id as LT index but also used as entries,
- * facilitating service
- */
- u32 er_id:4;
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /* ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vtep_er_info_s;
-
-/**
- * Struct name: sml_logic_port_cfg_tbl_s
- * @brief: Logic Port Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sm_logic_port_cfg {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
- u32 dp_id:2; /* datapath id */
- u32 er_id:4;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 resvd:13;
-#else
- u32 resvd:13;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 er_id:4;
- u32 dp_id:2; /* datapath id */
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:1;
- u32 er_fwd_trunk_type:1;
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2;
- u32 er_id:4;
- u32 er_fwd_type:4;
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 rsvd4:1;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-} sml_logic_port_cfg_tbl_s;
-
-/* vport stats counter */
-typedef struct tag_vport_stats_ctr {
- u16 rx_packets; /* total packets received */
- u16 tx_packets; /* total packets transmitted */
- u16 rx_bytes; /* total bytes received */
- u16 tx_bytes; /* total bytes transmitted */
- u16 rx_errors; /* bad packets received */
- u16 tx_errors; /* packet transmit problems */
- u16 rx_dropped; /* no space in linux buffers */
- u16 tx_dropped; /* no space available in linux */
-} vport_stats_ctr_s;
-
-/**
- * Struct name: vport_s
- * @brief: Datapath Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_vport {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* dw0 */
- u32 valid:1;
- u32 learn_en:1;
- u32 type:4;
- u32 dp_id:2;
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- u32 rsvd:8;
-
- /* dw1 */
- u32 srctagl:12; /* the function used by parent context */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
-
- /* dw2 */
- u32 odp_port:12; /* on datapath port id */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
-#else
- /* dw0 */
- u32 rsvd:8;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 dp_id:2;
- u32 type:4;
- u32 learn_en:1;
- u32 valid:1;
-
- /* dw1 */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
- u32 srctagl:12; /* the function used by parent context */
-
- /* dw2 */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
- u32 odp_port:12; /* on datapath port id */
-#endif
-
- /* dw3 is er information and it is valid only
- * when mapping_type=1(logic port)
- */
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- u32 er_id:4; /* ERID */
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /*ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- /* dw4~dw7 */
- vport_stats_ctr_s stats; /* vport stats counters */
-
-} vport_s;
-
-/**
- * Struct name: sml_elb_tbl_elem_u
- * @brief: ELB Table Elem
- * Description: ELB leaf table members
- */
-typedef union tag_sml_elb_tbl_elem {
- struct {
- u32 fwd_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 fwd_type:4;
- u32 fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index_next:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index_next:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_elb_tbl_elem_u;
-
-/**
- * Struct name: sml_elb_tbl_s
- * @brief ELB Table
- * Description: ELB leaf table Entry
- */
-typedef struct tag_sml_elb_tbl {
- sml_elb_tbl_elem_u elem[TBL_ID_ELB_ENTRY_ELEM_NUM];
-} sml_elb_tbl_s;
-
-/**
- * Struct name: sml_vlan_tbl_elem_u
- * @brief: VLAN Table Elem
- * Description: VLAN broadcast table members
- */
-typedef union tag_sml_vlan_tbl_elem {
- u16 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 learn_en:1;
- u16 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 elb_index:15;
- u16 learn_en:1;
-#endif
- } bs;
-} sml_vlan_tbl_elem_u;
-
-/**
- * Struct name: sml_vlan_tbl_s
- * @brief: VLAN Table
- * Entry Description: VLAN broadcast table
- */
-typedef struct tag_sml_vlan_tbl {
- sml_vlan_tbl_elem_u elem[TBL_ID_VLAN_ENTRY_ELEM_NUM];
-} sml_vlan_tbl_s;
-
-/**
- * Struct name: sml_multicast_tbl_array_u
- * @brief: Multicast Table Elem
- * Description: multicast table members
- */
-typedef union tag_sml_multicast_tbl_elem {
- struct {
- u32 route_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 route_fwd_type:4;
- u32 route_fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 route_fwd_id:16;
- u32 route_fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_multicast_tbl_elem_u;
-
-/* Struct name: sml_multicast_tbl_s
- * @brief: Multicast Table
- * Entry Description: multicast table
- */
-typedef struct tag_sml_multicast_tbl {
- sml_multicast_tbl_elem_u elem[TBL_ID_MULTICAST_ENTRY_ELEM_NUM];
-} sml_multicast_tbl_s;
-
-/* Struct name: sml_observe_port_s
- * @brief: Observe Port Table
- * Description: observing port entries defined
- */
-typedef struct tag_sml_observe_port {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 rsvd0:11;
- u32 dst_type:4;
- u32 dst_id:16;
-#else
- u32 dst_id:16;
- u32 dst_type:4;
- u32 rsvd0:11;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:4;
- u32 vlan_id:12;
- u32 rsvd2:2;
- u32 cut_len:14;
-#else
- u32 cut_len:14;
- u32 rsvd2:2;
- u32 vlan_id:12;
- u32 rsvd1:4;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd_pad[2];
-} sml_observe_port_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_key_s
- * @brief ipmac filter table key
- * Description: ipmac filter key define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 func_id:16;
- u32 mac_h16:16;
-#else
- u32 mac_h16:16;
- u32 func_id:16;
-#endif
-
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mac_m16:16;
- u32 mac_l16:16;
-#else
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
-
- u32 ip;
- u32 rsvd;
-} sml_ipmac_tbl_16_12_key_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_item_s
- * @brief ipmac filter table item
- * Description: ipmac filter item define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_item {
- u32 rsvd[3];
-} sml_ipmac_tbl_16_12_item_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_key_s
- * @brief: ethtype filter table key
- * Description: ethtype filter key define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 group_id:16;
- u32 ethtype:16;
-#else
- u32 ethtype:16;
- u32 group_id:16;
-#endif
-
- u32 rsvd;
-} sml_ethtype_tbl_8_4_key_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_item_s
- * @brief ethtype filter table item
- * Description: ethtype filter item define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_item {
- u32 rsvd;
-} sml_ethtype_tbl_8_4_item_s;
-
-/* ACL to dfx record packets*/
-typedef enum {
- ACL_PKT_TX = 0,
- ACL_PKT_RX = 1,
-} sml_acl_pkt_dir_e;
-
-/* ACL policy table item*/
-typedef struct tag_sml_acl_policy_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop:1;
- u32 car_en:1;
- u32 car_id:12;
- u32 counter_type:2;
- u32 counter_id:16;
-#else
- u32 counter_id:16;
- u32 counter_type:2;
- u32 car_id:12;
- u32 car_en:1;
- u32 drop:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:7;
- u32 mirrior_en:1;
- u32 observer_port:10;
- u32 change_dscp:1;
- u32 new_dscp:6;
- u32 change_pkt_pri:1;
- u32 new_pkt_pri:3;
- u32 redirect_en:3;
-#else
- u32 redirect_en:3;
- u32 new_pkt_pri:3;
- u32 change_pkt_pri:1;
- u32 new_dscp:6;
- u32 change_dscp:1;
- u32 observer_port:10;
- u32 mirrior_en:1;
- u32 rsvd1:7;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 redirect_data;
- u32 rsvd2;
-} sml_acl_policy_tbl_s;
-
-typedef struct tag_sml_acl_ipv4_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* The alignment, match_key_type and
- * later field is a KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /* dw1&dw2 */
- u32 sipv4;
- u32 dipv4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd:14;
- u32 padding:16;
-#else
- u32 padding:16;
- u32 rsvd:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-} sml_acl_ipv4_key_s;
-
-typedef struct tag_sml_acl_ipv6_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* The alignment, match_key_type and
- * later field is a KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /*dw1~dw4 */
- u32 sipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd1:14;
- u32 tid2:2;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 tid2:2;
- u32 rsvd1:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-
- /*dw6~dw9 */
- u32 dipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid3:2;
- u32 rsvd3:14;
- u32 tid4:2;
- u32 rsvd4:14;
-#else
- u32 rsvd4:14;
- u32 tid4:2;
- u32 rsvd3:14;
- u32 tid3:2;
-#endif
- } bs;
- u32 value;
- } dw10;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw11;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw12;
-
- u32 dw13;
- u32 dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid5:2;
- u32 rsvd5:14;
- u32 tid6:2;
- u32 rsvd6:14;
-#else
- u32 rsvd6:14;
- u32 tid6:2;
- u32 rsvd5:14;
- u32 tid5:2;
-#endif
- } bs;
- u32 value;
- } dw15;
-
- u32 dw16;
- u32 dw17;
- u32 dw18;
- u32 dw19;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid7:2;
- u32 rsvd7:30;
-#else
- u32 rsvd7:30;
- u32 tid7:2;
-#endif
- } bs;
- u32 value;
- } dw20;
-} sml_acl_ipv6_key_s;
-
-/**
- * Struct name: sml_voq_map_table_s
- * @brief: voq_map_table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_voq_map_table {
- u16 voq_base[8];
-} sml_voq_map_table_s;
-
-/**
- * Struct name: sml_rss_context_u
- * @brief: rss_context
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_context {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 udp_ipv4:1;
- u32 udp_ipv6:1;
- u32 ipv4:1;
- u32 tcp_ipv4:1;
- u32 ipv6:1;
- u32 tcp_ipv6:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6_ext:1;
- u32 valid:1;
- u32 rsvd1:13;
- u32 def_qpn:10;
-#else
- u32 def_qpn:10;
- u32 rsvd1:13;
- u32 valid:1;
- u32 tcp_ipv6_ext:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6:1;
- u32 ipv6:1;
- u32 tcp_ipv4:1;
- u32 ipv4:1;
- u32 udp_ipv6:1;
- u32 udp_ipv4:1;
-#endif
- } bs;
-
- u32 value;
-} sml_rss_context_u;
-
-typedef struct tag_sml_rss_context_tbl {
- sml_rss_context_u element[TBL_ID_RSS_CONTEXT_NUM];
-} sml_rss_context_tbl_s;
-
-/**
- * Struct name: sml_rss_hash_u
- * @brief: rss_hash
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_hash {
- u8 rq_index[256];
-} sml_rss_hash_u;
-
-typedef struct tag_sml_rss_hash_tbl {
- sml_rss_hash_u element[TBL_ID_RSS_HASH_NUM];
-} sml_rss_hash_tbl_s;
-
-/**
- * Struct name: sml_lli_5tuple_key_s
- * @brief: lli_5tuple_key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_key {
- union {
- struct {
-/** Define the struct bits */
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- /* The tile need fill the Dest */
- u32 rt:1;
- u32 key_size:2;
- /* determines which action that engine will take */
- u32 profile_id:3;
- /* indicates that requestor expect
- * to receive a response data
- */
- u32 op_id:5;
- u32 a:1;
- u32 rsvd:12;
- u32 vld:1;
- u32 xy:1;
- u32 at:1;
-#else
- u32 at:1;
- u32 xy:1;
- u32 vld:1;
- /* indicates that requestor expect to
- * receive a response data
- */
- u32 rsvd:12;
- /* determines which action that engine will take*/
- u32 a:1;
- u32 op_id:5;
- u32 profile_id:3;
- u32 key_size:2;
- u32 rt:1;
- u32 src:5;
-#endif
- } bs;
-
-/* Define an unsigned member */
- u32 value;
- } dw0;
- union {
- struct {
- u32 rsvd:1;
- /* The tile need fill the Dest */
- u32 address:15;
-
- u32 table_type:5;
- u32 ip_type:1;
- u32 func_id:10;
- } bs;
-
- u32 value;
- } misc;
-
- u32 src_ip[4];
- u32 dst_ip[4];
-
- u16 src_port;
- u16 dst_port;
-
- u8 protocol;
- u8 tcp_flag;
- u8 fcoe_rctl;
- u8 fcoe_type;
- u16 eth_type;
-} sml_lli_5tuple_key_s;
-
-/**
- * Struct name: sml_lli_5tuple_rsp_s
- * @brief: lli_5tuple_rsp
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_rsp {
- union {
- struct {
- u32 state:4;
- u32 rsvd:28;
- } bs;
-
- u32 value;
- } dw0;
-
- u32 dw1;
-
- union {
- struct {
- u32 frame_size:16;
- u32 lli_en:8;
- u32 rsvd:8;
- } bs;
-
- u32 value;
- } dw2;
-
- u32 dw3;
-} sml_lli_5tuple_rsp_s;
-
-/**
- * Struct name: l2nic_rx_cqe_s.
- * @brief: l2nic_rx_cqe_s data structure.
- * Description:
- */
-typedef struct tag_l2nic_rx_cqe {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rx_done:1;
- u32 bp_en:1;
- u32 rsvd1:6;
- u32 lro_num:8;
- u32 checksum_err:16;
-#else
- u32 checksum_err:16;
- u32 lro_num:8;
- u32 rsvd1:6;
- u32 bp_en:1;
- u32 rx_done:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 length:16;
- u32 vlan:16;
-#else
- u32 vlan:16;
- u32 length:16;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_type:8;
- u32 rsvd0:2;
- u32 vlan_offload_en:1;
- u32 umbcast:2;
- u32 rsvd1:7;
- u32 pkt_types:12;
-#else
- u32 pkt_types:12;
- u32 rsvd1:7;
- u32 umbcast:2;
- u32 vlan_offload_en:1;
- u32 rsvd0:2;
- u32 rss_type:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
- u32 rss_hash_value;
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 if_1588:1;
- u32 if_tx_ts:1;
- u32 if_rx_ts:1;
- u32 rsvd:1;
- u32 msg_1588_type:4;
- u32 msg_1588_offset:8;
- u32 tx_ts_seq:16;
-#else
- u32 tx_ts_seq:16;
- u32 msg_1588_offset:8;
- u32 msg_1588_type:4;
- u32 rsvd:1;
- u32 if_rx_ts:1;
- u32 if_tx_ts:1;
- u32 if_1588:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 msg_1588_ts;
- } bs;
-
- struct {
- u32 rsvd0:12;
- /* for ovs. traffic type: 0-default l2nic pkt,
- * 1-fallback traffic, 2-miss upcall traffic,
- * 2-command
- */
- u32 traffic_type:4;
- /* for ovs. traffic from: vf_id,
- * only support traffic_type=0(default l2nic)
- * or 2(miss upcall)
- */
- u32 traffic_from:16;
- } ovs_bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
- u32 lro_ts;
- } bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 rsvd0;
- } bs;
-
- u32 localtag; /* for ovs */
-
- u32 value;
- } dw7;
-} l2nic_rx_cqe_s;
-
-typedef union tag_sml_global_queue_tbl_elem {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src_tag_l:16;
- u32 local_qid:8;
- u32 rsvd:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:8;
- u32 local_qid:8;
- u32 src_tag_l:16;
-#endif
- } bs;
-
- u32 value;
-} sml_global_queue_tbl_elem_u;
-
-typedef struct tag_sml_global_queue_tbl {
- sml_global_queue_tbl_elem_u element[TBL_ID_GLOBAL_QUEUE_NUM];
-} sml_global_queue_tbl_s;
-
-typedef struct tag_sml_dfx_log_tbl {
- u32 wr_init_pc_h32; /* Initial value of write_pc*/
- u32 wr_init_pc_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 state:8;
- u32 func_en:1;
- u32 srctag:12;
- u32 max_num:11; /* Data block highest value*/
-#else
- u32 max_num:11;
- u32 srctag:12;
- u32 func_en:1;
- u32 state:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- u32 ci_index;
-} sml_dfx_log_tbl_s;
-
-typedef struct tag_sml_glb_capture_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 max_num:15;
- u32 rsvd:16;
-#else
- u32 rsvd:16;
- u32 max_num:15;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- u32 discard_addr_h32;
- u32 discard_addr_l32;
-
- u32 rsvd0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 mode:5;
- u32 direct:2;
- u32 offset:8;
- u32 cos:3;
- u32 max_num:13;
-#else
- u32 max_num:13;
- u32 cos:3;
- u32 offset:8;
- u32 direct:2;
- u32 mode:5;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- u32 data_vlan;
-
- u32 condition_addr_h32;
- u32 condition_addr_l32;
-
-} sml_glb_capture_tbl_s;
-
-typedef struct tag_sml_cqe_addr_tbl {
- u32 cqe_first_addr_h32;
- u32 cqe_first_addr_l32;
- u32 cqe_last_addr_h32;
- u32 cqe_last_addr_l32;
-
-} sml_cqe_addr_tbl_s;
-
-/**
- * Struct name: sml_ucode_exec_info_tbl_s
- * @brief: ucode execption info Table
- * Description: microcode exception information table
- */
-typedef struct tag_ucode_exec_info_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 wptr_cpb_ack_str:4;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 pr_ret_vld:1;
- u32 oeid_pd_pkt:1;
- u32 rptr_cmd:4;
- u32 wptr_cmd:4;
- u32 src_tag_l:12;
-#else
- u32 src_tag_l:12;
- u32 wptr_cmd:4;
- u32 rptr_cmd:4;
- u32 oeid_pd_pkt:1;
- u32 pr_ret_vld:1;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 wptr_cpb_ack_str:4;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 fq:16;
- u32 exception_type:4;
- u32 rptr_cpb_ack_str:4;
- u32 header_oeid:8;
-#else
- u32 header_oeid:8;
- u32 rptr_cpb_ack_str:4;
- u32 exception_type:4;
- u32 fq:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 oeid_pd_data_l32;
- u32 oeid_pd_data_m32;
-} sml_ucode_exec_info_s;
-
-typedef struct rq_iq_mapping_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rqid:16;
- u32 iqid:8;
- u32 rsvd:8;
-#else
- u32 rsvd:8;
- u32 iqid:8;
- u32 rqid:16;
-#endif
- } bs;
- u32 value;
- } dw[4];
-} sml_rq_iq_mapping_tbl_s;
-
-/* nic_ucode_rq_ctx table define
- */
-typedef struct nic_ucode_rq_ctx {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 max_count:10;
- u32 cqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 wqe_tmpl:6;
- u32 psge_valid:1;
- u32 rsvd1:1;
- u32 owner:1;
- u32 ceq_en:1;
-#else
- u32 ceq_en:1;
- u32 owner:1;
- u32 rsvd1:1;
- u32 psge_valid:1;
- u32 wqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 cqe_tmpl:6;
- u32 max_count:10;
-#endif
- } bs;
- u32 dw0;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Interrupt number that L2NIC engine tell SW
- * if generate int instead of CEQ
- */
- u32 int_num:10;
- u32 ceq_count:10;
- /* product index */
- u32 pi:12;
-#else
- /* product index */
- u32 pi:12;
- u32 ceq_count:10;
- /* Interrupt number that L2NIC engine tell SW
- * if generate int instead of CEQ
- */
- u32 int_num:10;
-#endif
- } bs0;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* CEQ arm, L2NIC engine will clear it after send ceq,
- * driver should set it by CMD Q after receive all pkt.
- */
- u32 ceq_arm:1;
- u32 eq_id:5;
- u32 rsvd2:4;
- u32 ceq_count:10;
- /* product index */
- u32 pi:12;
-#else
- /* product index */
- u32 pi:12;
- u32 ceq_count:10;
- u32 rsvd2:4;
- u32 eq_id:5;
- /* CEQ arm, L2NIC engine will clear it after send ceq,
- * driver should set it by CMD Q after receive all pkt.
- */
- u32 ceq_arm:1;
-#endif
- } bs1;
- u32 dw1;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* consumer index */
- u32 ci:12;
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
-#else
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
- /* consumer index */
- u32 ci:12;
-#endif
- } bs2;
- u32 dw2;
- };
-
- /* WQE page address of current CI point to, low part */
- u32 ci_wqe_page_addr_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_min:7;
- u32 prefetch_max:11;
- u32 prefetch_cache_threshold:14;
-#else
- u32 prefetch_cache_threshold:14;
- u32 prefetch_max:11;
- u32 prefetch_min:7;
-#endif
- } bs3;
- u32 dw3;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:31;
- /* ownership of WQE */
- u32 prefetch_owner:1;
-#else
- /* ownership of WQE */
- u32 prefetch_owner:1;
- u32 rsvd3:31;
-#endif
- } bs4;
- u32 dw4;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_ci:12;
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
-#else
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
- u32 prefetch_ci:12;
-#endif
- } bs5;
- u32 dw5;
- };
-
- /* low part */
- u32 prefetch_ci_wqe_page_addr_lo;
- /* host mem GPA, high part */
- u32 pi_gpa_hi;
- /* host mem GPA, low part */
- u32 pi_gpa_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:9;
- u32 ci_cla_tbl_addr_hi:23;
-#else
- u32 ci_cla_tbl_addr_hi:23;
- u32 rsvd4:9;
-#endif
- } bs6;
- u32 dw6;
- };
-
- u32 ci_cla_tbl_addr_lo;
-
-} nic_ucode_rq_ctx_s;
-
-#define LRO_TSO_SPACE_SIZE (240) /* (15 * 16) */
-#define RQ_CTX_SIZE (48)
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
deleted file mode 100644
index 39d0516c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0*/
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_PUB_H__
-#define __SML_TABLE_PUB_H__
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-/* Un-FPGA(ESL/EMU/EDA) specification */
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* ER specification*/
-#define L2_ER_SPEC (16)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (512)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (4096)
-#define TBL_ID_MULTICAST_SPEC (1024)
-#define TBL_ID_TRUNK_SPEC (256)
-#define TBL_ID_ELB_SPEC (18432)
-#define TBL_ID_TAGGEDLIST_SPEC (80)
-#define TBL_ID_UNTAGGEDLIST_SPEC (16)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (4096)
-
-#else /* FPGA scenario specifications */
-
-/* ER specification*/
-#define L2_ER_SPEC (4)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (64)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (256)
-#define TBL_ID_MULTICAST_SPEC (32)
-#define TBL_ID_TRUNK_SPEC (16)
-#define TBL_ID_ELB_SPEC (1152)
-#define TBL_ID_TAGGEDLIST_SPEC (20)
-#define TBL_ID_UNTAGGEDLIST_SPEC (4)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (1024)
-#endif
-
-/**
- * Number of entries elements defined
- */
-#define TBL_ID_ELB_ENTRY_ELEM_NUM 2
-#define TBL_ID_VLAN_ENTRY_ELEM_NUM 8
-#define TBL_ID_MULTICAST_ENTRY_ELEM_NUM 2
-#define TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM 32
-#define TBL_ID_TAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_UNTAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_GLOBAL_QUEUE_NUM 4
-#define TBL_ID_RSS_CONTEXT_NUM 4
-#define TBL_ID_RSS_HASH_NUM 4
-
-/**
- * NIC receiving mode defined
- */
-#define NIC_RX_MODE_UC 0x01 /* 0b00001 */
-#define NIC_RX_MODE_MC 0x02 /* 0b00010 */
-#define NIC_RX_MODE_BC 0x04 /* 0b00100 */
-#define NIC_RX_MODE_MC_ALL 0x08 /* 0b01000 */
-#define NIC_RX_MODE_PROMISC 0x10 /* 0b10000 */
-
-/**
- * Maximum number of HCAR
- */
-#define QOS_MAX_HCAR_NUM (12)
-
-/**
- * VLAN Table, Multicast Table, ELB Table Definitions
- * The Table index and sub id index
- */
-#define VSW_DEFAULT_VLAN0 (0)
-#define INVALID_ELB_INDEX (0)
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* Supports ESL/EMU/EDA 16ER * 4K VLAN, 1 entry stored 8 vlan*/
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0xF) << 9) | (((vlan_id) & 0xFFF) >> 3))
-#else
-/*FPGA supports only 4ER * 1K VLAN, 1 entry stored 8 vlan*/
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0x3) << 7) | (((vlan_id) & 0x3FF) >> 3))
-#endif
-#define GET_VLAN_ENTRY_SUBID(vlan_id) ((vlan_id) & 0x7)
-
-#define GET_MULTICAST_TABLE_INDEX(mc_id) ((mc_id) >> 1)
-#define GET_MULTICAST_ENTRY_SUBID(mc_id) ((mc_id) & 0x1)
-
-#define GET_ELB_TABLE_INDEX(elb_id) ((elb_id) >> 1)
-#define GET_ELB_ENTRY_SUBID(elb_id) ((elb_id) & 0x1)
-
-/**
- * taggedlist_table and untaggedlist_table access offset calculation
- */
-#define GET_TAGLIST_TABLE_INDEX(list_id, vlan_id) \
- (((list_id) << 5) | (((vlan_id) & 0xFFF) >> 7))
-#define GET_TAGLIST_TABLE_BITMAP_IDX(vlan_id) (((vlan_id) >> 5) & 0x3)
-#define GET_TAGLIST_TABLE_VLAN_BIT(vlan_id) \
- (0x1UL << ((vlan_id) & 0x1F))
-
-#define TRUNK_FWDID_NOPORT 0xFFFF
-
-/**
- * MAC type definition
- */
-typedef enum {
- MAC_TYPE_UC = 0,
- MAC_TYPE_BC,
- MAC_TYPE_MC,
- MAC_TYPE_RSV,
-} mac_type_e;
-
-/**
- * Ethernet port definition
- */
-typedef enum {
- MAG_ETH_PORT0 = 0,
- MAG_ETH_PORT1,
- MAG_ETH_PORT2,
- MAG_ETH_PORT3,
- MAG_ETH_PORT4,
- MAG_ETH_PORT5,
- MAG_ETH_PORT6,
- MAG_ETH_PORT7,
- MAG_ETH_PORT8,
- MAG_ETH_PORT9,
-} mag_eth_port_e;
-
-/**
- * vlan filter type defined
- */
-typedef enum {
- VSW_VLAN_MODE_ALL = 0,
- VSW_VLAN_MODE_ACCESS,
- VSW_VLAN_MODE_TRUNK,
- VSW_VLAN_MODE_HYBRID,
- VSW_VLAN_MODE_QINQ,
- VSW_VLAN_MODE_MAX,
-} vsw_vlan_mode_e;
-
-/**
- * MAC table query forwarding port type definition
- */
-typedef enum {
- VSW_FWD_TYPE_FUNCTION = 0, /* forward type function */
- VSW_FWD_TYPE_VMDQ, /* forward type function-queue(vmdq) */
- VSW_FWD_TYPE_PORT, /* forward type port */
- VSW_FWD_TYPE_FIC, /* forward type fic */
- VSW_FWD_TYPE_TRUNK, /* forward type trunk */
- VSW_FWD_TYPE_DP, /* forward type DP */
- VSW_FWD_TYPE_MC, /* forward type multicast */
-
- /* START: is not used and has to be removed */
- VSW_FWD_TYPE_BC, /* forward type broadcast */
- VSW_FWD_TYPE_PF, /* forward type pf */
- /* END: is not used and has to be removed */
-
- VSW_FWD_TYPE_NULL, /* forward type null */
-} vsw_fwd_type_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- VSW_ETRK_MODE_STANDBY,
- VSW_ETRK_MODE_SMAC,
- VSW_ETRK_MODE_DMAC,
- VSW_ETRK_MODE_SMACDMAC,
- VSW_ETRK_MODE_SIP,
- VSW_ETRK_MODE_DIP,
- VSW_ETRK_MODE_SIPDIP,
- VSW_ETRK_MODE_5TUPLES,
- VSW_ETRK_MODE_LACP,
- VSW_ETRK_MODE_MAX,
-} vsw_etrk_mode_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- TRUNK_MODE_STANDBY,
- TRUNK_MODE_SMAC,
- TRUNK_MODE_DMAC,
- TRUNK_MODE_SMACDMAC,
- TRUNK_MODE_SIP,
- TRUNK_MODE_DIP,
- TRUNK_MODE_SIPDIP,
- TRUNK_MODE_5TUPLES,
- TRUNK_MODE_SIPV6,
- TRUNK_MODE_DIPV6,
- TRUNK_MODE_SIPDIPV6,
- TRUNK_MODE_5TUPLESV6,
- TRUNK_MODE_LACP,
-} trunk_mode_s;
-
-/* ACL key type */
-enum {
- ACL_KEY_IPV4 = 0,
- ACL_KEY_IPV6
-};
-
-/* ACL filter action */
-enum {
- ACL_ACTION_PERMIT = 0,
- ACL_ACTION_DENY
-};
-
-/* ACL action button*/
-enum {
- ACL_ACTION_OFF = 0,
- ACL_ACTION_ON,
-};
-
-/* ACL statistic action*/
-enum {
- ACL_ACTION_NO_COUNTER = 0,
- ACL_ACTION_COUNT_PKT,
- ACL_ACTION_COUNT_PKT_LEN,
-};
-
-/* ACL redirect action*/
-enum {
- ACL_ACTION_FORWAR_UP = 1,
- ACL_ACTION_FORWAR_PORT,
- ACL_ACTION_FORWAR_NEXT_HOP,
- ACL_ACTION_FORWAR_OTHER,
-};
-
-enum {
- CEQ_TIMER_STOP = 0,
- CEQ_TIMER_START,
-};
-
-enum {
- CEQ_API_DISPATCH = 0,
- CEQ_API_NOT_DISPATCH,
-};
-
-enum {
- CEQ_MODE = 1,
- INT_MODE,
-};
-
-enum {
- ER_MODE_VEB,
- ER_MODE_VEPA,
- ER_MODE_MULTI,
- ER_MODE_NULL,
-};
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_PUB_H__ */
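As a quick illustration of the VLAN lookup macros in the removed hinic_sml_table_pub.h above: they only pack an edge-relay id and a VLAN id into one entry index plus a sub-entry id (eight VLANs stored per entry). The following standalone sketch reproduces that arithmetic for the non-FPGA layout; it is illustration only and is not part of the driver or of this patch.

/* Standalone illustration of GET_VLAN_TABLE_INDEX / GET_VLAN_ENTRY_SUBID
 * for the non-FPGA case: 16 ERs x 4K VLANs, 8 VLANs stored per entry.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t vlan_table_index(uint32_t er_id, uint32_t vlan_id)
{
	/* upper bits select the ER, remaining bits are vlan_id / 8 */
	return ((er_id & 0xF) << 9) | ((vlan_id & 0xFFF) >> 3);
}

static uint32_t vlan_entry_subid(uint32_t vlan_id)
{
	/* position of the VLAN inside the 8-wide entry */
	return vlan_id & 0x7;
}

int main(void)
{
	/* er_id 3, vlan 100 -> index (3 << 9) | 12 = 1548, sub-entry 4 */
	printf("index=%u subid=%u\n",
	       vlan_table_index(3, 100), vlan_entry_subid(100));
	return 0;
}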
--
1.8.3
[PATCH 1/4] btrfs: delayed-inode: Kill the BUG_ON() in btrfs_delete_delayed_dir_index()
by Yang Yingliang 17 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.4-rc1
commit 933c22a7512c5c09b1fdc46b557384efe8d03233
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19813
-------------------------------------------------
There is one report of a fuzzed image which leads to BUG_ON() in
btrfs_delete_delayed_dir_index().
Although that fuzzed image can already be addressed by the enhanced
extent-tree error handler, it's still better to hunt down more BUG_ON()s.
This patch will hunt down two BUG_ON()s in
btrfs_delete_delayed_dir_index():
- One for error from btrfs_delayed_item_reserve_metadata()
Instead of BUG_ON(), we output an error message, free the item,
and return the error.
All callers of this function handle the error by aborting the current
transaction.
- One for possible EEXIST from __btrfs_add_delayed_deletion_item()
That function can return -EEXIST.
We already have a good enough error message for that, only need to
clean up the reserved metadata space and allocated item.
To help with the above cleanup, also modify __btrfs_remove_delayed_item(),
called in btrfs_release_delayed_item(), to skip items that are not
associated with any delayed node.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203253
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/delayed-inode.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/delayed-inode.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e9522f2..5dc6141 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -471,6 +471,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
struct rb_root *root;
struct btrfs_delayed_root *delayed_root;
+ /* Not associated with any delayed_node */
+ if (!delayed_item->delayed_node)
+ return;
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
@@ -1526,7 +1529,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
*/
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_err(trans->fs_info,
+"metadata reservation failed for delayed dir item deltiona, should have been reserved");
+ btrfs_release_delayed_item(item);
+ goto end;
+ }
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
@@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
- BUG();
+ btrfs_delayed_item_release_metadata(dir->root, item);
+ btrfs_release_delayed_item(item);
}
mutex_unlock(&node->mutex);
end:
--
1.8.3
[PATCH 01/30] net: hns3: add one printing information in hnae3_unregister_client() function
by Yang Yingliang 17 Apr '20
From: shenhao <shenhao21(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
-----------------------------------------------------
This patch adds an error message so the user knows that the client does
not exist when trying to unregister a nonexistent client.
Signed-off-by: Guangbin Huang <huangguangbin2(a)huawei.com>
Signed-off-by: shenhao <shenhao21(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 4afa509..53a87b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -156,6 +156,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
if (!existed) {
mutex_unlock(&hnae3_common_lock);
+ pr_err("client %s does not exist!\n", client->name);
return;
}
--
1.8.3
From: youshengzui <youshengzui(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------
This patch updates the hns3 driver version to 1.9.37.4.
Signed-off-by: youshengzui <youshengzui(a)huawei.com>
Reviewed-by: Weiwei Deng <dengweiwei(a)huawei.com>
Reviewed-by: Zhaohui Zhong <zhongzhaohui(a)huawei.com>
Reviewed-by: Junxin Chen <chenjunxin1(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +-
5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f795bfd..98dfa7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.9.37.3"
+#define HNAE3_MOD_VERSION "1.9.37.4"
#define HNAE3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
index 3977883..630f642 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
@@ -4,7 +4,7 @@
#ifndef __HNS3_CAE_VERSION_H__
#define __HNS3_CAE_VERSION_H__
-#define HNS3_CAE_MOD_VERSION "1.9.37.3"
+#define HNS3_CAE_MOD_VERSION "1.9.37.4"
#define CMT_ID_LEN 8
#define RESV_LEN 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 5f1d5a3..9e11ec3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.37.3"
+#define HNS3_MOD_VERSION "1.9.37.4"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0146470..5e64d2a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,7 +12,7 @@
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.37.3"
+#define HCLGE_MOD_VERSION "1.9.37.4"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 51af1050..596618e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,7 +10,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.9.37.3"
+#define HCLGEVF_MOD_VERSION "1.9.37.4"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
--
1.8.3
[PATCH 01/40] perf/amd/uncore: Replace manual sampling check with CAP_NO_INTERRUPT flag
by Yang Yingliang 17 Apr '20
From: Kim Phillips <kim.phillips(a)amd.com>
[ Upstream commit f967140dfb7442e2db0868b03b961f9c59418a1b ]
Enable the sampling check in kernel/events/core.c::perf_event_open(),
which returns the more appropriate -EOPNOTSUPP.
BEFORE:
$ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
Error:
The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (l3_request_g1.caching_l3_cache_accesses).
/bin/dmesg | grep -i perf may provide additional information.
With nothing relevant in dmesg.
AFTER:
$ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
Error:
l3_request_g1.caching_l3_cache_accesses: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'
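For illustration only, a small user-space sketch of what the new behaviour looks like at the syscall level: a sampling request (non-zero sample_period) against a PMU that sets PERF_PMU_CAP_NO_INTERRUPT is rejected by perf_event_open() with EOPNOTSUPP. The PMU name "amd_nb" and the config value below are placeholders for this sketch, not taken from the patch; running it needs an AMD system and sufficient perf privileges.

#define _GNU_SOURCE
#include <errno.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type = 0;
	/* PMU name is an assumption; the sysfs name varies by CPU family */
	FILE *f = fopen("/sys/bus/event_source/devices/amd_nb/type", "r");
	long fd;

	if (!f || fscanf(f, "%u", &type) != 1) {
		fprintf(stderr, "uncore PMU not found on this system\n");
		if (f)
			fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0;		/* placeholder event encoding */
	attr.sample_period = 100000;	/* sampling request */

	fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		printf("perf_event_open failed: %s (EOPNOTSUPP expected with this patch)\n",
		       strerror(errno));
	else
		close(fd);
	return 0;
}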
Fixes: c43ca5091a37 ("perf/x86/amd: Add support for AMD NB and L2I "uncore" counters")
Signed-off-by: Kim Phillips <kim.phillips(a)amd.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Acked-by: Peter Zijlstra <peterz(a)infradead.org>
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20200311191323.13124-1-kim.phillips@amd.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/x86/events/amd/uncore.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index baa7e36..604a855 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -193,20 +193,18 @@ static int amd_uncore_event_init(struct perf_event *event)
/*
* NB and Last level cache counters (MSRs) are shared across all cores
- * that share the same NB / Last level cache. Interrupts can be directed
- * to a single target core, however, event counts generated by processes
- * running on other cores cannot be masked out. So we do not support
- * sampling and per-thread events.
+ * that share the same NB / Last level cache. On family 16h and below,
+ * Interrupts can be directed to a single target core, however, event
+ * counts generated by processes running on other cores cannot be masked
+ * out. So we do not support sampling and per-thread events via
+ * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
*/
- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
- return -EINVAL;
/* NB and Last level cache counters do not have usr/os/guest/host bits */
if (event->attr.exclude_user || event->attr.exclude_kernel ||
event->attr.exclude_host || event->attr.exclude_guest)
return -EINVAL;
- /* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@@ -314,6 +312,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
static struct pmu amd_llc_pmu = {
@@ -324,6 +323,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
--
1.8.3
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
In this patch, we add DFX statistics for I/O operations, including
send/recv/send_fail/send_busy counters. We also allow an
overtime_threshold to be defined to identify requests whose processing
time exceeds it.
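A rough user-space sketch of how these counters are meant to be used follows; it is not part of the patch. Only the file names under the hpre_dfx debugfs directory come from the code below; the full debugfs path (including the PCI device in it) is an assumption that depends on where the qm debug root is created on the system, and, per hpre_is_bd_timeout(), the threshold is compared against an elapsed time in microseconds.

#include <stdio.h>

/* assumed path; adjust to the actual qm debugfs root on the target */
#define HPRE_DFX_DIR "/sys/kernel/debug/hpre/0000:79:00.0/hpre_dfx"

static long read_counter(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), HPRE_DFX_DIR "/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	FILE *f = fopen(HPRE_DFX_DIR "/overtime_thrhld", "w");

	if (f) {
		fprintf(f, "%d\n", 1000);	/* flag requests slower than 1000 us */
		fclose(f);
	}
	printf("send=%ld recv=%ld busy=%ld over_thrhld=%ld\n",
	       read_counter("send_cnt"), read_counter("recv_cnt"),
	       read_counter("send_busy_cnt"), read_counter("over_thrhld_cnt"));
	return 0;
}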
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Mingqiang Ling <lingmingqiang(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 16 ++++++
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 89 ++++++++++++++++++++++++-----
drivers/crypto/hisilicon/hpre/hpre_main.c | 55 ++++++++++++++++++
3 files changed, 146 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 42b2f2a..203eb2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,16 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,12 +44,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 7610e13..b68b30c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -68,6 +69,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +92,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +122,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +133,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -308,12 +316,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -336,30 +348,67 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
@@ -435,6 +484,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@@ -443,7 +515,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -464,11 +535,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -646,7 +715,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -676,11 +744,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -698,7 +763,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -735,11 +799,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f727158..2ede8d78 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -169,6 +169,15 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld"
+};
+
#ifdef CONFIG_CRYPTO_QM_UACCE
static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
@@ -588,6 +597,33 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
+ return -EINVAL;
+ }
+
+ atomic64_set(&dfx_item->value, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -691,6 +727,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
@@ -709,6 +761,9 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
--
1.8.3