Kernel
Threads by month
- ----- 2025 -----
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- 54 participants
- 16903 discussions

[openeuler:OLK-5.10 2656/2656] kernel/rcu/update.o: warning: objtool: synchronize_rcu_tasks_trace()+0xde: unreachable instruction
by kernel test robot 07 Jan '25
by kernel test robot 07 Jan '25
07 Jan '25
tree: https://gitee.com/openeuler/kernel.git OLK-5.10
head: 9be6ae394c896999214be0f875765d2c58d15b13
commit: 2509f2079e7247ed0d695bdb69c3d019a700a4b8 [2656/2656] rcu-tasks: Make rude RCU-Tasks work well with CPU hotplug
config: x86_64-buildonly-randconfig-006-20250107 (https://download.01.org/0day-ci/archive/20250107/202501071757.EREnWTHH-lkp@…)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250107/202501071757.EREnWTHH-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501071757.EREnWTHH-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> kernel/rcu/update.o: warning: objtool: synchronize_rcu_tasks_trace()+0xde: unreachable instruction
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
1
0
CVE-2024-56642
Eric Dumazet (1):
tipc: fix NULL deref in cleanup_bearer()
Kuniyuki Iwashima (1):
tipc: Fix use-after-free of kernel socket in cleanup_bearer().
Xin Long (1):
tipc: remove ub->ubsock checks
net/tipc/udp_media.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
--
2.34.1
2
4
From: Zijian Zhang <zijianzhang(a)bytedance.com>
stable inclusion
from stable-v6.6.64
commit 98c7ea7d11f2588e8197db042e0291e4ac8f8346
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBEGFC
CVE: CVE-2024-56720
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
-------------------------------------------------
[ Upstream commit 5d609ba262475db450ba69b8e8a557bd768ac07a ]
Several fixes to bpf_msg_pop_data,
1. In sk_msg_shift_left, we should put_page
2. if (len == 0), return early is better
3. pop the entire sk_msg (last == msg->sg.size) should be supported
4. Fix for the value of variable "a"
5. In sk_msg_shift_left, after shifting, i has already pointed to the next
element. Additional sk_msg_iter_var_next may result in BUG.
Fixes: 7246d8ed4dcc ("bpf: helper to pop data from messages")
Signed-off-by: Zijian Zhang <zijianzhang(a)bytedance.com>
Reviewed-by: John Fastabend <john.fastabend(a)gmail.com>
Link: https://lore.kernel.org/r/20241106222520.527076-8-zijianzhang@bytedance.com
Signed-off-by: Martin KaFai Lau <martin.lau(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Liu Jian <liujian56(a)huawei.com>
---
net/core/filter.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 48dd2896ee1d9..131a21b5463cc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2889,8 +2889,10 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
+ struct scatterlist *sge = sk_msg_elem(msg, i);
int prev;
+ put_page(sg_page(sge));
do {
prev = i;
sk_msg_iter_var_next(i);
@@ -2927,6 +2929,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
if (unlikely(flags))
return -EINVAL;
+ if (unlikely(len == 0))
+ return 0;
+
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
@@ -2939,7 +2944,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
} while (i != msg->sg.end);
/* Bounds checks: start and pop must be inside message */
- if (start >= offset + l || last >= msg->sg.size)
+ if (start >= offset + l || last > msg->sg.size)
return -EINVAL;
space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
@@ -2968,12 +2973,12 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
*/
if (start != offset) {
struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
- int a = start;
+ int a = start - offset;
int b = sge->length - pop - a;
sk_msg_iter_var_next(i);
- if (pop < sge->length - a) {
+ if (b > 0) {
if (space) {
sge->length = a;
sk_msg_shift_right(msg, i);
@@ -2992,7 +2997,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
if (unlikely(!page))
return -ENOMEM;
- sge->length = a;
orig = sg_page(sge);
from = sg_virt(sge);
to = page_address(page);
@@ -3002,7 +3006,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
put_page(orig);
}
pop = 0;
- } else if (pop >= sge->length - a) {
+ } else {
pop -= (sge->length - a);
sge->length = a;
}
@@ -3036,7 +3040,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
pop -= sge->length;
sk_msg_shift_left(msg, i);
}
- sk_msg_iter_var_next(i);
}
sk_mem_uncharge(msg->sk, len - pop);
--
2.34.1
2
5

07 Jan '25
From: Zijian Zhang <zijianzhang(a)bytedance.com>
stable inclusion
from stable-v5.10.231
commit dbedc7e142df5ea238a46fdd7462c1c42cd36a10
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBEANQ
CVE: CVE-2024-56633
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
-------------------------------------------------
[ Upstream commit ca70b8baf2bd125b2a4d96e76db79375c07d7ff2 ]
The current sk memory accounting logic in __SK_REDIRECT is pre-uncharging
tosend bytes, which is either msg->sg.size or a smaller value apply_bytes.
Potential problems with this strategy are as follows:
- If the actual sent bytes are smaller than tosend, we need to charge some
bytes back, as in line 487, which is okay but seems not clean.
- When tosend is set to apply_bytes, as in line 417, and (ret < 0), we may
miss uncharging (msg->sg.size - apply_bytes) bytes.
[...]
415 tosend = msg->sg.size;
416 if (psock->apply_bytes && psock->apply_bytes < tosend)
417 tosend = psock->apply_bytes;
[...]
443 sk_msg_return(sk, msg, tosend);
444 release_sock(sk);
446 origsize = msg->sg.size;
447 ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
448 msg, tosend, flags);
449 sent = origsize - msg->sg.size;
[...]
454 lock_sock(sk);
455 if (unlikely(ret < 0)) {
456 int free = sk_msg_free_nocharge(sk, msg);
458 if (!cork)
459 *copied -= free;
460 }
[...]
487 if (eval == __SK_REDIRECT)
488 sk_mem_charge(sk, tosend - sent);
[...]
When running the selftest test_txmsg_redir_wait_sndmem with txmsg_apply,
the following warning will be reported:
------------[ cut here ]------------
WARNING: CPU: 6 PID: 57 at net/ipv4/af_inet.c:156 inet_sock_destruct+0x190/0x1a0
Modules linked in:
CPU: 6 UID: 0 PID: 57 Comm: kworker/6:0 Not tainted 6.12.0-rc1.bm.1-amd64+ #43
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
Workqueue: events sk_psock_destroy
RIP: 0010:inet_sock_destruct+0x190/0x1a0
RSP: 0018:ffffad0a8021fe08 EFLAGS: 00010206
RAX: 0000000000000011 RBX: ffff9aab4475b900 RCX: ffff9aab481a0800
RDX: 0000000000000303 RSI: 0000000000000011 RDI: ffff9aab4475b900
RBP: ffff9aab4475b990 R08: 0000000000000000 R09: ffff9aab40050ec0
R10: 0000000000000000 R11: ffff9aae6fdb1d01 R12: ffff9aab49c60400
R13: ffff9aab49c60598 R14: ffff9aab49c60598 R15: dead000000000100
FS: 0000000000000000(0000) GS:ffff9aae6fd80000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffec7e47bd8 CR3: 00000001a1a1c004 CR4: 0000000000770ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
<TASK>
? __warn+0x89/0x130
? inet_sock_destruct+0x190/0x1a0
? report_bug+0xfc/0x1e0
? handle_bug+0x5c/0xa0
? exc_invalid_op+0x17/0x70
? asm_exc_invalid_op+0x1a/0x20
? inet_sock_destruct+0x190/0x1a0
__sk_destruct+0x25/0x220
sk_psock_destroy+0x2b2/0x310
process_scheduled_works+0xa3/0x3e0
worker_thread+0x117/0x240
? __pfx_worker_thread+0x10/0x10
kthread+0xcf/0x100
? __pfx_kthread+0x10/0x10
ret_from_fork+0x31/0x40
? __pfx_kthread+0x10/0x10
ret_from_fork_asm+0x1a/0x30
</TASK>
---[ end trace 0000000000000000 ]---
In __SK_REDIRECT, a more concise way is delaying the uncharging after sent
bytes are finalized, and uncharge this value. When (ret < 0), we shall
invoke sk_msg_free.
Same thing happens in case __SK_DROP, when tosend is set to apply_bytes,
we may miss uncharging (msg->sg.size - apply_bytes) bytes. The same
warning will be reported in selftest.
[...]
468 case __SK_DROP:
469 default:
470 sk_msg_free_partial(sk, msg, tosend);
471 sk_msg_apply_bytes(psock, tosend);
472 *copied -= (tosend + delta);
473 return -EACCES;
[...]
So instead of sk_msg_free_partial we can do sk_msg_free here.
Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface")
Fixes: 8ec95b94716a ("bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues")
Signed-off-by: Zijian Zhang <zijianzhang(a)bytedance.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: John Fastabend <john.fastabend(a)gmail.com>
Link: https://lore.kernel.org/bpf/20241016234838.3167769-3-zijianzhang@bytedance.…
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Liu Jian <liujian56(a)huawei.com>
---
net/ipv4/tcp_bpf.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 8645017da9df9..384121426ea28 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -447,7 +447,6 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
cork = true;
psock->cork = NULL;
}
- sk_msg_return(sk, msg, tosend);
release_sock(sk);
origsize = msg->sg.size;
@@ -459,8 +458,9 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
sock_put(sk_redir);
lock_sock(sk);
+ sk_mem_uncharge(sk, sent);
if (unlikely(ret < 0)) {
- int free = sk_msg_free_nocharge(sk, msg);
+ int free = sk_msg_free(sk, msg);
if (!cork)
*copied -= free;
@@ -474,7 +474,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
break;
case __SK_DROP:
default:
- sk_msg_free_partial(sk, msg, tosend);
+ sk_msg_free(sk, msg);
sk_msg_apply_bytes(psock, tosend);
*copied -= (tosend + delta);
return -EACCES;
@@ -490,11 +490,8 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
}
if (msg &&
msg->sg.data[msg->sg.start].page_link &&
- msg->sg.data[msg->sg.start].length) {
- if (eval == __SK_REDIRECT)
- sk_mem_charge(sk, tosend - sent);
+ msg->sg.data[msg->sg.start].length)
goto more_data;
- }
}
return ret;
}
--
2.34.1
2
1

07 Jan '25
From: Suraj Kandpal <suraj.kandpal(a)intel.com>
mainline inclusion
from mainline-v6.12-rc1
commit d34f4f058edf1235c103ca9c921dc54820d14d40
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IB5KQZ
CVE: CVE-2024-53050
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Add encoder check in intel_hdcp2_get_capability to avoid
null pointer error.
Signed-off-by: Suraj Kandpal <suraj.kandpal(a)intel.com>
Reviewed-by: Dnyaneshwar Bhadane <dnyaneshwar.bhadane(a)intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240722064451.3610512-3-sura…
Conflicts:
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
[ Context conflict:
130849f8ec14("drm/i915/hdcp: Use intel_connector as argument for hdcp_2_2_capable")
a35239a89259("drm/i915/hdcp: Use correct aux for capability check scenario")
287c0de8b294("drm/i915/hdcp: Move to direct reads for HDCP")
55d9b2b8e440("drm/i915/hdcp: Refactor intel_dp_hdcp2_capable")
8e754d9e9e8a("drm/i915/hdcp: Rename hdcp capable functions")
the above patches are not merged.
]
Signed-off-by: He Yujie <coka.heyujie(a)huawei.com>
---
drivers/gpu/drm/i915/display/intel_hdcp.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index cb99839afcd0..762170e6cbbb 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -163,7 +163,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port;
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -192,6 +192,11 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
}
mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ if (!intel_attached_encoder(connector))
+ return false;
+
+ dig_port = intel_attached_dig_port(connector);
+
/* Sink's capability for HDCP2.2 */
hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
--
2.34.1
2
1

07 Jan '25
From: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
stable inclusion
from stable-v6.6.63
commit 59b49ca67cca7b007a5afd3de0283c8008157665
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBA6RL
CVE: CVE-2024-53131
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit cd45e963e44b0f10d90b9e6c0e8b4f47f3c92471 upstream.
Patch series "nilfs2: fix null-ptr-deref bugs on block tracepoints".
This series fixes null pointer dereference bugs that occur when using
nilfs2 and two block-related tracepoints.
This patch (of 2):
It has been reported that when using "block:block_touch_buffer"
tracepoint, touch_buffer() called from __nilfs_get_folio_block() causes a
NULL pointer dereference, or a general protection fault when KASAN is
enabled.
This happens because since the tracepoint was added in touch_buffer(), it
references the dev_t member bh->b_bdev->bd_dev regardless of whether the
buffer head has a pointer to a block_device structure. In the current
implementation, the block_device structure is set after the function
returns to the caller.
Here, touch_buffer() is used to mark the folio/page that owns the buffer
head as accessed, but the common search helper for folio/page used by the
caller function was optimized to mark the folio/page as accessed when it
was reimplemented a long time ago, eliminating the need to call
touch_buffer() here in the first place.
So this solves the issue by eliminating the touch_buffer() call itself.
Link: https://lkml.kernel.org/r/20241106160811.3316-1-konishi.ryusuke@gmail.com
Link: https://lkml.kernel.org/r/20241106160811.3316-2-konishi.ryusuke@gmail.com
Fixes: 5305cb830834 ("block: add block_{touch|dirty}_buffer tracepoint")
Signed-off-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Reported-by: Ubisectech Sirius <bugreport(a)valiantsec.com>
Closes: https://lkml.kernel.org/r/86bd3013-887e-4e38-960f-ca45c657f032.bugreport@va…
Reported-by: syzbot+9982fb8d18eba905abe2(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=9982fb8d18eba905abe2
Tested-by: syzbot+9982fb8d18eba905abe2(a)syzkaller.appspotmail.com
Cc: Tejun Heo <tj(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: He Yujie <coka.heyujie(a)huawei.com>
---
fs/nilfs2/page.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 9bced0a5c253..144e200c4909 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -39,7 +39,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
bh = nilfs_page_get_nth_block(page, block - first_block);
- touch_buffer(bh);
wait_on_buffer(bh);
return bh;
}
--
2.34.1
2
1

[PATCH OLK-6.6] octeontx2-pf: handle otx2_mbox_get_rsp errors in otx2_ethtool.c
by He Yujie 07 Jan '25
by He Yujie 07 Jan '25
07 Jan '25
From: Dipendra Khadka <kdipendra88(a)gmail.com>
stable inclusion
from stable-v6.6.64
commit c0f64fd73b60aee85f88c270c9d714ead27a7b7a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBA6RL
CVE: CVE-2024-56728
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit e26f8eac6bb20b20fdb8f7dc695711ebce4c7c5c ]
Add error pointer check after calling otx2_mbox_get_rsp().
Fixes: 75f36270990c ("octeontx2-pf: Support to enable/disable pause frames via ethtool")
Fixes: d0cf9503e908 ("octeontx2-pf: ethtool fec mode support")
Signed-off-by: Dipendra Khadka <kdipendra88(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Signed-off-by: Andrew Lunn <andrew(a)lunn.ch>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: He Yujie <coka.heyujie(a)huawei.com>
---
.../net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8b7fc0af91ce..532e84bc38c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -343,6 +343,11 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
@@ -1082,6 +1087,11 @@ static int otx2_set_fecparam(struct net_device *netdev,
rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto end;
+ }
+
if (rsp->fec >= 0)
pfvf->linfo.fec = rsp->fec;
else
--
2.34.1
2
1

07 Jan '25
From: Hubert Wiśniewski <hubert.wisniewski.25632(a)gmail.com>
stable inclusion
from stable-v6.6.64
commit 5906ee3693674d734177df13a519a21bb03f730d
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBA6RL
CVE: CVE-2024-56687
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 3fc137386c4620305bbc2a216868c53f9245670a upstream.
There is a possibility that a request's callback could be invoked from
usb_ep_queue() (call trace below, supplemented with missing calls):
req->complete from usb_gadget_giveback_request
(drivers/usb/gadget/udc/core.c:999)
usb_gadget_giveback_request from musb_g_giveback
(drivers/usb/musb/musb_gadget.c:147)
musb_g_giveback from rxstate
(drivers/usb/musb/musb_gadget.c:784)
rxstate from musb_ep_restart
(drivers/usb/musb/musb_gadget.c:1169)
musb_ep_restart from musb_ep_restart_resume_work
(drivers/usb/musb/musb_gadget.c:1176)
musb_ep_restart_resume_work from musb_queue_resume_work
(drivers/usb/musb/musb_core.c:2279)
musb_queue_resume_work from musb_gadget_queue
(drivers/usb/musb/musb_gadget.c:1241)
musb_gadget_queue from usb_ep_queue
(drivers/usb/gadget/udc/core.c:300)
According to the docstring of usb_ep_queue(), this should not happen:
"Note that @req's ->complete() callback must never be called from within
usb_ep_queue() as that can create deadlock situations."
In fact, a hardware lockup might occur in the following sequence:
1. The gadget is initialized using musb_gadget_enable().
2. Meanwhile, a packet arrives, and the RXPKTRDY flag is set, raising an
interrupt.
3. If IRQs are enabled, the interrupt is handled, but musb_g_rx() finds an
empty queue (next_request() returns NULL). The interrupt flag has
already been cleared by the glue layer handler, but the RXPKTRDY flag
remains set.
4. The first request is enqueued using usb_ep_queue(), leading to the call
of req->complete(), as shown in the call trace above.
5. If the callback enables IRQs and another packet is waiting, step (3)
repeats. The request queue is empty because usb_g_giveback() removes the
request before invoking the callback.
6. The endpoint remains locked up, as the interrupt triggered by hardware
setting the RXPKTRDY flag has been handled, but the flag itself remains
set.
For this scenario to occur, it is only necessary for IRQs to be enabled at
some point during the complete callback. This happens with the USB Ethernet
gadget, whose rx_complete() callback calls netif_rx(). If called in the
task context, netif_rx() disables the bottom halves (BHs). When the BHs are
re-enabled, IRQs are also enabled to allow soft IRQs to be processed. The
gadget itself is initialized at module load (or at boot if built-in), but
the first request is enqueued when the network interface is brought up,
triggering rx_complete() in the task context via ioctl(). If a packet
arrives while the interface is down, it can prevent the interface from
receiving any further packets from the USB host.
The situation is quite complicated with many parties involved. This
particular issue can be resolved in several possible ways:
1. Ensure that callbacks never enable IRQs. This would be difficult to
enforce, as discovering how netif_rx() interacts with interrupts was
already quite challenging and u_ether is not the only function driver.
Similar "bugs" could be hidden in other drivers as well.
2. Disable MUSB interrupts in musb_g_giveback() before calling the callback
and re-enable them afterwards (by calling musb_{dis,en}able_interrupts(),
for example). This would ensure that MUSB interrupts are not handled
during the callback, even if IRQs are enabled. In fact, it would allow
IRQs to be enabled when releasing the lock. However, this feels like an
inelegant hack.
3. Modify the interrupt handler to clear the RXPKTRDY flag if the request
queue is empty. While this approach also feels like a hack, it wastes
CPU time by attempting to handle incoming packets when the software is
not ready to process them.
4. Flush the Rx FIFO instead of calling rxstate() in musb_ep_restart().
This ensures that the hardware can receive packets when there is at
least one request in the queue. Once IRQs are enabled, the interrupt
handler will be able to correctly process the next incoming packet
(eventually calling rxstate()). This approach may cause one or two
packets to be dropped (two if double buffering is enabled), but this
seems to be a minor issue, as packet loss can occur when the software is
not yet ready to process them. Additionally, this solution makes the
gadget driver compliant with the rule mentioned in the docstring of
usb_ep_queue().
There may be additional solutions, but from these four, the last one has
been chosen as it seems to be the most appropriate, as it addresses the
"bad" behavior of the driver.
Fixes: baebdf48c360 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
Cc: stable(a)vger.kernel.org
Signed-off-by: Hubert Wiśniewski <hubert.wisniewski.25632(a)gmail.com>
Link: https://lore.kernel.org/r/4ee1ead4525f78fb5909a8cbf99513ad0082ad21.camel@gm…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: He Yujie <coka.heyujie(a)huawei.com>
---
drivers/usb/musb/musb_gadget.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 051c6da7cf6d..f175cb2c3e7b 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1170,12 +1170,19 @@ struct free_record {
*/
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
+ u16 csr;
+ void __iomem *epio = req->ep->hw_ep->regs;
+
trace_musb_req_start(req);
musb_ep_select(musb->mregs, req->epnum);
- if (req->tx)
+ if (req->tx) {
txstate(musb, req);
- else
- rxstate(musb, req);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
}
static int musb_ep_restart_resume_work(struct musb *musb, void *data)
--
2.34.1
2
1

[PATCH OLK-6.6] sched/numa: fix memory leak due to the overwritten vma->numab_state
by He Yujie 07 Jan '25
by He Yujie 07 Jan '25
07 Jan '25
From: Adrian Huang <ahuang12(a)lenovo.com>
mainline inclusion
from mainline-v6.13-rc2
commit 5f1b64e9a9b7ee9cfd32c6b2fab796e29bfed075
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBEAPC
CVE: CVE-2024-56613
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
[Problem Description]
When running the hackbench program of LTP, the following memory leak is
reported by kmemleak.
# /opt/ltp/testcases/bin/hackbench 20 thread 1000
Running with 20*40 (== 800) tasks.
# dmesg | grep kmemleak
...
kmemleak: 480 new suspected memory leaks (see /sys/kernel/debug/kmemleak)
kmemleak: 665 new suspected memory leaks (see /sys/kernel/debug/kmemleak)
# cat /sys/kernel/debug/kmemleak
unreferenced object 0xffff888cd8ca2c40 (size 64):
comm "hackbench", pid 17142, jiffies 4299780315
hex dump (first 32 bytes):
ac 74 49 00 01 00 00 00 4c 84 49 00 01 00 00 00 .tI.....L.I.....
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace (crc bff18fd4):
[<ffffffff81419a89>] __kmalloc_cache_noprof+0x2f9/0x3f0
[<ffffffff8113f715>] task_numa_work+0x725/0xa00
[<ffffffff8110f878>] task_work_run+0x58/0x90
[<ffffffff81ddd9f8>] syscall_exit_to_user_mode+0x1c8/0x1e0
[<ffffffff81dd78d5>] do_syscall_64+0x85/0x150
[<ffffffff81e0012b>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
...
This issue can be consistently reproduced on three different servers:
* a 448-core server
* a 256-core server
* a 192-core server
[Root Cause]
Since multiple threads are created by the hackbench program (along with
the command argument 'thread'), a shared vma might be accessed by two or
more cores simultaneously. When two or more cores observe that
vma->numab_state is NULL at the same time, vma->numab_state will be
overwritten.
Although current code ensures that only one thread scans the VMAs in a
single 'numa_scan_period', there might be a chance for another thread
to enter in the next 'numa_scan_period' while we have not gotten till
numab_state allocation [1].
Note that the command `/opt/ltp/testcases/bin/hackbench 50 process 1000`
cannot reproduce the issue. It is verified with 200+ test runs.
[Solution]
Use the cmpxchg atomic operation to ensure that only one thread executes
the vma->numab_state assignment.
[1] https://lore.kernel.org/lkml/1794be3c-358c-4cdc-a43d-a1f841d91ef7@amd.com/
Link: https://lkml.kernel.org/r/20241113102146.2384-1-ahuang12@lenovo.com
Fixes: ef6a22b70f6d ("sched/numa: apply the scan delay to every new vma")
Signed-off-by: Adrian Huang <ahuang12(a)lenovo.com>
Reported-by: Jiwei Sun <sunjw10(a)lenovo.com>
Reviewed-by: Raghavendra K T <raghavendra.kt(a)amd.com>
Reviewed-by: Vlastimil Babka <vbabka(a)suse.cz>
Cc: Ben Segall <bsegall(a)google.com>
Cc: Dietmar Eggemann <dietmar.eggemann(a)arm.com>
Cc: Ingo Molnar <mingo(a)redhat.com>
Cc: Juri Lelli <juri.lelli(a)redhat.com>
Cc: Mel Gorman <mgorman(a)suse.de>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Steven Rostedt <rostedt(a)goodmis.org>
Cc: Valentin Schneider <vschneid(a)redhat.com>
Cc: Vincent Guittot <vincent.guittot(a)linaro.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Conflicts:
kernel/sched/fair.c
[ Context conflict due to not merge 84db47ca7146 ("sched/numa:
Fix mm numa_scan_seq based unconditional scan") ]
Signed-off-by: He Yujie <coka.heyujie(a)huawei.com>
---
kernel/sched/fair.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be1d35549144..7df2b07629c0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3452,10 +3452,16 @@ static void task_numa_work(struct callback_head *work)
/* Initialise new per-VMA NUMAB state. */
if (!vma->numab_state) {
- vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
- GFP_KERNEL);
- if (!vma->numab_state)
+ struct vma_numab_state *ptr;
+
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ continue;
+
+ if (cmpxchg(&vma->numab_state, NULL, ptr)) {
+ kfree(ptr);
continue;
+ }
vma->numab_state->next_scan = now +
msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
--
2.34.1
2
1

07 Jan '25
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBCO6Q
CVE: NA
---------------------------------
The BIFUR driver supports the following features:
Supports Huawei's self-developed SP670 series network cards;
Supports users to allocate specific PF to derive VF for
single function stream bifurcation;
Signed-off-by: Zhang Zhenghao <zhangzhenghao9(a)huawei.com>
---
MAINTAINERS | 7 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/net/ethernet/huawei/Kconfig | 1 +
drivers/net/ethernet/huawei/Makefile | 1 +
drivers/net/ethernet/huawei/hibifur/Kconfig | 13 +
drivers/net/ethernet/huawei/hibifur/Makefile | 26 +
.../ethernet/huawei/hibifur/bifur_common.h | 57 ++
.../net/ethernet/huawei/hibifur/bifur_event.c | 381 +++++++++
.../net/ethernet/huawei/hibifur/bifur_event.h | 26 +
.../net/ethernet/huawei/hibifur/bifur_main.c | 791 ++++++++++++++++++
.../net/ethernet/huawei/hibifur/bifur_main.h | 57 ++
.../net/ethernet/huawei/hibifur/bifur_pfile.c | 545 ++++++++++++
.../net/ethernet/huawei/hibifur/bifur_pfile.h | 75 ++
.../ethernet/huawei/hibifur/bifur_vf_mgr.c | 263 ++++++
.../ethernet/huawei/hibifur/bifur_vf_mgr.h | 58 ++
.../huawei/hibifur/include/nic/nic_mpu_cmd.h | 183 ++++
.../huawei/hibifur/include/nic/nic_npu_cmd.h | 29 +
.../hibifur/include/nic/nic_npu_cmd_defs.h | 133 +++
.../net/ethernet/huawei/hinic3/hinic3_crm.h | 14 +
.../net/ethernet/huawei/hinic3/hinic3_main.c | 18 +-
.../huawei/hinic3/hinic3_mgmt_interface.h | 9 +
.../net/ethernet/huawei/hinic3/hinic3_mt.h | 7 +-
.../huawei/hinic3/hw/hinic3_dev_mgmt.c | 3 +
.../huawei/hinic3/hw/hinic3_dev_mgmt.h | 2 +
.../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 14 +
.../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h | 2 +
.../ethernet/huawei/hinic3/hw/hinic3_lld.c | 2 +-
.../huawei/hinic3/hw/hinic3_nictool.c | 22 +-
.../include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h | 2 +
30 files changed, 2724 insertions(+), 19 deletions(-)
create mode 100644 drivers/net/ethernet/huawei/hibifur/Kconfig
create mode 100644 drivers/net/ethernet/huawei/hibifur/Makefile
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_common.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_event.c
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_event.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_main.c
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_main.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_pfile.c
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_pfile.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.c
create mode 100644 drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/include/nic/nic_mpu_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd_defs.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 219b3d11e..b22778025 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9690,6 +9690,13 @@ F: drivers/net/ethernet/huawei/hinic3/cqm/
F: drivers/net/ethernet/huawei/hinic3/hw/
F: drivers/net/ethernet/huawei/hinic3/include/
+HUAWEI BIFUR DRIVER
+M: Xiaoping zheng <zhengxiaoping5(a)huawei.com>
+L: netdev(a)vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/huawei/hibifur/
+F: drivers/net/ethernet/huawei/hibifur/include/
+
HUAWEI PVRDMA DRIVER
M: Chengbo Gu <guchengbo(a)huawei.com>
R: Xiaoping zheng <zhengxiaoping5(a)huawei.com>
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index fc6053a50..d4a573c8d 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2940,6 +2940,7 @@ CONFIG_NET_VENDOR_HUAWEI=y
CONFIG_HINIC=m
CONFIG_HINIC3=m
CONFIG_BMA=m
+CONFIG_HIBIFUR=m
# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_NET_VENDOR_INTEL=y
# CONFIG_E100 is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index adfaef0cb..bcb212458 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2933,6 +2933,7 @@ CONFIG_NET_VENDOR_HUAWEI=y
CONFIG_HINIC=m
CONFIG_HINIC3=m
CONFIG_BMA=m
+CONFIG_HIBIFUR=m
# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_NET_VENDOR_INTEL=y
# CONFIG_E100 is not set
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index 0df9544dc..fc22693e4 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -18,5 +18,6 @@ if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
source "drivers/net/ethernet/huawei/hinic3/Kconfig"
source "drivers/net/ethernet/huawei/bma/Kconfig"
+source "drivers/net/ethernet/huawei/hibifur/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index d88e8fd77..97ee28af8 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_HINIC) += hinic/
obj-$(CONFIG_HINIC3) += hinic3/
obj-$(CONFIG_BMA) += bma/
+obj-$(CONFIG_HIBIFUR) += hibifur/
diff --git a/drivers/net/ethernet/huawei/hibifur/Kconfig b/drivers/net/ethernet/huawei/hibifur/Kconfig
new file mode 100644
index 000000000..99d7ecec5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HIBIFUR
+ tristate "Huawei Intelligent Network Interface Card Bifur Driver"
+ depends on HINIC3 && PCI_MSI && NUMA && PCI_IOV && DCB && (X86 || ARM64)
+ help
+ This driver supports HIBIFUR PCIE Ethernet cards.
+ To compile this driver as part of the kernel, choose Y here.
+ If unsure, choose N.
+ The default is N.
diff --git a/drivers/net/ethernet/huawei/hibifur/Makefile b/drivers/net/ethernet/huawei/hibifur/Makefile
new file mode 100644
index 000000000..a4fd682e1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/Makefile
@@ -0,0 +1,26 @@
+EXPORT_SYMBOL := true
+
+KBUILD_EXTRA_SYMBOLS += $(srctree)/drivers/net/ethernet/huawei/hinic3/Module.symvers
+
+SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S)
+ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\"
+
+ccflags-y += -I$(srctree)/include/linux
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/public
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cqm
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/bond
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/mpu
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/hw
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/bond
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hibifur/include
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hibifur/include/nic
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3
+ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include
+
+obj-$(CONFIG_HIBIFUR) += hibifur3.o
+
+hibifur3-objs := bifur_main.o \
+ bifur_vf_mgr.o \
+ bifur_pfile.o \
+ bifur_event.o
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_common.h b/drivers/net/ethernet/huawei/hibifur/bifur_common.h
new file mode 100644
index 000000000..145db9cd3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_common.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+#ifndef BIFUR_COMMON_H__
+#define BIFUR_COMMON_H__
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+#include <linux/kdev_t.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+#include "asm-generic/int-ll64.h"
+#include "linux/pci.h"
+
+#include "ossl_knl_linux.h"
+#include "hinic3_nic_dev.h"
+#include "ossl_knl.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_srv_nic.h"
+
+#define BIFUR_VF_NUM 40
+#define BIFUR_FILE_PATH_SIZE 50
+#define BIFUR_RESOURCE_PF_SSID 0x5a1
+
+#define BIFUR_ENABLED 1
+#define BIFUR_DISABLED 0
+
+#define PCI_DBDF(dom, bus, dev, func) \
+ ((((u32)(dom) << 16) | ((u32)(bus) << 8) | ((u32)(dev) << 3) | ((u32)(func) & 0x7)))
+#define PCI_DBDF_DOM(dbdf) (((dbdf) >> 16) & 0xFFFF)
+#define PCI_DBDF_BUS(dbdf) (((dbdf) >> 8) & 0xFF)
+#define PCI_DBDF_DEVID(dbdf) (((dbdf) >> 3) & 0x1F)
+#define PCI_DBDF_FUNCTION(dbdf) ((dbdf) & 0x7)
+#define PCI_DBDF_DEVFN(dbdf) ((dbdf) & 0xFF)
+
+struct bifur_cdev {
+ struct cdev cdev;
+ dev_t cdev_id;
+ struct proc_dir_entry *proc_dir;
+};
+
+#define BIFUR_DEV_INFO(lld_dev, fmt, arg...) dev_info(&((lld_dev)->pdev->dev), "[BIFUR]" fmt, ##arg)
+
+#define BIFUR_DEV_WARN(lld_dev, fmt, arg...) dev_warn(&((lld_dev)->pdev->dev), "[BIFUR]" fmt, ##arg)
+
+#define BIFUR_DEV_ERR(lld_dev, fmt, arg...) dev_err(&((lld_dev)->pdev->dev), "[BIFUR]" fmt, ##arg)
+#endif
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_event.c b/drivers/net/ethernet/huawei/hibifur/bifur_event.c
new file mode 100644
index 000000000..397ee107f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_event.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+#include <net/bonding.h>
+#include "hinic3_srv_nic.h"
+#include "hinic3_lld.h"
+#include "hinic3_bond.h"
+#include "hinic3_mt.h"
+#include "nic_mpu_cmd.h"
+#include "hinic3_hw.h"
+#include "hinic3_mgmt_interface.h"
+
+#include "bifur_common.h"
+#include "bifur_vf_mgr.h"
+#include "bifur_main.h"
+#include "bifur_event.h"
+
+static int bifur_set_vf_bond_enable(struct bifur_lld_dev *bifur_dev, u16 vf_id, u8 bond_bifur_en)
+{
+ int err;
+ struct hinic3_bond_mask_cmd bond_info = { 0 };
+ u16 out_size = sizeof(struct hinic3_bond_mask_cmd);
+
+ bond_info.msg_head.status = 1;
+ bond_info.func_id = vf_id;
+ bond_info.bond_en = bond_bifur_en;
+
+ err = hinic3_msg_to_mgmt_sync(bifur_dev->lld_dev->hwdev, HINIC3_MOD_L2NIC,
+ HINIC3_NIC_CMD_SET_BOND_MASK, &bond_info,
+ sizeof(bond_info), &bond_info,
+ &out_size, 0, HINIC3_CHANNEL_DEFAULT);
+ if (bond_info.msg_head.status != 0 || err != 0 || out_size == 0) {
+ BIFUR_DEV_ERR(bifur_dev->lld_dev,
+ "Failed to set VF forward id config. err(%d), sts(%u), out_size(%u)\n",
+ err, bond_info.msg_head.status, out_size);
+ return -EIO;
+ }
+ return 0;
+}
+
+void bifur_set_bond_enable(u8 bond_bifur_en)
+{
+ int i, err;
+ struct bifur_vf_info *vf_info = NULL;
+ struct bifur_vf_mgr *vf_mgr = NULL;
+ struct bifur_lld_dev *bifur_src_dev = bifur_get_resource_dev();
+
+ if (!bifur_src_dev) {
+ pr_err("Bifur source pf didn't inited.\n");
+ return;
+ }
+ vf_mgr = bifur_src_dev->vf_mgr;
+
+ mutex_lock(&vf_mgr->vf_mgr_mutex);
+ for (i = 0; i < vf_mgr->vf_sum; ++i) {
+ vf_info = &vf_mgr->vf_info[i];
+ err = bifur_set_vf_bond_enable(bifur_src_dev, vf_info->glb_func_id, bond_bifur_en);
+ if (err != 0) {
+ BIFUR_DEV_WARN(bifur_src_dev->lld_dev,
+ "Failed to set VF(0x%x) bond enable(%u).\n",
+ vf_info->glb_func_id, bond_bifur_en);
+ }
+ }
+
+ mutex_unlock(&vf_mgr->vf_mgr_mutex);
+ bifur_dev_put(bifur_src_dev);
+}
+
+static void bifur_attach_bond_work(struct work_struct *_work)
+{
+ int ret;
+ u16 bond_id;
+ struct bifur_bond_work *work = container_of(_work, struct bifur_bond_work, work);
+ struct bifur_adapter *adp = bifur_get_adp();
+
+ if (!adp) {
+ pr_err("Bifur driver init failed.\n");
+ kfree(work);
+ return;
+ }
+
+ ret = hinic3_bond_attach(work->name, HINIC3_BOND_USER_OVS, &bond_id);
+ if (ret) {
+ pr_info("%s: hinic3 bond attach failed, ret(%d).\n", __func__, ret);
+ kfree(work);
+ return;
+ }
+
+ bifur_dev_list_lock();
+ adp->bond_id = bond_id;
+ adp->bond_bifur_enabled = BIFUR_ENABLED;
+ bifur_dev_list_unlock();
+
+ pr_info("bifur_attach: %s: bond_name(%s), bond_id(%u)\n", __func__, work->name, bond_id);
+ bifur_set_bond_enable(BIFUR_ENABLED);
+
+ kfree(work);
+}
+
+static void bifur_queue_bond_work(struct bifur_adapter *adp, struct net_device *upper_netdev)
+{
+ struct bifur_bond_work *work;
+ struct bonding *bond = netdev_priv(upper_netdev);
+
+ if (!bond) {
+ pr_info("%s: (name:%s) has no bond dev.\n", __func__, upper_netdev->name);
+ return;
+ }
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+
+ (void)strscpy(work->name, upper_netdev->name, sizeof(work->name));
+ INIT_WORK(&work->work, bifur_attach_bond_work);
+ (void)queue_work(adp->event_workq, &work->work);
+}
+
+static void bifur_detach_nic_bond_work(struct work_struct *work)
+{
+ struct bifur_bond_work *detach_work = container_of(work, struct bifur_bond_work, work);
+ struct bifur_adapter *adp = bifur_get_adp();
+ u16 bond_id;
+
+ if (!adp) {
+ pr_err("Bifur driver init failed.\n");
+ kfree(detach_work);
+ return;
+ }
+
+ bifur_dev_list_lock();
+ bond_id = adp->bond_id;
+ adp->bond_bifur_enabled = BIFUR_DISABLED;
+ bifur_dev_list_unlock();
+
+ hinic3_bond_detach(bond_id, HINIC3_BOND_USER_OVS);
+ bifur_set_bond_enable(BIFUR_DISABLED);
+ kfree(detach_work);
+}
+
+static void bifur_queue_detach_bond_work(struct bifur_adapter *adp)
+{
+ struct bifur_bond_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, bifur_detach_nic_bond_work);
+
+ (void)queue_work(adp->event_workq, &work->work);
+}
+
+static bool bifur_can_do_bond(struct bonding *bond)
+{
+ bool ret = false;
+ int slave_cnt = 0;
+ struct slave *slave = NULL;
+ struct list_head *iter = NULL;
+ struct hinic3_lld_dev *lld_dev = NULL;
+ struct hinic3_lld_dev *ppf_dev = NULL;
+
+ if (!bond)
+ return ret;
+
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ lld_dev = hinic3_get_lld_dev_by_netdev(slave->dev);
+ if (!lld_dev)
+ goto out;
+
+ if (!hinic3_support_bifur(lld_dev->hwdev, NULL))
+ goto out;
+
+ if (!ppf_dev) {
+ ppf_dev = hinic3_get_ppf_lld_dev(lld_dev);
+ if (!ppf_dev)
+ goto out;
+ }
+
+ slave_cnt++;
+ pr_info("%s:can do bond? slave_cnt(%d), slave_name(%s)\n", __func__,
+ slave_cnt, slave->dev->name);
+ }
+
+ ret = (slave_cnt == BIFUR_BOND_2_FUNC_NUM);
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int bifur_bond_netdev_event(struct bifur_adapter *adp,
+ struct netdev_notifier_changeupper_info *info,
+ struct net_device *net_dev)
+{
+ struct bonding *bond = NULL;
+ struct net_device *upper_netdev = info->upper_dev;
+
+ if (net_eq(dev_net(net_dev), &init_net) == 0)
+ return NOTIFY_DONE;
+
+ if (!upper_netdev)
+ return NOTIFY_DONE;
+
+ if (!netif_is_lag_master(upper_netdev))
+ return NOTIFY_DONE;
+
+ bond = netdev_priv(upper_netdev);
+ if (!bifur_can_do_bond(bond)) {
+ bifur_queue_detach_bond_work(adp);
+ pr_info("%s: (name:%s) has no bond dev.\n", __func__, upper_netdev->name);
+ return NOTIFY_DONE;
+ }
+
+ bifur_queue_bond_work(adp, upper_netdev);
+
+ return NOTIFY_DONE;
+}
+
+int bifur_bond_init(void)
+{
+ int ret = 0;
+ struct net_device *upper_netdev;
+ struct bifur_adapter *adp = bifur_get_adp();
+
+ if (!adp) {
+ pr_err("Bifur driver init failed.\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+ for_each_netdev(&init_net, upper_netdev) {
+ if (netif_is_bond_master(upper_netdev) &&
+ bifur_can_do_bond(netdev_priv(upper_netdev))) {
+ bifur_queue_bond_work(adp, upper_netdev);
+ break;
+ }
+ }
+ rtnl_unlock();
+
+ pr_info("%s: bond init exit.\n", __func__);
+ return ret;
+}
+
+void bifur_bond_exit(void)
+{
+ struct bifur_adapter *adp = bifur_get_adp();
+
+ if (!adp) {
+ pr_err("Bifur driver init failed.\n");
+ return;
+ }
+ bifur_queue_detach_bond_work(adp);
+}
+
+void bifur_notify_vf_link_status(struct hinic3_lld_dev *lld_dev, u8 port_id, u16 vf_id,
+ u8 link_status)
+{
+ struct mag_cmd_get_link_status link;
+ u16 out_size = sizeof(link);
+ int err;
+
+ (void)memset(&link, 0, sizeof(link));
+ link.status = link_status;
+ link.port_id = port_id;
+
+ err = hinic3_mbox_to_vf_no_ack(lld_dev->hwdev, vf_id, HINIC3_MOD_HILINK,
+ MAG_CMD_GET_LINK_STATUS, &link, sizeof(link),
+ &link, &out_size, HINIC3_CHANNEL_NIC);
+ if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) {
+ pr_err("Vf%d not initialized, disconnect it\n", HW_VF_ID_TO_OS(vf_id));
+ return;
+ }
+
+ if (err || !out_size || link.head.status) {
+ pr_err("Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, link.head.status, out_size);
+ }
+}
+
+void bifur_notify_all_vfs_link_changed(struct hinic3_lld_dev *lld_dev, u32 dbdf, u8 link_status)
+{
+ struct bifur_lld_dev *bifur_src_dev = NULL;
+ struct bifur_vf_mgr *vf_mgr = NULL;
+ struct bifur_vf_info *vf_info = NULL;
+ u16 i;
+ u8 port_id;
+
+ bifur_src_dev = bifur_get_resource_dev();
+ if (!bifur_src_dev)
+ return;
+
+ vf_mgr = bifur_src_dev->vf_mgr;
+ port_id = hinic3_physical_port_id(lld_dev->hwdev);
+
+ mutex_lock(&vf_mgr->vf_mgr_mutex);
+ for (i = 0; i < vf_mgr->vf_sum; ++i) {
+ vf_info = &vf_mgr->vf_info[i];
+ if (vf_info->pf_dbdf == dbdf && vf_info->in_use)
+ bifur_notify_vf_link_status(bifur_src_dev->lld_dev, port_id,
+ OS_VF_ID_TO_HW(i), link_status);
+ }
+ mutex_unlock(&vf_mgr->vf_mgr_mutex);
+
+ bifur_dev_put(bifur_src_dev);
+}
+
+void bifur_netdev_event(struct work_struct *work)
+{
+ struct bifur_lld_dev *bifur_dev = container_of(work, struct bifur_lld_dev, netdev_link);
+
+ bifur_notify_all_vfs_link_changed(bifur_dev->lld_dev, bifur_dev->dbdf,
+ bifur_dev->link_status);
+
+ bifur_dev_put(bifur_dev);
+}
+
+static int bifur_net_link_event(struct bifur_adapter *adp, unsigned long event,
+ struct net_device *dev)
+{
+ u32 dbdf;
+ struct pci_dev *pcidev = NULL;
+ struct bifur_lld_dev *bifur_dev = NULL;
+ struct hinic3_nic_dev *nic_dev = NULL;
+
+ nic_dev = netdev_priv(dev);
+ pcidev = nic_dev->pdev;
+ dbdf = PCI_DBDF(pci_domain_nr(pcidev->bus), pcidev->bus->number,
+ PCI_DBDF_DEVID(pcidev->devfn), PCI_DBDF_FUNCTION(pcidev->devfn));
+
+ bifur_dev = bifur_get_shared_dev_by_dbdf(dbdf);
+ if (!bifur_dev)
+ return NOTIFY_DONE;
+
+ bifur_dev->link_status = (event == NETDEV_UP ? 1 : 0);
+ (void)queue_work(adp->event_workq, &bifur_dev->netdev_link);
+ return NOTIFY_OK;
+}
+
+int bifur_net_event_callback(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct bifur_adapter *adp = bifur_get_adp();
+
+ if (unlikely(!dev)) {
+ pr_err("bifur notify dev null\n");
+ return NOTIFY_DONE;
+ }
+
+ /* only self-developed NICs can be processed */
+ if (!hinic3_get_lld_dev_by_netdev(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ return bifur_net_link_event(adp, event, dev);
+ case NETDEV_CHANGEUPPER:
+ return bifur_bond_netdev_event(adp, (struct netdev_notifier_changeupper_info *)ptr,
+ dev);
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block g_bifur_event_nb = {
+ .notifier_call = bifur_net_event_callback
+};
+
+int bifur_register_net_event(void)
+{
+ return register_netdevice_notifier(&g_bifur_event_nb);
+}
+
+void bifur_unregister_net_event(void)
+{
+ (void)unregister_netdevice_notifier(&g_bifur_event_nb);
+}
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_event.h b/drivers/net/ethernet/huawei/hibifur/bifur_event.h
new file mode 100644
index 000000000..b9068a10f
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_event.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+#ifndef BIFUR_EVENT_H__
+#define BIFUR_EVENT_H__
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/if.h>
+
+#define BIFUR_BOND_2_FUNC_NUM 2
+
+struct bifur_bond_work {
+ char name[IFNAMSIZ];
+ struct work_struct work;
+};
+
+void bifur_set_bond_enable(u8 bond_bifur_en);
+int bifur_bond_init(void);
+void bifur_bond_exit(void);
+int bifur_register_net_event(void);
+void bifur_unregister_net_event(void);
+void bifur_netdev_event(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_main.c b/drivers/net/ethernet/huawei/hibifur/bifur_main.c
new file mode 100644
index 000000000..287ef2268
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_main.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+/* sdk include */
+#include "hinic3_crm.h"
+#include "hinic3_hw.h"
+#include "hinic3_cqm.h"
+#include "hinic3_lld.h"
+#include "hinic3_mt.h"
+
+#include "bifur_common.h"
+#include "bifur_vf_mgr.h"
+#include "bifur_pfile.h"
+#include "bifur_event.h"
+#include "bifur_main.h"
+#define BIFUR_DRV_DESC "BIFUR Offload Driver"
+#define BIFUR_DRV_VERSION ""
+
+#define BIFUR_WAIT_TIMES 1000
+#define BIFUR_REMOVE_TIMESTEP 10
+#define BIFUR_KWRITE_BUF_SIZE 20
+#define BIFUR_DPDK_KDRIVER_TYPE 3
+
+#define BIFUR_SET_ENABLE 0xc0
+#define BIFUR_GET_ENABLE 0xc1
+
+static char *g_bifur_dpdk_kdriver = "vfio-pci";
+module_param(g_bifur_dpdk_kdriver, charp, 0644);
+MODULE_PARM_DESC(g_bifur_dpdk_kdriver,
+ "for dpdk kernel driver module (default:\"vfio-pci\", options:\"igb_uio\", \"uio_pci_generic\")");
+static const char *g_bifur_dpdk_kdriver_all[BIFUR_DPDK_KDRIVER_TYPE] = {
+ "igb_uio",
+ "vfio-pci",
+ "uio_pci_generic"
+};
+
+/* bifur global manager struct */
+static struct bifur_adapter *g_bifur_adapter;
+
+static void bifur_destroy_dev(struct bifur_lld_dev *bifur_dev);
+static void wait_bifur_dev_unused(struct bifur_lld_dev *bifur_dev);
+
+struct bifur_adapter *bifur_get_adp(void)
+{
+ return g_bifur_adapter;
+}
+
+void bifur_dev_hold(struct bifur_lld_dev *bifur_dev)
+{
+ atomic_inc(&bifur_dev->bifur_dev.bifur_dev_ref);
+}
+
+void bifur_dev_put(struct bifur_lld_dev *bifur_dev)
+{
+ atomic_dec(&bifur_dev->bifur_dev.bifur_dev_ref);
+}
+
+void bifur_dev_list_lock(void)
+{
+ mutex_lock(&g_bifur_adapter->bifur_dev_mutex);
+}
+
+void bifur_dev_list_unlock(void)
+{
+ mutex_unlock(&g_bifur_adapter->bifur_dev_mutex);
+}
+
+static int bifur_alloc_adapter(void)
+{
+ /* alloc driver global adapter struct */
+ if (!g_bifur_adapter) {
+ g_bifur_adapter = kzalloc(sizeof(*g_bifur_adapter), GFP_KERNEL);
+ if (!g_bifur_adapter)
+ return -ENOMEM;
+ }
+
+ /* init global adapter */
+ INIT_LIST_HEAD(&g_bifur_adapter->lld_dev_head);
+ mutex_init(&g_bifur_adapter->bifur_dev_mutex);
+
+ g_bifur_adapter->event_workq = create_singlethread_workqueue("bifur_eventq");
+ if (!g_bifur_adapter->event_workq) {
+ kfree(g_bifur_adapter);
+ g_bifur_adapter = NULL;
+ pr_err("Create bifur event_workq fail");
+ return -ENOMEM;
+ }
+
+ pr_info("Alloc bifur adapter success\n");
+ return 0;
+}
+
+static void bifur_free_adapter(void)
+{
+ destroy_workqueue(g_bifur_adapter->event_workq);
+
+ kfree(g_bifur_adapter);
+ g_bifur_adapter = NULL;
+ pr_info("Free adapter success\n");
+}
+
+static bool bifur_check_dpdk_kdriver(void)
+{
+ bool is_valid_driver = false;
+ int i;
+
+ for (i = 0; i < BIFUR_DPDK_KDRIVER_TYPE; ++i) {
+ if (!strcmp(g_bifur_dpdk_kdriver, g_bifur_dpdk_kdriver_all[i]))
+ is_valid_driver = true;
+ }
+
+ return is_valid_driver;
+}
+
+static int bifur_open_and_write_file(const char *file_path, const char *buf, int open_flags,
+ umode_t open_mode)
+{
+ struct file *fp = NULL;
+ loff_t f_pos = 0;
+ int err = 0;
+
+ fp = filp_open(file_path, open_flags, open_mode);
+ if (IS_ERR(fp)) {
+ pr_err("Open %s failed, err %ld\n", file_path, PTR_ERR(fp));
+ return -ENOENT;
+ }
+
+ err = kernel_write(fp, buf, strlen(buf), &f_pos);
+ if (err < 0) {
+ pr_err("Write %s to file %s failed, err %d\n", buf, file_path, err);
+ (void)filp_close(fp, NULL);
+ return err;
+ }
+
+ (void)filp_close(fp, NULL);
+
+ return 0;
+}
+
+static int bifur_enable_disable_vfs(struct bifur_lld_dev *bifur_dev, u16 num_vfs)
+{
+ int err = 0;
+ char file_path[BIFUR_FILE_PATH_SIZE] = {};
+ char buf[BIFUR_KWRITE_BUF_SIZE] = {};
+ struct pci_dev *pdev = bifur_dev->lld_dev->pdev;
+
+ /* write vf num to /sys/bus/pci/devices/%s/sriov_numvfs */
+ err = snprintf(file_path, BIFUR_FILE_PATH_SIZE,
+ "/sys/bus/pci/devices/%s/sriov_numvfs", pci_name(pdev));
+ if (err < 0 || err >= BIFUR_FILE_PATH_SIZE) {
+ pr_err("Snprintf bifur pci dev sriov_numvfs file path, err %d!\n", err);
+ return err;
+ }
+
+ err = snprintf(buf, BIFUR_KWRITE_BUF_SIZE, "%u", num_vfs);
+ if (err < 0 || err >= BIFUR_KWRITE_BUF_SIZE) {
+ pr_err("Snprintf bifur numvfs str, err %d!\n", err);
+ return err;
+ }
+
+ err = bifur_open_and_write_file(file_path, buf, O_WRONLY | O_TRUNC, 0);
+ if (err != 0) {
+ pr_info("Enable vf of pf failed, dbdf:0x%s, sriov_nums:%u\n",
+ pci_name(pdev), num_vfs);
+ return err;
+ }
+
+ pr_info("Enable vf of pf success, dbdf:0x%s, sriov_nums:%u\n", pci_name(pdev), num_vfs);
+
+ return 0;
+}
+
+int bifur_enable_disable_vf_all(bool enable)
+{
+ int err = 0;
+ int num_vfs = enable ? BIFUR_VF_NUM : 0;
+ struct bifur_lld_dev *bifur_dev = NULL;
+ struct bifur_lld_dev *tmp_dev = NULL;
+ struct bifur_adapter *adp = bifur_get_adp();
+ struct list_head *head = &adp->lld_dev_head;
+
+ list_for_each_entry_safe(bifur_dev, tmp_dev, head, list) {
+ if (bifur_dev->pf_type != BIFUR_RESOURCE_PF)
+ continue;
+
+ bifur_dev_hold(bifur_dev);
+ err = bifur_enable_disable_vfs(bifur_dev, num_vfs);
+ bifur_dev_put(bifur_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bifur_one_unbind_driver(u32 dbdf, const char *driver)
+{
+ int err = 0;
+ char file_path[BIFUR_FILE_PATH_SIZE] = {};
+ char buf[BIFUR_KWRITE_BUF_SIZE] = {};
+
+ /* write pci dbdf to /sys/bus/pci/drivers/%s/unbind */
+ err = snprintf(file_path, BIFUR_FILE_PATH_SIZE,
+ "/sys/bus/pci/drivers/%s/unbind", driver);
+ if (err < 0 || err >= BIFUR_FILE_PATH_SIZE) {
+ pr_err("Snprintf bifur driver unbind file path, err %d!\n", err);
+ return err;
+ }
+
+ err = snprintf(buf, BIFUR_KWRITE_BUF_SIZE, "%.4x:%.2x:%.2x.%x",
+ PCI_DBDF_DOM(dbdf), PCI_DBDF_BUS(dbdf),
+ PCI_DBDF_DEVID(dbdf), PCI_DBDF_FUNCTION(dbdf));
+ if (err < 0 || err >= BIFUR_KWRITE_BUF_SIZE) {
+ pr_err("Snprintf bifur pci dev dbdf str, err %d!\n", err);
+ return err;
+ }
+
+ err = bifur_open_and_write_file(file_path, buf, O_WRONLY | O_APPEND, 0);
+ if (err != 0) {
+ pr_info("Unbind vf from driver %s failed\n", driver);
+ return err;
+ }
+
+ pr_info("Unbind vf from driver %s success\n", driver);
+
+ return 0;
+}
+
+static int bifur_one_bind_dpdk(u32 dbdf)
+{
+ int err = 0;
+ char file_path[BIFUR_FILE_PATH_SIZE] = {};
+ char buf[BIFUR_KWRITE_BUF_SIZE] = {};
+ const char *kernel_driver = "hisdk3";
+
+ bifur_one_unbind_driver(dbdf, kernel_driver);
+
+ err = snprintf(file_path, BIFUR_FILE_PATH_SIZE,
+ "/sys/bus/pci/devices/%.4x:%.2x:%.2x.%x/driver_override",
+ PCI_DBDF_DOM(dbdf), PCI_DBDF_BUS(dbdf),
+ PCI_DBDF_DEVID(dbdf), PCI_DBDF_FUNCTION(dbdf));
+ if (err < 0 || err >= BIFUR_FILE_PATH_SIZE) {
+ pr_err("Snprintf bifur pci dev driver_override file path, err %d!\n", err);
+ return err;
+ }
+
+ (void)strscpy(buf, g_bifur_dpdk_kdriver, sizeof(buf));
+
+ err = bifur_open_and_write_file(file_path, buf, O_WRONLY | O_TRUNC, 0);
+ if (err != 0)
+ return err;
+
+ err = snprintf(file_path, BIFUR_FILE_PATH_SIZE,
+ "/sys/bus/pci/drivers/%s/bind", g_bifur_dpdk_kdriver);
+ if (err < 0 || err >= BIFUR_FILE_PATH_SIZE) {
+ pr_err("Snprintf bifur dpdk driver bind file path, err %d!\n", err);
+ return err;
+ }
+
+ err = snprintf(buf, BIFUR_KWRITE_BUF_SIZE, "%.4x:%.2x:%.2x.%x",
+ PCI_DBDF_DOM(dbdf), PCI_DBDF_BUS(dbdf),
+ PCI_DBDF_DEVID(dbdf), PCI_DBDF_FUNCTION(dbdf));
+ if (err < 0 || err >= BIFUR_KWRITE_BUF_SIZE) {
+ pr_err("Snprintf bifur pci dev dbdf str, err %d!\n", err);
+ return err;
+ }
+
+ err = bifur_open_and_write_file(file_path, buf, O_WRONLY | O_APPEND, 0);
+ if (err != 0)
+ return err;
+
+ return 0;
+}
+
+static int bifur_bind_unbind_dpdk(struct bifur_lld_dev *bifur_dev, bool enable)
+{
+ int err = 0;
+ u32 dbdf = 0;
+ int i;
+
+ for (i = 0; i < bifur_dev->vf_mgr->vf_sum; ++i) {
+ dbdf = bifur_dev->vf_mgr->vf_info[i].vf_dbdf;
+ if (enable)
+ err = bifur_one_bind_dpdk(dbdf);
+ else
+ err = bifur_one_unbind_driver(dbdf, g_bifur_dpdk_kdriver);
+ if (err) {
+ pr_err("Bind/Unbind failed for vf %08x\n", dbdf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int bifur_bind_unbind_dpdk_all(bool enable)
+{
+ int err = 0;
+ struct bifur_lld_dev *bifur_dev = NULL;
+ struct bifur_lld_dev *tmp_dev = NULL;
+ struct bifur_adapter *adp = bifur_get_adp();
+ struct list_head *head = &adp->lld_dev_head;
+
+ list_for_each_entry_safe(bifur_dev, tmp_dev, head, list) {
+ if (bifur_dev->pf_type != BIFUR_RESOURCE_PF)
+ continue;
+
+ bifur_dev_hold(bifur_dev);
+ err = bifur_bind_unbind_dpdk(bifur_dev, enable);
+ bifur_dev_put(bifur_dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bifur_probe_vf(struct hinic3_lld_dev *lld_dev)
+{
+ struct bifur_lld_dev *bifur_dev = NULL;
+ struct bifur_vf_mgr *vf_mgr = NULL;
+ struct bifur_vf_info *vf_info = NULL;
+ u32 vf_dbdf;
+ int ret;
+
+ bifur_dev = bifur_get_resource_dev();
+ if (!bifur_dev)
+ return -ENODEV;
+
+ vf_mgr = bifur_dev->vf_mgr;
+ vf_dbdf = PCI_DBDF(pci_domain_nr(lld_dev->pdev->bus), lld_dev->pdev->bus->number,
+ PCI_DBDF_DEVID(lld_dev->pdev->devfn),
+ PCI_DBDF_FUNCTION(lld_dev->pdev->devfn));
+ if (vf_mgr->vf_sum >= BIFUR_VF_NUM) {
+ bifur_dev_put(bifur_dev);
+ BIFUR_DEV_ERR(lld_dev, "current_vf_sum(%u) >= BIFUR_VF_NUM(%u)\n",
+ vf_mgr->vf_sum, BIFUR_VF_NUM);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&bifur_dev->vf_mgr->vf_mgr_mutex);
+ vf_info = &vf_mgr->vf_info[vf_mgr->vf_sum];
+ vf_mgr->vf_sum++;
+ vf_info->vf_dbdf = vf_dbdf;
+ vf_info->glb_func_id = hinic3_global_func_id(lld_dev->hwdev);
+ vf_info->in_use = 0;
+ vf_info->vf_mgr = vf_mgr;
+ ret = snprintf(vf_info->name, (size_t)BIFURNAMSIZ, "bifur%04x", vf_dbdf);
+ if (ret < 0) {
+ mutex_unlock(&bifur_dev->vf_mgr->vf_mgr_mutex);
+ BIFUR_DEV_ERR(lld_dev, "set name failed, ret(%d)\n", ret);
+ bifur_dev_put(bifur_dev);
+ return ret;
+ }
+ bifur_vf_cdev_init(vf_info);
+ mutex_unlock(&bifur_dev->vf_mgr->vf_mgr_mutex);
+
+ bifur_dev_put(bifur_dev);
+
+ return 0;
+}
+
+static int bifur_remove_vf(struct bifur_lld_dev *bifur_dev)
+{
+ struct bifur_vf_info *vf_info = NULL;
+ struct bifur_vf_mgr *vf_mgr = NULL;
+ int i;
+
+ if (!bifur_dev)
+ return -ENODEV;
+ vf_mgr = bifur_dev->vf_mgr;
+
+ mutex_lock(&vf_mgr->vf_mgr_mutex);
+ for (i = 0; i < vf_mgr->vf_sum; ++i) {
+ vf_info = &vf_mgr->vf_info[i];
+ bifur_vf_cdev_uninit(vf_info);
+ }
+ mutex_unlock(&vf_mgr->vf_mgr_mutex);
+ return 0;
+}
+
+static int bifur_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name)
+{
+ struct bifur_lld_dev *bifur_dev = NULL;
+ struct bifur_adapter *adp = bifur_get_adp();
+ int err = 0;
+
+ if (!uld_dev || !lld_dev || !lld_dev->pdev || !lld_dev->hwdev) {
+ pr_err("Bifur probe failed for invalid param, lld_dev or uld_dev\n");
+ return -EINVAL;
+ }
+
+ if (hinic3_func_type(lld_dev->hwdev) == TYPE_VF) {
+ *uld_dev = NULL;
+ if (hinic3_support_bifur(lld_dev->hwdev, NULL)) {
+ err = bifur_probe_vf(lld_dev);
+ return err;
+ }
+ return 0;
+ }
+
+ bifur_dev = kzalloc(sizeof(*bifur_dev), GFP_KERNEL);
+ if (!bifur_dev) {
+ BIFUR_DEV_ERR(lld_dev, "Alloc bifur lld dev failed\n");
+ return -ENOMEM;
+ }
+
+ /* init bifur dev */
+ bifur_dev->lld_dev = lld_dev;
+
+ if (hinic3_support_bifur(lld_dev->hwdev, NULL)) {
+ if (lld_dev->pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID) {
+ bifur_dev->pf_type = BIFUR_RESOURCE_PF;
+ err = bifur_alloc_vf_mgr(bifur_dev);
+ if (err) {
+ kfree(bifur_dev);
+ bifur_dev = NULL;
+ return err;
+ }
+ } else {
+ bifur_dev->pf_type = BIFUR_SHARED_PF;
+ }
+ } else {
+ bifur_dev->pf_type = BIFUR_EXCLUSIVE_PF;
+ }
+ pr_info("bifur_dev->pf_type: %d\n", bifur_dev->pf_type);
+
+ INIT_WORK(&bifur_dev->netdev_link, bifur_netdev_event);
+ bifur_dev->dbdf = PCI_DBDF(pci_domain_nr(lld_dev->pdev->bus), lld_dev->pdev->bus->number,
+ PCI_DBDF_DEVID(lld_dev->pdev->devfn),
+ PCI_DBDF_FUNCTION(lld_dev->pdev->devfn));
+
+ atomic_set(&bifur_dev->bifur_dev.bifur_dev_ref, 0);
+ bifur_dev->bifur_dev.has_created = true;
+
+ bifur_dev_list_lock();
+ list_add_tail(&bifur_dev->list, &adp->lld_dev_head);
+ bifur_dev_list_unlock();
+
+ *uld_dev = bifur_dev;
+
+ BIFUR_DEV_INFO(lld_dev, "bifur driver probe\n");
+
+ return 0;
+}
+
+static void bifur_remove(struct hinic3_lld_dev *lld_dev, void *uld_dev)
+{
+ struct bifur_lld_dev *bifur_dev = (struct bifur_lld_dev *)uld_dev;
+
+ if (!bifur_dev)
+ return;
+
+ if (bifur_dev->pf_type == BIFUR_RESOURCE_PF) {
+ (void)bifur_remove_vf(bifur_dev);
+ bifur_free_vf_mgr(bifur_dev);
+ }
+
+ /* delete bifur device */
+ bifur_dev_list_lock();
+ list_del(&bifur_dev->list);
+ bifur_dev_list_unlock();
+
+ (void)cancel_work_sync(&bifur_dev->netdev_link);
+ wait_bifur_dev_unused(bifur_dev);
+
+ bifur_destroy_dev(bifur_dev);
+
+ kfree(bifur_dev);
+ bifur_dev = NULL;
+
+ BIFUR_DEV_INFO(lld_dev, "bifur driver remove\n");
+}
+
+static int get_bifur_drv_version(struct drv_version_info *ver_info, u32 *out_size)
+{
+ int err;
+
+ if (*out_size != sizeof(*ver_info)) {
+ pr_err("Unexpected out buf size from user :%u, expect: %lu\n",
+ *out_size, sizeof(*ver_info));
+ return -EINVAL;
+ }
+
+ err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s",
+ BIFUR_DRV_VERSION, __TIME_STR__);
+ if (err < 0 || err >= (int)sizeof(ver_info->ver)) {
+ pr_err("Snprintf bifur version err\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int bifur_enable_vfs(u8 bond_bifur_en)
+{
+ int err;
+
+ err = bifur_enable_disable_vf_all(true);
+ if (err) {
+ pr_err("Enable bifur vf failed. err(%d)\n", err);
+ return err;
+ }
+
+ err = bifur_bind_unbind_dpdk_all(true);
+ if (err) {
+ (void)bifur_enable_disable_vf_all(false);
+ pr_err("Bind bifur vf to dpdk failed. err(%d)\n", err);
+ return err;
+ }
+
+ bifur_set_bond_enable(bond_bifur_en);
+ return 0;
+}
+
+static int bifur_disable_vfs(void)
+{
+ int err;
+
+ bifur_set_bond_enable(BIFUR_DISABLED);
+
+ err = bifur_enable_disable_vf_all(false);
+ if (err) {
+ pr_err("Disable bifur vf failed. err(%d)\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int bifur_set_vfs_enable_state(struct bifur_adapter *adp, int set_enable,
+ int *out_enable, u32 *out_size)
+{
+ int err;
+ u8 bond_bifur_en;
+
+ if (set_enable != BIFUR_ENABLED && set_enable != BIFUR_DISABLED) {
+ pr_err("Input params invalid. set_enable(%d)\n", set_enable);
+ return -EINVAL;
+ }
+
+ if (*out_size != sizeof(*out_enable)) {
+ pr_err("Unexpected out buf size from user :%u, expect: %lu\n",
+ *out_size, sizeof(*out_enable));
+ return -EINVAL;
+ }
+
+ bifur_dev_list_lock();
+ if ((u8)set_enable == adp->bifur_enabled) {
+ *out_enable = adp->bifur_enabled;
+ bifur_dev_list_unlock();
+ pr_info("Bifur enabled status has been set. set_enable(%d)\n", set_enable);
+ return 0;
+ }
+ bond_bifur_en = adp->bond_bifur_enabled;
+ adp->bifur_enabled = set_enable;
+ bifur_dev_list_unlock();
+
+ if (set_enable == BIFUR_ENABLED)
+ err = bifur_enable_vfs(bond_bifur_en);
+ else
+ err = bifur_disable_vfs();
+
+ bifur_dev_list_lock();
+ if (err != 0)
+ adp->bifur_enabled = !set_enable;
+ *out_enable = adp->bifur_enabled;
+ bifur_dev_list_unlock();
+
+ return err;
+}
+
+/* Report the current bifurcation enable flag under the device-list lock. */
+static int bifur_get_bifur_enabled(struct bifur_adapter *adp, int *enabled_status, u32 *out_size)
+{
+	if (*out_size != sizeof(*enabled_status)) {
+		pr_err("Unexpected out buf size from user :%u, expect: %lu\n",
+		       *out_size, sizeof(*enabled_status));
+		return -EINVAL;
+	}
+
+	bifur_dev_list_lock();
+	*enabled_status = adp->bifur_enabled;
+	bifur_dev_list_unlock();
+	return 0;
+}
+
+/*
+ * ULD ioctl entry. Dispatches GET_DRV_VERSION / BIFUR_SET_ENABLE /
+ * BIFUR_GET_ENABLE. Fixes: BIFUR_SET_ENABLE used to dereference buf_in
+ * without checking it, and unknown commands used to return success.
+ */
+static int bifur_ioctl(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size,
+		       void *buf_out, u32 *out_size)
+{
+	struct bifur_adapter *adp = bifur_get_adp();
+	struct bifur_lld_dev *bifur_dev = (struct bifur_lld_dev *)uld_dev;
+
+	if (!uld_dev || !out_size || !buf_out) {
+		pr_err("[BIFUR] %s: Input params is null. out_size(%d), buf_out(%d)\n",
+		       __func__, (int)(!out_size), (int)(!buf_out));
+		return -EINVAL;
+	}
+
+	if (!hinic3_support_bifur(bifur_dev->lld_dev->hwdev, NULL)) {
+		pr_err("[BIFUR] %s: %s Not support bifurcation\n", __func__,
+		       pci_name(bifur_dev->lld_dev->pdev));
+		return -EINVAL;
+	}
+
+	if (cmd == GET_DRV_VERSION)
+		return get_bifur_drv_version((struct drv_version_info *)buf_out, out_size);
+
+	if (cmd == BIFUR_SET_ENABLE) {
+		/* buf_in was never validated before the dereference below. */
+		if (!buf_in || in_size < sizeof(int)) {
+			pr_err("[BIFUR] %s: Invalid in buf, in_size(%u)\n",
+			       __func__, in_size);
+			return -EINVAL;
+		}
+		return bifur_set_vfs_enable_state(adp, *(int *)buf_in, (int *)buf_out, out_size);
+	}
+
+	if (cmd == BIFUR_GET_ENABLE)
+		return bifur_get_bifur_enabled(adp, (int *)buf_out, out_size);
+
+	pr_err("Not support cmd %u for bifur\n", cmd);
+	return -EOPNOTSUPP;	/* was 0: callers could not detect bad cmds */
+}
+
+/* Callbacks registered with the hinic3 upper-layer-driver framework. */
+static struct hinic3_uld_info bifur_uld_info = {
+	.probe = bifur_probe,
+	.remove = bifur_remove,
+	.suspend = NULL,
+	.resume = NULL,
+	.ioctl = bifur_ioctl,
+};
+
+/*
+ * Module init: allocate the adapter, register with the hinic3 ULD
+ * framework, create the global char device, then hook net events and bond
+ * tracking. Unwinds in reverse order on failure.
+ * Fixes: net-event failure logged the global-cdev message (copy-paste),
+ * and bifur_alloc_adapter()'s error code was replaced by -ENOMEM.
+ */
+static __init int hibifur_init(void)
+{
+	int err = 0;
+
+	pr_info("%s - version %s, compile time:%s\n", BIFUR_DRV_DESC,
+		BIFUR_DRV_VERSION, __TIME_STR__);
+
+	if (!bifur_check_dpdk_kdriver()) {
+		pr_err("Invalid dpdk kernel driver type: %s\n", g_bifur_dpdk_kdriver);
+		return -EINVAL;
+	}
+
+	err = bifur_alloc_adapter();
+	if (err != 0)
+		return err;	/* propagate the real error, not a blanket -ENOMEM */
+
+	err = hinic3_register_uld(SERVICE_T_BIFUR, &bifur_uld_info);
+	if (err != 0) {
+		pr_err("Register bifur uld failed\n");
+		goto register_uld_err;
+	}
+
+	err = bifur_global_dev_init();
+	if (err) {
+		pr_err("Register bifur global cdev failed\n");
+		goto global_dev_init_err;
+	}
+
+	err = bifur_register_net_event();
+	if (err) {
+		pr_err("Register bifur net event failed\n");
+		goto register_event_err;
+	}
+
+	err = bifur_bond_init();
+	if (err != 0) {
+		pr_err("Bifur bond status init failed\n");
+		goto bond_init_err;
+	}
+
+	return 0;
+
+bond_init_err:
+	bifur_unregister_net_event();
+register_event_err:
+	bifur_global_dev_uninit();
+global_dev_init_err:
+	hinic3_unregister_uld(SERVICE_T_BIFUR);
+register_uld_err:
+	bifur_free_adapter();
+	return err;
+}
+
+/*
+ * Module exit: tear down bond/net-event/cdev infrastructure, then force
+ * bifurcation off (unbind DPDK, disable VFs) before unregistering the ULD.
+ */
+static __exit void hibifur_exit(void)
+{
+	struct bifur_adapter *adp = bifur_get_adp();
+	u8 bifur_enabled = BIFUR_DISABLED;
+
+	bifur_bond_exit();
+	bifur_unregister_net_event();
+	bifur_global_dev_uninit();
+
+	/* Snapshot-and-clear under the lock so only one path disables VFs. */
+	bifur_dev_list_lock();
+	if (adp->bifur_enabled) {
+		bifur_enabled = adp->bifur_enabled;
+		adp->bifur_enabled = BIFUR_DISABLED;
+	}
+	bifur_dev_list_unlock();
+
+	/* Best-effort cleanup: failures are ignored during module unload. */
+	if (bifur_enabled) {
+		(void)bifur_bind_unbind_dpdk_all(false);
+		(void)bifur_enable_disable_vf_all(false);
+	}
+
+	hinic3_unregister_uld(SERVICE_T_BIFUR);
+	bifur_free_adapter();
+
+	pr_info("%s exit\n", BIFUR_DRV_DESC);
+}
+
+/*
+ * Find the resource PF in the device list and return it with a reference
+ * held (caller must bifur_dev_put()). Returns NULL if no resource PF exists.
+ */
+struct bifur_lld_dev *bifur_get_resource_dev(void)
+{
+	struct bifur_adapter *adp = bifur_get_adp();
+	struct bifur_lld_dev *pos = NULL;
+
+	bifur_dev_list_lock();
+
+	/* Plain iteration is enough: nothing is removed while scanning. */
+	list_for_each_entry(pos, &adp->lld_dev_head, list) {
+		if (pos->pf_type != BIFUR_RESOURCE_PF)
+			continue;
+
+		bifur_dev_hold(pos);
+		bifur_dev_list_unlock();
+		pr_info("Find resource pf DBDF 0x%08x\n", pos->dbdf);
+		return pos;
+	}
+
+	bifur_dev_list_unlock();
+
+	pr_err("Can't find resource pf\n");
+	return NULL;
+}
+
+/*
+ * Find the shared PF matching @dbdf and return it with a reference held
+ * (caller must bifur_dev_put()). Returns NULL if not found.
+ */
+struct bifur_lld_dev *bifur_get_shared_dev_by_dbdf(u32 dbdf)
+{
+	struct bifur_adapter *adp = bifur_get_adp();
+	struct bifur_lld_dev *pos = NULL;
+
+	bifur_dev_list_lock();
+
+	/* Plain iteration is enough: nothing is removed while scanning. */
+	list_for_each_entry(pos, &adp->lld_dev_head, list) {
+		if (pos->pf_type != BIFUR_SHARED_PF || pos->dbdf != dbdf)
+			continue;
+
+		bifur_dev_hold(pos);
+		bifur_dev_list_unlock();
+		pr_info("Find shared pf DBDF 0x%08x\n", pos->dbdf);
+		return pos;
+	}
+
+	bifur_dev_list_unlock();
+
+	pr_err("Can't find shared pf 0x%x\n", dbdf);
+	return NULL;
+}
+
+/* Poll until the per-device refcount drains, or give up after the timeout. */
+static void wait_bifur_dev_unused(struct bifur_lld_dev *bifur_dev)
+{
+	int retry = 0;
+
+	while (retry < BIFUR_WAIT_TIMES &&
+	       atomic_read(&bifur_dev->bifur_dev.bifur_dev_ref) != 0) {
+		msleep(BIFUR_REMOVE_TIMESTEP);
+		retry++;
+	}
+
+	if (retry == BIFUR_WAIT_TIMES) {
+		BIFUR_DEV_WARN(bifur_dev->lld_dev,
+			       "destroy BIFUR device failed, bifur_dev_ref(%d) can not be 0 after %d ms\n",
+			       atomic_read(&bifur_dev->bifur_dev.bifur_dev_ref),
+			       (BIFUR_WAIT_TIMES * BIFUR_REMOVE_TIMESTEP));
+	}
+}
+
+/* Mark the flow device destroyed; idempotent (no-op if never created). */
+static void bifur_destroy_dev(struct bifur_lld_dev *bifur_dev)
+{
+	if (!bifur_dev->bifur_dev.has_created)
+		return;
+
+	bifur_dev->bifur_dev.has_created = false;
+
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "Destroy BIFUR device success\n");
+}
+
+/* Module entry points and metadata. */
+module_init(hibifur_init);
+module_exit(hibifur_exit);
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(BIFUR_DRV_DESC);
+MODULE_VERSION(BIFUR_DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_main.h b/drivers/net/ethernet/huawei/hibifur/bifur_main.h
new file mode 100644
index 000000000..9daac17dd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_main.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+#ifndef BIFUR_MAIN_H__
+#define BIFUR_MAIN_H__
+
+#include "hinic3_lld.h"
+#include "asm-generic/int-ll64.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+
+/* Role a PCI function plays in traffic bifurcation. */
+enum bifur_func_type {
+	BIFUR_EXCLUSIVE_PF = 1,	/* PF not participating in bifurcation */
+	BIFUR_SHARED_PF,	/* PF whose traffic is shared with DPDK VFs */
+	BIFUR_RESOURCE_PF,	/* PF that owns the pool of bifurcation VFs */
+	BIFUR_RESOURCE_VF,	/* VF carved out of the resource PF */
+	BIFUR_FUNC_TYPE_MAX
+};
+
+/* Per-device state of the bifurcation flow device. */
+struct bifur_flow_dev {
+	atomic_t bifur_dev_ref;
+	bool has_created; /* bifur dev created or not */
+};
+
+/* Driver-global state; one instance per module (see bifur_get_adp()). */
+struct bifur_adapter {
+	struct list_head lld_dev_head; /* pcie device list head */
+	struct mutex bifur_dev_mutex; /* lock for bifur dev list */
+	struct workqueue_struct *event_workq; /* global work queue */
+	u8 bifur_enabled; /* used for mark whether to enable traffic bifurcation */
+	u8 bond_bifur_enabled; /* used for mark whether to enable bond status of bifurcation vfs */
+	u16 bond_id;
+};
+
+struct bifur_vf_mgr;
+/* One entry per hinic3 PCI function on the adapter's device list. */
+struct bifur_lld_dev {
+	struct list_head list;
+	struct hinic3_lld_dev *lld_dev;
+	struct bifur_vf_mgr *vf_mgr;
+	struct bifur_flow_dev bifur_dev; /* bifur device */
+	enum bifur_func_type pf_type;
+	struct work_struct netdev_link;
+	u8 link_status;
+	u32 dbdf;
+};
+
+/* Lookup helpers return with a reference held; release with bifur_dev_put(). */
+void bifur_dev_hold(struct bifur_lld_dev *bifur_dev);
+void bifur_dev_put(struct bifur_lld_dev *bifur_dev);
+void bifur_dev_list_lock(void);
+void bifur_dev_list_unlock(void);
+struct bifur_lld_dev *bifur_get_resource_dev(void);
+struct bifur_adapter *bifur_get_adp(void);
+struct bifur_lld_dev *bifur_get_shared_dev_by_dbdf(u32 dbdf);
+#endif
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_pfile.c b/drivers/net/ethernet/huawei/hibifur/bifur_pfile.c
new file mode 100644
index 000000000..3621bb10d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_pfile.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include "nic_mpu_cmd.h"
+#include "hinic3_hw.h"
+#include "hinic3_mgmt_interface.h"
+#include "hinic3_common.h"
+
+#include "bifur_common.h"
+#include "bifur_vf_mgr.h"
+#include "bifur_main.h"
+#include "bifur_pfile.h"
+
+#define BIFUR_GLOBAL_CDEV_NAME "bifur_gdev"
+/* Singleton control char device shared by all bifur instances. */
+struct bifur_cdev g_bifur_global_dev;
+/* Tracks every process that currently has the global cdev open. */
+struct bifur_global_file_list_t g_bifur_global_file_list;
+
+struct class *g_bifur_class;
+
+/*
+ * Tear down the global char device: unlink any still-open proc files,
+ * then destroy device/class/cdev and release the dev_t region.
+ * NOTE(review): entries are only unlinked here, not freed — their memory is
+ * released in bifur_global_dev_close(), which will list_del() the node a
+ * second time; confirm no file can still be open when this runs.
+ */
+void bifur_global_dev_uninit(void)
+{
+	struct bifur_proc_file_t *tmp = NULL;
+	struct bifur_proc_file_t *proc_file = NULL;
+	struct bifur_cdev *bdev = &g_bifur_global_dev;
+
+	mutex_lock(&g_bifur_global_file_list.lock);
+	list_for_each_entry_safe(proc_file, tmp, &g_bifur_global_file_list.list, node) {
+		list_del(&proc_file->node);
+	}
+	mutex_unlock(&g_bifur_global_file_list.lock);
+
+	device_destroy(g_bifur_class, bdev->cdev_id);
+	class_destroy(g_bifur_class);
+	g_bifur_class = NULL;
+	cdev_del(&bdev->cdev);
+	unregister_chrdev_region(bdev->cdev_id, 1);
+	pr_info("Bifur destroy global cdev(%s) succeed.", BIFUR_GLOBAL_CDEV_NAME);
+}
+
+/* Unlink one proc-file entry from the global open-file list. */
+void bifur_global_file_del(struct bifur_proc_file_t *proc_file)
+{
+	mutex_lock(&g_bifur_global_file_list.lock);
+	list_del(&proc_file->node);
+	mutex_unlock(&g_bifur_global_file_list.lock);
+}
+
+/* release() for the global cdev: unlink and free this process's entry. */
+int bifur_global_dev_close(struct inode *inode, struct file *filp)
+{
+	struct bifur_proc_file_t *proc_file = filp->private_data;
+
+	pr_info("Close global proc_file(%p), filp(%p).", proc_file, filp);
+	bifur_global_file_del(proc_file); // Direct chain removal without traversing
+	kfree(proc_file);
+
+	return 0;
+}
+
+/* One dpdk process has only one process_file. A unique file is added to the global list. */
+/* One dpdk process has only one process_file. A unique file is added to the global list. */
+int bifur_global_file_add(struct bifur_proc_file_t *add_proc_file)
+{
+	struct bifur_proc_file_t *entry = NULL;
+
+	mutex_lock(&g_bifur_global_file_list.lock);
+
+	/* Reject a second open from the same PID. */
+	list_for_each_entry(entry, &g_bifur_global_file_list.list, node) {
+		if (entry->pid != add_proc_file->pid)
+			continue;
+
+		mutex_unlock(&g_bifur_global_file_list.lock);
+		pr_err("Process(%u) file is exist.", entry->pid);
+		return -EPERM;
+	}
+
+	list_add_tail(&add_proc_file->node, &g_bifur_global_file_list.list);
+	mutex_unlock(&g_bifur_global_file_list.lock);
+
+	return 0;
+}
+
+/* Allocate a per-process file record, keyed by the caller's PID. */
+struct bifur_proc_file_t *bifur_alloc_proc_file(void)
+{
+	struct bifur_proc_file_t *pfile;
+
+	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+	if (!pfile)
+		return NULL;
+
+	pfile->pid = current->pid;
+	return pfile;
+}
+
+/*
+ * open() for the global cdev: one record per process; a second open from
+ * the same PID fails with -EEXIST.
+ */
+int bifur_global_dev_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	struct bifur_proc_file_t *proc_file = bifur_alloc_proc_file();
+
+	if (!proc_file)
+		return -ENOMEM;
+
+	ret = bifur_global_file_add(proc_file);
+	if (ret != 0) {
+		pr_err("Duplicate processes(%u) open global char dev.", current->pid);
+		kfree(proc_file);
+		return -EEXIST;
+	}
+	filp->private_data = proc_file;
+
+	pr_info("Open proc global proc file success, proc_file(%p), filp(%p) pid(%u).",
+		proc_file, filp, proc_file->pid);
+
+	return nonseekable_open(inode, filp);
+}
+
+/*
+ * Report the bifurcation role (func_type) of the function identified by
+ * the dbdf in the request. Fixes: the device-list walk previously ran
+ * without bifur_dev_list_lock() (every other traversal holds it), and a
+ * VF match did not stop the outer scan.
+ */
+static int bifur_drv_cmd_func_attr_get(struct file *filp, struct bifur_msg *cmd)
+{
+	struct bifur_lld_dev *bifur_dev = NULL;
+	struct bifur_adapter *adp = bifur_get_adp();
+	struct list_head *head = &adp->lld_dev_head;
+	int i;
+
+	struct bifur_func_attr_get_cmd_msg *query_cmd;
+	struct bifur_func_attr_get_cmd_rsp *query_resp;
+
+	query_cmd = (struct bifur_func_attr_get_cmd_msg *)(cmd->in_buf);
+	query_resp = (struct bifur_func_attr_get_cmd_rsp *)(cmd->out_buf);
+	if ((!query_cmd) || (!query_resp) ||
+	    cmd->in_buf_len < sizeof(struct bifur_func_attr_get_cmd_msg) ||
+	    cmd->out_buf_len < sizeof(struct bifur_func_attr_get_cmd_rsp)) {
+		pr_err("Input param fail, in_buf_len(%u), out_buf_len(%u).",
+		       cmd->in_buf_len, cmd->out_buf_len);
+		return -EPERM;
+	}
+
+	query_resp->func_type = 0;
+	cmd->out_data_len = sizeof(struct bifur_func_attr_get_cmd_rsp);
+
+	bifur_dev_list_lock();
+	if (adp->bifur_enabled == BIFUR_DISABLED) {
+		bifur_dev_list_unlock();
+		query_resp->func_type = BIFUR_EXCLUSIVE_PF;
+		pr_info("Didn't enable traffic bifurcation, functions are exclusive.\n");
+		return 0;
+	}
+
+	/* Keep the lock across the traversal so devices cannot vanish. */
+	list_for_each_entry(bifur_dev, head, list) {
+		if (bifur_dev->dbdf == query_cmd->dbdf) {
+			query_resp->func_type = bifur_dev->pf_type;
+			break;
+		}
+
+		if (bifur_dev->pf_type != BIFUR_RESOURCE_PF)
+			continue;
+
+		for (i = 0; i < bifur_dev->vf_mgr->vf_sum; ++i) {
+			if (bifur_dev->vf_mgr->vf_info[i].vf_dbdf == query_cmd->dbdf) {
+				query_resp->func_type = BIFUR_RESOURCE_VF;
+				break;
+			}
+		}
+		if (query_resp->func_type == BIFUR_RESOURCE_VF)
+			break;	/* previously only the inner loop broke */
+	}
+	bifur_dev_list_unlock();
+
+	pr_info("Do get func attr cmd success\n");
+	return 0;
+}
+
+/* Return a VF slot to the pool (clears in_use, drops the usage counter). */
+static inline void bifur_release_vf(struct bifur_vf_mgr *vf_mgr, struct bifur_vf_info *vf_info)
+{
+	mutex_lock(&vf_mgr->vf_mgr_mutex);
+	vf_info->in_use = 0;
+	vf_mgr->vf_in_use--;
+	mutex_unlock(&vf_mgr->vf_mgr_mutex);
+}
+
+/*
+ * Translate a DBDF to the hinic3 global function id, searching PFs first
+ * and then the resource PF's VF table. Returns -ENODEV if not found.
+ * NOTE(review): walks the lld_dev list without bifur_dev_list_lock() —
+ * confirm all callers serialize against device hot-removal.
+ */
+static int get_global_func_id_by_dbdf(u32 dbdf, u16 *glb_func_id)
+{
+	struct bifur_lld_dev *bifur_dev = NULL;
+	struct bifur_lld_dev *tmp_dev = NULL;
+	struct bifur_adapter *adp = bifur_get_adp();
+	struct list_head *head = &adp->lld_dev_head;
+	int i;
+
+	list_for_each_entry_safe(bifur_dev, tmp_dev, head, list) {
+		if (bifur_dev->dbdf == dbdf) {
+			*glb_func_id = hinic3_global_func_id(bifur_dev->lld_dev->hwdev);
+			return 0;
+		}
+
+		if (bifur_dev->pf_type == BIFUR_RESOURCE_PF) {
+			for (i = 0; i < bifur_dev->vf_mgr->vf_sum; ++i) {
+				if (bifur_dev->vf_mgr->vf_info[i].vf_dbdf == dbdf) {
+					*glb_func_id = bifur_dev->vf_mgr->vf_info[i].glb_func_id;
+					return 0;
+				}
+			}
+		}
+	}
+	return -ENODEV;
+}
+
+/*
+ * Program the MPU so traffic from the VF at @src_dbdf egresses via the
+ * function at @dst_dbdf. Fixes: the error messages said "Do not exit"
+ * instead of reporting that the function does not exist.
+ */
+static int bifur_set_vf_tx_port(struct bifur_lld_dev *bifur_dev, u32 src_dbdf, u32 dst_dbdf)
+{
+	int err;
+	struct hinic3_func_er_value_cmd vf_fwd_id_cfg = {0};
+	u16 out_size = sizeof(struct hinic3_func_er_value_cmd);
+
+	err = get_global_func_id_by_dbdf(src_dbdf, &vf_fwd_id_cfg.vf_id);
+	if (err != 0) {
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Vf does not exist, vf(%u)\n", src_dbdf);
+		return err;
+	}
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "src_vf(0x%x), vf_id(%u)\n",
+		       src_dbdf, vf_fwd_id_cfg.vf_id);
+
+	err = get_global_func_id_by_dbdf(dst_dbdf, &vf_fwd_id_cfg.er_fwd_id);
+	if (err != 0) {
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Port does not exist, port dbdf(%u)\n",
+			      dst_dbdf);
+		return err;
+	}
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "dst_dbdf(0x%x), er_fwd_id(%u)\n",
+		       dst_dbdf, vf_fwd_id_cfg.er_fwd_id);
+
+	err = hinic3_msg_to_mgmt_sync(bifur_dev->lld_dev->hwdev, HINIC3_MOD_L2NIC,
+				      HINIC3_NIC_CMD_SET_FUNC_ER_FWD_ID, &vf_fwd_id_cfg,
+				      sizeof(vf_fwd_id_cfg), &vf_fwd_id_cfg, &out_size, 0,
+				      HINIC3_CHANNEL_DEFAULT);
+	/* The firmware reply is valid only if err==0, status==0, out_size!=0. */
+	if (vf_fwd_id_cfg.msg_head.status != 0 || err != 0 || out_size == 0) {
+		BIFUR_DEV_ERR(bifur_dev->lld_dev,
+			      "Failed to set VF forward id config. err(%d), sts(%u), out_size(%u)\n",
+			      err, vf_fwd_id_cfg.msg_head.status, out_size);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * Allocate a bifurcation VF bound to the PF named in the request and
+ * program its TX forwarding. Fixes: both error paths logged through
+ * bifur_dev *after* bifur_dev_put() — a use-after-put if the reference
+ * was the last one. Logging now happens before the put.
+ */
+static int bifur_drv_cmd_vf_alloc(struct file *filp, struct bifur_msg *cmd)
+{
+	int err;
+	struct bifur_lld_dev *bifur_dev = NULL;
+	struct bifur_vf_info *vf_info = NULL;
+	struct bifur_vf_alloc_cmd_msg *query_cmd = (struct bifur_vf_alloc_cmd_msg *)(cmd->in_buf);
+	struct bifur_vf_alloc_cmd_rsp *query_resp = (struct bifur_vf_alloc_cmd_rsp *)(cmd->out_buf);
+	struct bifur_adapter *adp = bifur_get_adp();
+
+	if (!query_cmd || !query_resp ||
+	    cmd->in_buf_len < sizeof(struct bifur_vf_alloc_cmd_msg) ||
+	    cmd->out_buf_len < sizeof(struct bifur_vf_alloc_cmd_rsp)) {
+		pr_err("Input param fail, in_buf_len(%u), out_buf_len(%u).",
+		       cmd->in_buf_len, cmd->out_buf_len);
+		return -EINVAL;
+	}
+
+	bifur_dev_list_lock();
+	if (adp->bifur_enabled == BIFUR_DISABLED) {
+		bifur_dev_list_unlock();
+		pr_err("Didn't enable traffic bifurcation.\n");
+		return -EPERM;
+	}
+	bifur_dev_list_unlock();
+
+	/* found the bifur device */
+	bifur_dev = bifur_get_resource_dev();
+	if (!bifur_dev)
+		return -EINVAL;
+
+	vf_info = bifur_find_vf(bifur_dev->vf_mgr, query_cmd->dbdf);
+	if (!vf_info) {
+		/* Log before dropping the reference; bifur_dev is invalid after put. */
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Alloc vf failed, %u vf in use\n",
+			      bifur_dev->vf_mgr->vf_in_use);
+		bifur_dev_put(bifur_dev);
+		return -EFAULT;
+	}
+
+	err = bifur_set_vf_tx_port(bifur_dev, vf_info->vf_dbdf, query_cmd->dbdf);
+	if (err) {
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Set vf forward id failed, vf(%u), dst_pf(%u)\n",
+			      vf_info->vf_dbdf, query_cmd->dbdf);
+		bifur_release_vf(bifur_dev->vf_mgr, vf_info);
+		bifur_dev_put(bifur_dev);
+		return err;
+	}
+	query_resp->vf_dbdf = vf_info->vf_dbdf;
+
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "pf_dbdf: 0x%x\n", query_cmd->dbdf);
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "alloc_vf_dbdf: 0x%x\n", query_resp->vf_dbdf);
+
+	cmd->out_data_len = sizeof(struct bifur_vf_alloc_cmd_rsp);
+	bifur_dev_put(bifur_dev);
+	pr_info("Do vf alloc cmd success\n");
+	return 0;
+}
+
+/*
+ * Return the MAC address of the shared PF backing the VF named in the
+ * request: VF dbdf -> owning PF dbdf -> shared dev -> netdev -> dev_addr.
+ * NOTE(review): no reference is taken/dropped on net_dev here — confirm
+ * hinic3_get_netdev_by_lld() does not hand out a counted reference.
+ */
+static int bifur_drv_cmd_mac_get(struct file *filp, struct bifur_msg *cmd)
+{
+	int ret;
+	u32 pf_dbdf;
+	struct bifur_lld_dev *bifur_dev = NULL;
+	struct bifur_lld_dev *shared_bifur_dev = NULL;
+	struct net_device *net_dev = NULL;
+	struct bifur_adapter *adp = bifur_get_adp();
+
+	struct bifur_mac_get_cmd_msg *query_cmd = (struct bifur_mac_get_cmd_msg *)(cmd->in_buf);
+	struct bifur_mac_get_cmd_rsp *query_resp = (struct bifur_mac_get_cmd_rsp *)(cmd->out_buf);
+
+	if (!query_cmd || !query_resp ||
+	    cmd->in_buf_len < sizeof(struct bifur_mac_get_cmd_msg) ||
+	    cmd->out_buf_len < sizeof(struct bifur_mac_get_cmd_rsp)) {
+		pr_err("Input param fail, in_buf_len(%u), out_buf_len(%u).",
+		       cmd->in_buf_len, cmd->out_buf_len);
+		return -EINVAL;
+	}
+
+	bifur_dev_list_lock();
+	if (adp->bifur_enabled == BIFUR_DISABLED) {
+		bifur_dev_list_unlock();
+		pr_err("Didn't enable traffic bifurcation.\n");
+		return -EPERM;
+	}
+	bifur_dev_list_unlock();
+
+	bifur_dev = bifur_get_resource_dev();
+	if (!bifur_dev)
+		return -EINVAL;
+
+	ret = bifur_find_pf_by_vf(bifur_dev->vf_mgr, query_cmd->dbdf, &pf_dbdf);
+	if (ret != 0) {
+		bifur_dev_put(bifur_dev);
+		pr_err("Find pf dbdf failed, vf dbdf(0x%x)\n", query_cmd->dbdf);
+		return -EFAULT;
+	}
+
+	/* found shared dev by pf dbdf */
+	shared_bifur_dev = bifur_get_shared_dev_by_dbdf(pf_dbdf);
+	if (!shared_bifur_dev) {
+		bifur_dev_put(bifur_dev);
+		return -EINVAL;
+	}
+
+	/* found net device by shared dev lld dev */
+	net_dev = hinic3_get_netdev_by_lld(shared_bifur_dev->lld_dev);
+	if (!net_dev) {
+		bifur_dev_put(bifur_dev);
+		bifur_dev_put(shared_bifur_dev);
+		pr_err("Get net device by lld dev failed, pf_dbdf(0x%x)\n", pf_dbdf);
+		return -EINVAL;
+	}
+
+	ether_addr_copy(query_resp->mac, net_dev->dev_addr);
+	bifur_dev_put(bifur_dev);
+	bifur_dev_put(shared_bifur_dev);
+
+	cmd->out_data_len = sizeof(struct bifur_mac_get_cmd_rsp);
+	pr_info("DO Get mac cmd of vf success\n");
+	return 0;
+}
+
+/* Dispatch table mapping a drv_cmd to its handler (see bifur_cmd_exec()). */
+struct {
+	enum bifur_cmd_m drv_cmd;
+	int (*cmd_handle)(struct file *filp, struct bifur_msg *msg);
+} g_bifur_cmd_table[] = {
+	{BIFUR_DRV_CMD_FUNC_ATTR_GET, bifur_drv_cmd_func_attr_get},
+	{BIFUR_DRV_CMD_VF_ALLOC, bifur_drv_cmd_vf_alloc},
+	{BIFUR_DRV_CMD_MAC_GET, bifur_drv_cmd_mac_get},
+};
+
+/* Dispatch @msg to the handler registered for its drv_cmd. */
+int bifur_cmd_exec(struct file *file, struct bifur_msg *msg)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(g_bifur_cmd_table); idx++) {
+		if (g_bifur_cmd_table[idx].drv_cmd != msg->drv_cmd)
+			continue;
+		return g_bifur_cmd_table[idx].cmd_handle(file, msg);
+	}
+
+	pr_err("Cmd(%u) is not supported.", msg->drv_cmd);
+	return -EOPNOTSUPP;
+}
+
+/*
+ * Copy the fixed-size message header into *usr and stage kernel-side
+ * in/out buffers into *knl.
+ * Security fix: the old code did copy_from_user(usr, ubuf, size) with
+ * size allowed up to BIFUR_MAX_CMD_LEN (1 MB), overflowing the caller's
+ * on-stack struct bifur_msg. Only sizeof(*usr) bytes may land in *usr.
+ */
+int bifur_msg_copy_from_usr(const char __user *ubuf, size_t size, struct bifur_msg *usr,
+			    struct bifur_msg *knl)
+{
+	u64 ret = 0;
+
+	if (size < sizeof(*usr))
+		return -EINVAL;
+
+	ret = copy_from_user(usr, ubuf, sizeof(*usr));
+	if (ret != 0) {
+		pr_err("Copy msg from user failed, ret(0x%llx).", ret);
+		return -EFAULT;
+	}
+
+	if (usr->in_buf && (usr->in_buf_len == 0 || usr->in_buf_len > BIFUR_MAX_CMD_LEN)) {
+		pr_err("Invalid in buf param, cmd(%u) in_buf_len(%u).",
+		       usr->drv_cmd, usr->in_buf_len);
+		return -EINVAL;
+	}
+	if (usr->out_buf && (usr->out_buf_len == 0 || usr->out_buf_len > BIFUR_MAX_CMD_LEN)) {
+		pr_err("Invalid out buf param, cmd(%u) out_buf_len(%u).",
+		       usr->drv_cmd, usr->out_buf_len);
+		return -EINVAL;
+	}
+	knl->drv_cmd = usr->drv_cmd;
+	knl->in_buf_len = usr->in_buf_len;
+	knl->out_buf_len = usr->out_buf_len;
+
+	if (usr->in_buf) {
+		knl->in_buf = kzalloc((size_t)usr->in_buf_len, GFP_KERNEL);
+		if (!knl->in_buf)
+			return -ENOMEM;
+		ret = copy_from_user(knl->in_buf, usr->in_buf, (size_t)usr->in_buf_len);
+		if (ret != 0) {
+			pr_err("Cmd(%u) copy in_buf from user failed, ret(0x%llx).",
+			       usr->drv_cmd, ret);
+			BUFUR_CHECK_KFREE(knl->in_buf);
+			return -EFAULT;
+		}
+	}
+
+	if (usr->out_buf) {
+		knl->out_buf = kzalloc((size_t)usr->out_buf_len, GFP_KERNEL);
+		if (!knl->out_buf) {
+			BUFUR_CHECK_KFREE(knl->in_buf);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/* Free the kernel-side staging buffers (NULL-safe, pointers cleared). */
+void bifur_free_knl_msg_buf(struct bifur_msg *msg)
+{
+	BUFUR_CHECK_KFREE(msg->in_buf);
+	BUFUR_CHECK_KFREE(msg->out_buf);
+}
+
+/* Copy the handler's response back to user space, clamped to the user buffer. */
+int bifur_msg_copy_to_usr(struct bifur_msg *usr, struct bifur_msg *knl)
+{
+	u64 ret;
+	u32 len;
+
+	/* Nothing to return. */
+	if (!usr->out_buf || knl->out_data_len == 0) {
+		usr->out_data_len = 0;
+		return 0;
+	}
+
+	len = knl->out_data_len;
+	if (len > usr->out_buf_len)
+		len = usr->out_buf_len;
+
+	ret = copy_to_user(usr->out_buf, knl->out_buf, (ulong)len);
+	if (ret != 0) {
+		pr_err("Cmd(%u) copy out_buf to user failed, ret(0x%llx).", usr->drv_cmd, ret);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * write() backend: parse the user message, run the command, copy the
+ * response back. Kernel-side buffers are released on every exit path.
+ */
+ssize_t bifur_file_write(struct file *file, const char __user *ubuf, size_t size, loff_t *pos)
+{
+	struct bifur_msg usr_msg = { 0 };
+	struct bifur_msg knl_msg = { 0 };
+	ssize_t ret;
+
+	if (!ubuf || size < sizeof(struct bifur_msg) || size > BIFUR_MAX_CMD_LEN) {
+		pr_err("Invalid param, size(%lu).", size);
+		return -EINVAL;
+	}
+
+	ret = bifur_msg_copy_from_usr(ubuf, size, &usr_msg, &knl_msg);
+	if (ret != 0)
+		return ret;
+
+	ret = bifur_cmd_exec(file, &knl_msg);
+	if (ret != 0) {
+		ret = (ret < 0) ? ret : -EFAULT;
+		goto out_free;
+	}
+
+	ret = bifur_msg_copy_to_usr(&usr_msg, &knl_msg);
+
+out_free:
+	bifur_free_knl_msg_buf(&knl_msg);
+	return ret;
+}
+
+/* Thin wrapper so the same write path serves the proc interface. */
+ssize_t bifur_proc_write(struct file *file, const char __user *ubuf, size_t size, loff_t *pos)
+{
+	return bifur_file_write(file, ubuf, size, pos);
+}
+
+/* File operations for the global control char device. */
+static const struct file_operations g_bifur_global_cdev_fops = {
+	.owner = THIS_MODULE,
+	.open = bifur_global_dev_open,
+	.release = bifur_global_dev_close,
+	.write = bifur_proc_write,
+};
+
+/* When the module is initialized, prepare the global character device. */
+/*
+ * When the module is initialized, prepare the global character device.
+ * Fixes: the open-file list mutex/list were initialized only after
+ * device_create(), so an open() racing with init could touch an
+ * uninitialized list; error unwinding now does cdev_del() before
+ * unregister_chrdev_region() (the conventional teardown order).
+ */
+int bifur_global_dev_init(void)
+{
+	char *name = BIFUR_GLOBAL_CDEV_NAME;
+	struct device *device;
+	struct bifur_cdev *bdev = &g_bifur_global_dev;
+	int ret;
+
+	/* Must be ready before the device becomes openable below. */
+	mutex_init(&g_bifur_global_file_list.lock);
+	INIT_LIST_HEAD(&g_bifur_global_file_list.list);
+
+	ret = alloc_chrdev_region(&bdev->cdev_id, 0, 1, name);
+	if (ret < 0) {
+		pr_err("Bifur cdev(%s) alloc card chrdev region fail, ret(%d).", name, ret);
+		return -EFAULT;
+	}
+
+	cdev_init(&bdev->cdev, &g_bifur_global_cdev_fops);
+
+	ret = cdev_add(&bdev->cdev, bdev->cdev_id, 1);
+	if (ret < 0) {
+		unregister_chrdev_region(bdev->cdev_id, 1);
+		pr_err("Bifur cdev(%s) add cdev fail, ret(%d).", name, ret);
+		return -EFAULT;
+	}
+
+	g_bifur_class = class_create(BIFUR_MOD_NAME);
+	if (IS_ERR(g_bifur_class)) {
+		cdev_del(&bdev->cdev);
+		unregister_chrdev_region(bdev->cdev_id, 1);
+		pr_err("Bifur create class fail.");
+		return -EEXIST;
+	}
+
+	device = device_create(g_bifur_class, NULL, bdev->cdev_id, NULL, "%s", name);
+	if (IS_ERR(device)) {
+		class_destroy(g_bifur_class);
+		cdev_del(&bdev->cdev);
+		unregister_chrdev_region(bdev->cdev_id, 1);
+		pr_err("Bifur cdev(%s) create device fail.", name);
+		return -EFAULT;
+	}
+
+	pr_info("Bifur create global cdev(%s) succeed.", name);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_pfile.h b/drivers/net/ethernet/huawei/hibifur/bifur_pfile.h
new file mode 100644
index 000000000..396d76311
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_pfile.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+#ifndef BIFUR_PFILE_H
+#define BIFUR_PFILE_H
+
+/* Upper bound for a single user<->kernel command payload (1 MB). */
+#define BIFUR_MAX_CMD_LEN (1024 * 1024)
+
+#define BIFUR_MOD_NAME "bifur"
+#define BIFUR_PROC_DIR_MOD 0550
+
+/* kfree() then NULL the pointer; NOTE(review): "BUFUR" looks like a typo
+ * for "BIFUR" — renaming would touch all users, so it is kept as-is. */
+#define BUFUR_CHECK_KFREE(m) \
+do { \
+	kfree(m); \
+	m = NULL; \
+} while (0)
+
+#define BIFUR_UNREF_PARAM(x) ((x))
+
+/* Header exchanged with user space on every write(); in_buf/out_buf are
+ * user pointers on entry and kernel staging buffers inside the driver. */
+struct bifur_msg {
+	u32 drv_cmd;
+	u32 in_buf_len;
+	u32 out_buf_len;
+	u32 out_data_len;
+	void *in_buf;
+	void *out_buf;
+	u8 rsvd[24];
+};
+
+/* Command identifiers carried in bifur_msg.drv_cmd. */
+enum bifur_cmd_m {
+	BIFUR_DRV_CMD_FUNC_ATTR_GET = 1,
+	BIFUR_DRV_CMD_VF_ALLOC,
+	BIFUR_DRV_CMD_MAC_GET,
+	BIFUR_CMD_BUTT
+};
+
+struct bifur_vf_alloc_cmd_msg {
+	unsigned int dbdf;
+};
+
+struct bifur_vf_alloc_cmd_rsp {
+	u32 vf_dbdf;
+};
+
+struct bifur_func_attr_get_cmd_msg {
+	unsigned int dbdf;
+};
+
+struct bifur_func_attr_get_cmd_rsp {
+	u32 func_type;	/* enum bifur_func_type value */
+};
+
+struct bifur_mac_get_cmd_msg {
+	unsigned int dbdf;
+};
+
+struct bifur_mac_get_cmd_rsp {
+	u8 mac[6];
+};
+
+/* Mutex-protected list of processes holding the global cdev open. */
+struct bifur_global_file_list_t {
+	struct mutex lock;
+	struct list_head list;
+};
+
+/* One entry per process that opened the global cdev. */
+struct bifur_proc_file_t {
+	struct list_head node;
+	pid_t pid;
+};
+
+int bifur_global_dev_init(void);
+void bifur_global_dev_uninit(void);
+#endif // BIFUR_PFILE_H
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.c b/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.c
new file mode 100644
index 000000000..71974c85d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+#include "bifur_common.h"
+#include "bifur_pfile.h"
+#include "bifur_main.h"
+#include "bifur_vf_mgr.h"
+
+#define PDE_DATA(inode) pde_data(inode)
+
+/*
+ * Allocate the VF manager, its VF table, and the proc root directory.
+ * Fix: bifur_dev->vf_mgr was assigned before the vf_info allocation, so
+ * the vf_info-failure path freed vf_mgr but left bifur_dev->vf_mgr as a
+ * dangling pointer. The manager is now published only once fully built,
+ * and the always-true check of a freshly kzalloc'd bifur_proc_root is gone.
+ */
+int bifur_alloc_vf_mgr(struct bifur_lld_dev *bifur_dev)
+{
+	struct bifur_vf_mgr *vf_mgr = NULL;
+	struct bifur_vf_info *vf_info = NULL;
+
+	vf_mgr = kzalloc(sizeof(*vf_mgr), GFP_KERNEL);
+	if (!vf_mgr) {
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Alloc vf mgr failed\n");
+		return -ENOMEM;
+	}
+
+	vf_info = kzalloc(sizeof(*vf_info) * BIFUR_VF_NUM, GFP_KERNEL);
+	if (!vf_info) {
+		kfree(vf_mgr);
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Alloc vf info failed\n");
+		return -ENOMEM;
+	}
+
+	vf_mgr->vf_sum = 0;
+	vf_mgr->vf_in_use = 0;
+	vf_mgr->vf_info = vf_info;
+	mutex_init(&vf_mgr->vf_mgr_mutex);
+
+	vf_mgr->bifur_proc_root = proc_mkdir_mode(BIFUR_MOD_NAME, BIFUR_PROC_DIR_MOD,
+						  init_net.proc_net);
+	if (!vf_mgr->bifur_proc_root) {
+		kfree(vf_info);
+		kfree(vf_mgr);
+		BIFUR_DEV_ERR(bifur_dev->lld_dev, "Bifur create dir failed.");
+		return -ENOMEM;
+	}
+
+	/* Publish only after full construction so no error path can leave
+	 * bifur_dev->vf_mgr pointing at freed memory.
+	 */
+	bifur_dev->vf_mgr = vf_mgr;
+
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "Alloc vf mgr success\n");
+	return 0;
+}
+
+/*
+ * Release the VF manager and its VF table, then remove the proc root.
+ * NOTE(review): remove_proc_entry() runs unconditionally — confirm it is
+ * never reached when bifur_alloc_vf_mgr() failed before creating the dir.
+ */
+void bifur_free_vf_mgr(struct bifur_lld_dev *bifur_dev)
+{
+	if (bifur_dev->vf_mgr && bifur_dev->vf_mgr->vf_info) {
+		kfree(bifur_dev->vf_mgr->vf_info);
+		bifur_dev->vf_mgr->vf_info = NULL;
+	}
+
+	if (bifur_dev->vf_mgr != NULL) {
+		kfree(bifur_dev->vf_mgr);
+		bifur_dev->vf_mgr = NULL;
+	}
+
+	remove_proc_entry(BIFUR_MOD_NAME, init_net.proc_net);
+	BIFUR_DEV_INFO(bifur_dev->lld_dev, "Free vf mgr success\n");
+}
+
+/* Claim the first idle VF slot, bind it to @pf_dbdf, and return it
+ * (NULL if every slot is in use). */
+struct bifur_vf_info *bifur_find_vf(struct bifur_vf_mgr *vf_mgr, u32 pf_dbdf)
+{
+	struct bifur_vf_info *slot = NULL;
+	u32 idx;
+
+	mutex_lock(&vf_mgr->vf_mgr_mutex);
+	for (idx = 0; idx < vf_mgr->vf_sum; ++idx) {
+		slot = &vf_mgr->vf_info[idx];
+		if (slot->in_use)
+			continue;
+
+		slot->in_use = 1;
+		slot->pf_dbdf = pf_dbdf;
+		vf_mgr->vf_in_use++;
+		mutex_unlock(&vf_mgr->vf_mgr_mutex);
+		return slot;
+	}
+	mutex_unlock(&vf_mgr->vf_mgr_mutex);
+
+	return NULL;
+}
+
+/* Take a reference on a VF record. */
+void bifur_vf_info_hold(struct bifur_vf_info *dev)
+{
+	atomic_inc(&dev->refcount);
+}
+
+/* Drop a reference; the last put only logs (no memory is freed here). */
+void bifur_vf_info_put(struct bifur_vf_info *dev)
+{
+	if (atomic_dec_and_test(&dev->refcount))
+		pr_info("Dev(%s) pci_bdf(0x%x) comp complete.", dev->name, dev->vf_dbdf);
+}
+
+/* Link an open-file record onto its device's list. */
+void bifur_dev_file_add(struct bifur_dev_file_t *dev_file)
+{
+	mutex_lock(&dev_file->dev->dev_file_mgt.lock);
+	list_add_tail(&dev_file->node, &dev_file->dev->dev_file_mgt.list);
+	mutex_unlock(&dev_file->dev->dev_file_mgt.lock);
+}
+
+/* Unlink an open-file record from its device's list. */
+void bifur_dev_file_del(struct bifur_dev_file_t *dev_file)
+{
+	mutex_lock(&dev_file->dev->dev_file_mgt.lock);
+	list_del(&dev_file->node);
+	mutex_unlock(&dev_file->dev->dev_file_mgt.lock);
+}
+
+/*
+ * open() for a per-VF proc node: attach an open-file record to the VF
+ * (looked up via the proc entry's private data) and hold the VF record.
+ */
+int bifur_proc_open(struct inode *inode, struct file *filp)
+{
+	struct bifur_vf_info *dev = (struct bifur_vf_info *)PDE_DATA(file_inode(filp));
+	struct bifur_dev_file_t *dev_file = kzalloc(sizeof(*dev_file), GFP_KERNEL);
+
+	if (!dev_file)
+		return -ENOMEM;
+
+	atomic_set(&dev_file->refcount, 1);
+	dev_file->dev = dev;
+	bifur_vf_info_hold(dev_file->dev);
+
+	bifur_dev_file_add(dev_file);
+	filp->private_data = dev_file;
+
+	pr_info("Open proc dev(%s) success, filp(%p).", dev->name, filp);
+
+	return nonseekable_open(inode, filp);
+}
+
+/*
+ * release() for a per-VF proc node: return the VF slot to the pool, drop
+ * the reference taken at open, and free the open-file record.
+ */
+int bifur_proc_close(struct inode *inode, struct file *filp)
+{
+	struct bifur_dev_file_t *dev_file = filp->private_data;
+	struct bifur_vf_info *dev = dev_file->dev;
+	struct bifur_vf_mgr *vf_mgr = dev->vf_mgr;
+
+	pr_info("Close proc dev(%s), pci_bdf(0x%x), filp(%p).", dev_file->dev->name,
+		dev_file->dev->vf_dbdf, filp);
+
+	bifur_dev_file_del(dev_file);
+	/* Slot release and refcount drop happen atomically w.r.t. allocation. */
+	mutex_lock(&vf_mgr->vf_mgr_mutex);
+	dev->in_use = 0;
+	dev->pf_dbdf = 0;
+	vf_mgr->vf_in_use--;
+	bifur_vf_info_put(dev_file->dev);
+	mutex_unlock(&vf_mgr->vf_mgr_mutex);
+
+	memset(dev_file, 0, sizeof(*dev_file));
+	kfree(dev_file);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+/* Per-VF proc node ops; kernels >= 5.6 use proc_ops, older use file_operations. */
+#ifdef HAVE_PROC_OPS
+const struct proc_ops g_bifur_proc_fops = {
+	.proc_open = bifur_proc_open,
+	.proc_release = bifur_proc_close,
+};
+#else
+const struct file_operations g_bifur_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = bifur_proc_open,
+	.llseek = NULL,
+	.release = bifur_proc_close,
+};
+#endif
+
+/*
+ * Create /proc/net/bifur/<dbdf>/bifur_vdev for one VF.
+ * Fix: formats the directory name with snprintf and checks for
+ * truncation instead of using unbounded sprintf.
+ */
+int bifur_dev_proc_build(struct bifur_vf_info *dev)
+{
+	struct proc_dir_entry *dir = NULL;
+	char pci_dev_name[BIFURNAMSIZ] = { 0 };
+	struct bifur_vf_mgr *vf_mgr = dev->vf_mgr;
+
+	int ret = snprintf(pci_dev_name, sizeof(pci_dev_name), "0x%x", dev->vf_dbdf);
+
+	if (ret < 0 || ret >= (int)sizeof(pci_dev_name)) {
+		pr_err("Bifur dev(%s) proc dir create fail, bdf(0x%x).", dev->name, dev->vf_dbdf);
+		return -ENOEXEC;
+	}
+
+	dev->cdev.proc_dir = proc_mkdir_mode(pci_dev_name, BIFUR_PROC_DIR_MOD,
+					     vf_mgr->bifur_proc_root);
+	if (!dev->cdev.proc_dir) {
+		pr_err("Bifur dev(%s) proc dir create fail.", dev->name);
+		return -EINVAL;
+	}
+
+	dir = proc_create_data(BIFUR_CDEV_PROC_NAME, BIFUR_PROC_FILE_MOD,
+			       dev->cdev.proc_dir, &g_bifur_proc_fops, dev);
+	if (!dir) {
+		remove_proc_entry(pci_dev_name, vf_mgr->bifur_proc_root);
+		dev->cdev.proc_dir = NULL;
+		pr_err("Bifur dev(%s) create card file failed.", dev->name);
+		return -EPERM;
+	}
+
+	pr_info("Bifur dev(%p) name(%s,%s) proc build success.", dev, dev->name, pci_dev_name);
+	return 0;
+}
+
+/*
+ * Remove the per-VF proc node and directory created by
+ * bifur_dev_proc_build(). Fix: bounded snprintf replaces sprintf.
+ */
+int bifur_dev_proc_destroy(struct bifur_vf_info *dev)
+{
+	char pci_dev_name[BIFURNAMSIZ] = { 0 };
+	struct bifur_vf_mgr *vf_mgr = dev->vf_mgr;
+
+	int ret = snprintf(pci_dev_name, sizeof(pci_dev_name), "0x%x", dev->vf_dbdf);
+
+	if (ret < 0 || ret >= (int)sizeof(pci_dev_name)) {
+		pr_err("Bifur dev(%s) proc dir create fail, bdf(0x%x).", dev->name, dev->vf_dbdf);
+		return -ENOEXEC;
+	}
+
+	remove_proc_entry(BIFUR_CDEV_PROC_NAME, dev->cdev.proc_dir);
+	remove_proc_entry(pci_dev_name, vf_mgr->bifur_proc_root);
+	dev->cdev.proc_dir = NULL;
+
+	pr_info("Bifur dev(%s) proc destroy success, pci_dev_name(%s).", dev->name, pci_dev_name);
+
+	return 0;
+}
+
+/* Look up the PF currently bound to an in-use VF; -EINVAL if none matches. */
+int bifur_find_pf_by_vf(struct bifur_vf_mgr *vf_mgr, u32 vf_dbdf, u32 *pf_dbdf)
+{
+	int ret = -EINVAL;
+	u32 idx;
+
+	mutex_lock(&vf_mgr->vf_mgr_mutex);
+	for (idx = 0; idx < vf_mgr->vf_sum; ++idx) {
+		const struct bifur_vf_info *slot = vf_mgr->vf_info + idx;
+
+		if (slot->vf_dbdf == vf_dbdf && slot->in_use) {
+			*pf_dbdf = slot->pf_dbdf;
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&vf_mgr->vf_mgr_mutex);
+
+	return ret;
+}
+
+/* Prepare the per-device open-file list, then expose the proc node. */
+int bifur_vf_cdev_init(struct bifur_vf_info *dev)
+{
+	int ret;
+
+	mutex_init(&dev->dev_file_mgt.lock);
+	INIT_LIST_HEAD(&dev->dev_file_mgt.list);
+
+	ret = bifur_dev_proc_build(dev);
+	if (ret != 0)
+		pr_err("Init dev build failed, ret(%d).", ret);
+
+	return ret;
+}
+
+/* Tear down the per-VF proc node; destroy errors are intentionally ignored. */
+void bifur_vf_cdev_uninit(struct bifur_vf_info *dev)
+{
+	(void)bifur_dev_proc_destroy(dev);
+}
diff --git a/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.h b/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.h
new file mode 100644
index 000000000..7c4124495
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/bifur_vf_mgr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ */
+
+#ifndef BIFUR_VF_MGR_H__
+#define BIFUR_VF_MGR_H__
+
+#define BIFURNAMSIZ 20
+#define BIFUR_PROC_FILE_MOD 0640
+#define BIFUR_CDEV_PROC_NAME "bifur_vdev"
+
+struct bifur_dev_file_mgt_t {
+ struct mutex lock; /* serializes access to @list */
+ struct list_head list; /* presumably struct bifur_dev_file_t.node entries — confirm against .c file */
+};
+struct bifur_lld_dev;
+struct bifur_vf_mgr;
+
+/* Per-VF state tracked by the bifur VF manager. */
+struct bifur_vf_info {
+ char name[BIFURNAMSIZ]; /* device name used in log messages */
+ u32 vf_dbdf; /* VF PCI dbdf; lookup key in bifur_find_pf_by_vf() */
+ u32 pf_dbdf; /* parent PF PCI dbdf */
+ u16 glb_func_id;
+ bool in_use;
+ struct bifur_cdev cdev; /* cdev.proc_dir holds the per-VF /proc directory */
+ struct bifur_dev_file_mgt_t dev_file_mgt;
+ struct bifur_vf_mgr *vf_mgr; /* back-pointer to the owning manager */
+ atomic_t refcount;
+};
+
+/* Manager over the array of VF records and their /proc root. */
+struct bifur_vf_mgr {
+ u32 vf_sum; /* number of entries in @vf_info */
+ u32 vf_in_use;
+ struct bifur_vf_info *vf_info; /* array of vf_sum entries */
+ struct mutex vf_mgr_mutex; /* guards vf_info entries and counters */
+ struct proc_dir_entry *bifur_proc_root;
+};
+
+struct bifur_dev_file_t {
+ struct list_head node; /* links into bifur_dev_file_mgt_t.list */
+ struct bifur_vf_info *dev;
+
+ atomic_t refcount;
+};
+
+int bifur_alloc_vf_mgr(struct bifur_lld_dev *bifur_dev);
+
+void bifur_free_vf_mgr(struct bifur_lld_dev *bifur_dev);
+
+struct bifur_vf_info *bifur_find_vf(struct bifur_vf_mgr *vf_mgr, u32 dbdf);
+
+int bifur_find_pf_by_vf(struct bifur_vf_mgr *vf_mgr, u32 vf_dbdf, u32 *pf_dbdf);
+
+int bifur_vf_cdev_init(struct bifur_vf_info *dev);
+void bifur_vf_cdev_uninit(struct bifur_vf_info *dev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hibifur/include/nic/nic_mpu_cmd.h b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_mpu_cmd.h
new file mode 100644
index 000000000..1a3c26d00
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_mpu_cmd.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Huawei Technologies Co., Ltd */
+
+#ifndef NIC_MPU_CMD_H
+#define NIC_MPU_CMD_H
+
+/* Commands between the NIC driver and the MPU management CPU. */
+enum hinic3_nic_cmd {
+ HINIC3_NIC_CMD_VF_REGISTER = 0, /**< Only for PFD and VFD @see > hinic3_cmd_register_vf */
+
+ /** FUNC CFG */
+ HINIC3_NIC_CMD_SET_FUNC_TBL = 5, /**< Set function table @see > hinic3_cmd_set_func_tbl */
+ HINIC3_NIC_CMD_SET_VPORT_ENABLE, /**< Enable a vport @see > hinic3_vport_state */
+ HINIC3_NIC_CMD_SET_RX_MODE, /**< Set nic rx mode. @see > hinic3_rx_mode_config */
+ HINIC3_NIC_CMD_SQ_CI_ATTR_SET, /**< Set SQ CI attr @see > hinic3_cmd_cons_idx_attr */
+ /**< Get vport stat @see > hinic3_port_stats_info, < hinic3_cmd_vport_stats */
+ HINIC3_NIC_CMD_GET_VPORT_STAT,
+ /**< Clean vport stat @see > hinic3_cmd_clear_vport_stats */
+ HINIC3_NIC_CMD_CLEAN_VPORT_STAT,
+ /**< Clean queue pair resource @see > hinic3_cmd_clear_qp_resource */
+ HINIC3_NIC_CMD_CLEAR_QP_RESOURCE,
+ HINIC3_NIC_CMD_CFG_FLEX_QUEUE, /**< Set flex queue @see > hinic3_cmd_cfg_qps */
+ /** LRO CFG */
+ HINIC3_NIC_CMD_CFG_RX_LRO, /**< Set rx LRO @see > hinic3_cmd_lro_config */
+ HINIC3_NIC_CMD_CFG_LRO_TIMER, /**< Set LRO timer @see > hinic3_cmd_lro_timer */
+ /**< negotiate features @see > hinic3_cmd_feature_nego */
+ HINIC3_NIC_CMD_FEATURE_NEGO,
+ /**< Configure local LRO state @see > hinic3_cmd_local_lro_state */
+ HINIC3_NIC_CMD_CFG_LOCAL_LRO_STATE,
+
+ /**< Cache out queue pair resource @see > hinic3_cmd_cache_out_qp_resource */
+ HINIC3_NIC_CMD_CACHE_OUT_QP_RES,
+ /**< Set nic er fwd id. @see > mpu_nic_cmd_set_func_er_fwd_id */
+ HINIC3_NIC_CMD_SET_FUNC_ER_FWD_ID,
+
+ /** MAC & VLAN CFG */
+ HINIC3_NIC_CMD_GET_MAC = 20, /**< Get mac address @see > hinic3_port_mac_set */
+ HINIC3_NIC_CMD_SET_MAC, /**< Set mac address @see > hinic3_port_mac_set */
+ HINIC3_NIC_CMD_DEL_MAC, /**< Delete mac address @see > hinic3_port_mac_set */
+ /**< Update mac address @see > hinic3_port_mac_update */
+ HINIC3_NIC_CMD_UPDATE_MAC,
+ /**< Get all default mac address @see > cmd_mac_info_get_s */
+ HINIC3_NIC_CMD_GET_ALL_DEFAULT_MAC,
+
+ /**< Configure function vlan @see > hinic3_cmd_vlan_config */
+ HINIC3_NIC_CMD_CFG_FUNC_VLAN,
+ /**< Enable vlan filter @see > hinic3_cmd_set_vlan_filter */
+ HINIC3_NIC_CMD_SET_VLAN_FILTER_EN,
+ /**< Set rx vlan offload @see > hinic3_cmd_vlan_offload */
+ HINIC3_NIC_CMD_SET_RX_VLAN_OFFLOAD,
+ HINIC3_NIC_CMD_SMAC_CHECK_STATE,
+
+ /** SR-IOV */
+ /**< Configure vf vlan @see > hinic3_cmd_vf_vlan_config */
+ HINIC3_NIC_CMD_CFG_VF_VLAN = 40,
+ /**< Set snoopchk state @see > hinic3_cmd_spoofchk_set */
+ HINIC3_NIC_CMD_SET_SPOOPCHK_STATE,
+ /* RATE LIMIT */
+ /**< Set rate limit @see > HINIC3_NIC_CMD_SET_MAX_MIN_RATE */
+ HINIC3_NIC_CMD_SET_MAX_MIN_RATE,
+
+ /** RSS CFG */
+ HINIC3_NIC_CMD_RSS_CFG = 60, /**< Set rss config @see > hinic3_cmd_rss_config */
+ HINIC3_NIC_CMD_RSS_TEMP_MGR, /**< TODO: delete after implement nego cmd */
+ HINIC3_NIC_CMD_GET_RSS_CTX_TBL, /**< TODO: delete: move to ucode cmd */
+ /**< Set rss hash key @see > hinic3_cmd_rss_hash_key */
+ HINIC3_NIC_CMD_CFG_RSS_HASH_KEY,
+ /**< Set rss hash engine type @see > hinic3_cmd_rss_engine_type */
+ HINIC3_NIC_CMD_CFG_RSS_HASH_ENGINE,
+ /**< Set rss context table info @see > hinic3_rss_context_table */
+ HINIC3_NIC_CMD_SET_RSS_CTX_TBL_INTO_FUNC,
+ /** IP checksum error packets, enable rss quadruple hash */
+ /**< Set rss config @see > hinic3_ipcs_err_rss_enable_operation_s */
+ HINIC3_NIC_CMD_IPCS_ERR_RSS_ENABLE_OP = 66,
+
+ /** PPA/FDIR */
+ HINIC3_NIC_CMD_ADD_TC_FLOW = 80, /**< Add tc flow @see > nic_cmd_fdir_add_rule */
+ HINIC3_NIC_CMD_DEL_TC_FLOW, /**< Delete tc flow @see > nic_cmd_fdir_del_rules */
+ HINIC3_NIC_CMD_GET_TC_FLOW, /**< Get tc flow @see > nic_cmd_fdir_get_rule */
+ HINIC3_NIC_CMD_FLUSH_TCAM, /**< Flush TCAM @see > nic_cmd_flush_tcam_rules */
+ /**< Configure TCAM block @see > nic_cmd_ctrl_tcam_block_out */
+ HINIC3_NIC_CMD_CFG_TCAM_BLOCK,
+ /**< Enable TCAM @see > nic_cmd_set_tcam_enable */
+ HINIC3_NIC_CMD_ENABLE_TCAM,
+ /**< Get TCAM block @see > nic_cmd_dfx_fdir_tcam_block_table */
+ HINIC3_NIC_CMD_GET_TCAM_BLOCK,
+ /**< Configure PPA table id @see > hinic3_ppa_cfg_table_id_cmd */
+ HINIC3_NIC_CMD_CFG_PPA_TABLE_ID,
+ /**< Set PPA enable @see > hinic3_ppa_cfg_ppa_en_cmd */
+ HINIC3_NIC_CMD_SET_PPA_EN = 88,
+ /**< Configure PPA mode @see > hinic3_ppa_cfg_mode_cmd */
+ HINIC3_NIC_CMD_CFG_PPA_MODE,
+ /**< Configure PPA flush @see > hinic3_ppa_cfg_flush_cmd */
+ HINIC3_NIC_CMD_CFG_PPA_FLUSH,
+ /**< Set FDIR status @see > hinic3_set_fdir_ethertype_rule */
+ HINIC3_NIC_CMD_SET_FDIR_STATUS,
+ /**< Get PPA counter @see > hinic3_ppa_fdir_query_cmd */
+ HINIC3_NIC_CMD_GET_PPA_COUNTER,
+ /**< Set flow bifur status @see > cmd_flow_bifur_func_handle */
+ HINIC3_NIC_CMD_SET_FUNC_FLOW_BIFUR_ENABLE,
+ /**< Set flow bifur bond @see > cmd_flow_bifur_bond_handle */
+ HINIC3_NIC_CMD_SET_BOND_MASK,
+ /**< Get func tcam table @see > get_fdir_func_tcam_table */
+ HINIC3_NIC_CMD_GET_BLOCK_TC_FLOWS,
+ /**< Get flow bifur bond @see > cmd_flow_bifur_bond_handle */
+ HINIC3_NIC_CMD_GET_BOND_MASK,
+
+ /** PORT CFG */
+ HINIC3_NIC_CMD_SET_PORT_ENABLE = 100, /**< set port enable @see > hinic3_port_state */
+ HINIC3_NIC_CMD_CFG_PAUSE_INFO, /**< Configure pause info @see > hinic3_cmd_pause_config */
+
+ HINIC3_NIC_CMD_SET_PORT_CAR, /**< Set port Car @see > hinic3_cmd_set_port_car */
+ HINIC3_NIC_CMD_SET_ER_DROP_PKT, /**< Unused */
+
+ HINIC3_NIC_CMD_VF_COS, /**< Get vf CoS @see > hinic3_cmd_vf_dcb_state */
+ HINIC3_NIC_CMD_SETUP_COS_MAPPING, /**< Unused */
+ HINIC3_NIC_CMD_SET_ETS, /**< Unused */
+ HINIC3_NIC_CMD_SET_PFC, /**< Unused */
+ HINIC3_NIC_CMD_QOS_ETS, /**< Set QoS ETS @see > hinic3_cmd_ets_cfg */
+ HINIC3_NIC_CMD_QOS_PFC, /**< Set QoS PFC @see > hinic3_cmd_set_pfc */
+ HINIC3_NIC_CMD_QOS_DCB_STATE, /**< Get QoS DCB state @see > hinic3_cmd_set_dcb_state */
+ HINIC3_NIC_CMD_QOS_PORT_CFG, /**< Get QoS port cfg @see > hinic3_cmd_qos_port_cfg */
+ HINIC3_NIC_CMD_QOS_MAP_CFG, /**< Get QoS map cfg @see > hinic3_cmd_qos_map_cfg */
+ HINIC3_NIC_CMD_FORCE_PKT_DROP, /**< Force pkt drop @see > hinic3_force_pkt_drop */
+ /**< Configure nic tx promisc skip @see > hinic3_tx_promisc_cfg */
+ HINIC3_NIC_CMD_CFG_TX_PROMISC_SKIP = 114,
+ /**< Set flow bifur port switch @see > cmd_flow_bifur_port_handle */
+ HINIC3_NIC_CMD_SET_PORT_FLOW_BIFUR_ENABLE = 117,
+ /**< Set tx pause exc notice @see > nic_cmd_tx_pause_notice */
+ HINIC3_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118,
+ /**< Inquirt pause cfg @see > nic_cmd_pause_inquiry_cfg_s */
+ HINIC3_NIC_CMD_INQUIRT_PAUSE_CFG = 119,
+
+ /** MISC */
+ HINIC3_NIC_CMD_BIOS_CFG = 120, /**< BIOS config @see > nic_cmd_bios_cfg */
+ HINIC3_NIC_CMD_SET_FIRMWARE_CUSTOM_PACKETS_MSG, /**< Set firmware custom packets msg @see > fault_msg_st */
+
+ /** BOND */
+ /**< Create bond device @see > hinic3_cmd_create_bond */
+ HINIC3_NIC_CMD_BOND_DEV_CREATE = 134,
+ HINIC3_NIC_CMD_BOND_DEV_DELETE, /**< Delete bond device @see > hinic3_cmd_delete_bond */
+ /**< Open/close bond dev @see > hinic3_cmd_open_close_bond */
+ HINIC3_NIC_CMD_BOND_DEV_OPEN_CLOSE,
+ HINIC3_NIC_CMD_BOND_INFO_GET, /**< Get bond status info @see > hinic3_bond_status_info */
+ /**< Get bond active info @see > hinic3_bond_active_report_info */
+ HINIC3_NIC_CMD_BOND_ACTIVE_INFO_GET,
+ /**< Bond active notice report @see > nic_cmd_bond_active_report_info */
+ HINIC3_NIC_CMD_BOND_ACTIVE_NOTICE,
+
+ /** DFX */
+ HINIC3_NIC_CMD_GET_SM_TABLE = 140, /**< Get sm table @see > nic_cmd_dfx_sm_table */
+ /**< Set RD line table @see > nic_mpu_lt_opera, < nic_mpu_lt_opera */
+ HINIC3_NIC_CMD_RD_LINE_TBL,
+
+ HINIC3_NIC_CMD_SET_UCAPTURE_OPT = 160, /**< TODO: move to roce */
+ HINIC3_NIC_CMD_SET_VHD_CFG, /**< Set VHD configuration @see > hinic3_set_vhd_mode */
+
+ /** TODO: move to HILINK */
+ /**< Get port stat @see > hinic3_port_stats_info, < hinic3_port_stats */
+ HINIC3_NIC_CMD_GET_PORT_STAT = 200,
+ HINIC3_NIC_CMD_CLEAN_PORT_STAT, /**< Unused */
+ HINIC3_NIC_CMD_CFG_LOOPBACK_MODE, /**< Unused */
+ HINIC3_NIC_CMD_GET_SFP_QSFP_INFO, /**< Unused */
+ HINIC3_NIC_CMD_SET_SFP_STATUS, /**< Unused */
+ HINIC3_NIC_CMD_GET_LIGHT_MODULE_ABS, /**< Unused */
+ HINIC3_NIC_CMD_GET_LINK_INFO, /**< Unused */
+ HINIC3_NIC_CMD_CFG_AN_TYPE, /**< Unused */
+ HINIC3_NIC_CMD_GET_PORT_INFO, /**< Get port info @see > hinic3_cmd_port_info */
+ HINIC3_NIC_CMD_SET_LINK_SETTINGS, /**< Unused */
+ HINIC3_NIC_CMD_ACTIVATE_BIOS_LINK_CFG, /**< Unused */
+ HINIC3_NIC_CMD_RESTORE_LINK_CFG, /**< Unused */
+ HINIC3_NIC_CMD_SET_LINK_FOLLOW, /**< Unused */
+ HINIC3_NIC_CMD_GET_LINK_STATE, /**< Unused */
+ HINIC3_NIC_CMD_LINK_STATUS_REPORT, /**< Unused */
+ HINIC3_NIC_CMD_CABLE_PLUG_EVENT, /**< Unused */
+ HINIC3_NIC_CMD_LINK_ERR_EVENT, /**< Unused */
+ HINIC3_NIC_CMD_SET_LED_STATUS, /**< Unused */
+
+ HINIC3_NIC_CMD_MAX = 256,
+};
+
+#endif /* NIC_MPU_CMD_H */
diff --git a/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd.h b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd.h
new file mode 100644
index 000000000..65b243a0e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Huawei Technologies Co., Ltd */
+
+#ifndef NIC_NPU_CMD_H
+#define NIC_NPU_CMD_H
+
+/* NIC CMDQ MODE */
+enum hinic3_ucode_cmd {
+ /**< Modify queue context. @see > hinic3_sq_ctxt_block */
+ HINIC3_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
+ /**< Clean queue context. @see > hinic3_clean_queue_ctxt */
+ HINIC3_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC3_UCODE_CMD_ARM_SQ, /**< Unused */
+ HINIC3_UCODE_CMD_ARM_RQ, /**< Unused */
+ /**< Set RSS indir table. @see > nic_rss_indirect_tbl */
+ HINIC3_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ /**< Set RSS context table. @see > nic_rss_context_tbl */
+ HINIC3_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC3_UCODE_CMD_GET_RSS_INDIR_TABLE, /**< Get RSS indir table. */
+ HINIC3_UCODE_CMD_GET_RSS_CONTEXT_TABLE, /**< Unused */
+ HINIC3_UCODE_CMD_SET_IQ_ENABLE, /**< Unused */
+ HINIC3_UCODE_CMD_SET_RQ_FLUSH = 10, /**< Set RQ flush. @see > hinic3_cmd_set_rq_flush */
+ HINIC3_UCODE_CMD_MODIFY_VLAN_CTX, /**< Modify vlan context. @see > nic_vlan_ctx */
+ HINIC3_UCODE_CMD_PPA_HASH_TABLE,
+ /**< Get rxq info. @see > hinic3_rxq_hw, < rxq_check_info */
+ HINIC3_UCODE_CMD_RXQ_INFO_GET = 13,
+};
+
+#endif /* NIC_NPU_CMD_H */
diff --git a/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd_defs.h b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd_defs.h
new file mode 100644
index 000000000..b5461ad85
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hibifur/include/nic/nic_npu_cmd_defs.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Huawei Technologies Co., Ltd */
+
+
+#ifndef NIC_NPU_CMD_DEFS_H
+#define NIC_NPU_CMD_DEFS_H
+
+#include "typedef.h"
+#include "nic_cfg_comm.h"
+
+/* NIC Command Queue Header
+ * This header is used to indicate the start of a NIC command queue.
+ * All contexts that follow this header should be consecutive.
+ */
+struct nic_cmdq_header {
+ union {
+ struct {
+#if (BYTE_ORDER == BIG_ENDIAN)
+ /* 0x0:SQ, 0x1:RQ */
+ u16 queue_type;
+ /* queue number in buffer follow this header */
+ u16 queue_num;
+#else
+ u16 queue_num;
+ u16 queue_type;
+#endif
+ } cmdq_ctx_dw0;
+
+ u32 ctx_dw0; /* raw dword view of cmdq_ctx_dw0 */
+ };
+
+#if (BYTE_ORDER == BIG_ENDIAN)
+ u16 rsvd;
+ u16 start_qid;
+#else
+ u16 start_qid;
+ u16 rsvd;
+#endif
+};
+
+struct nic_cmdq_context_modify_s {
+ struct nic_cmdq_header hdr;
+ u8 data[2016]; /* consecutive queue contexts following the header */
+};
+
+struct nic_cmdq_clean_q_space {
+ /* queue_type = 0, TSO (TCP Segmentation Offload)
+ * queue_type = 1, LRO (Large Receive Offload)
+ */
+ union {
+ struct {
+#if (BYTE_ORDER == BIG_ENDIAN)
+ u16 queue_type;
+ u16 queue_num;
+#else
+ u16 queue_num;
+ u16 queue_type;
+#endif
+ } cmdq_space_dw0;
+
+ u32 space_dw0; /* raw dword view of cmdq_space_dw0 */
+ };
+
+#if (BYTE_ORDER == BIG_ENDIAN)
+ u16 rsvd;
+ u16 start_qid;
+#else
+ u16 start_qid;
+ u16 rsvd;
+#endif
+
+ u32 rsvd1;
+};
+
+struct nic_cmdq_flush_rq_task {
+ union {
+ struct {
+#if (BYTE_ORDER == BIG_ENDIAN)
+ u16 q_id;
+ u16 glb_rq_id;
+#else
+ u16 glb_rq_id;
+ u16 q_id;
+#endif
+ } bs;
+
+ u32 value; /* raw dword view of bs */
+ } dw0;
+};
+
+/* arm sq/rq */
+union nic_cmdq_arm {
+ struct cmdq_arm_dw0_s {
+#if (BYTE_ORDER == BIG_ENDIAN)
+ u16 qpn;
+ u16 pi;
+#else
+ u16 pi;
+ u16 qpn;
+#endif
+ } dw0;
+
+ u32 arm_dw0; /* raw dword view of dw0 */
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 rsvd[4]; // Make sure that 16B beyond entry[]
+ u16 entry[NIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_glb_qid_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd; /* Make sure that 16B beyond entry[] */
+ u16 entry[NIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 rsvd[4];
+ u32 ctx;
+};
+
+struct nic_vlan_ctx {
+ u32 func_id;
+ u32 qid; /* if qid = 0xFFFF, config current function all queue */
+ u32 vlan_id;
+ u32 vlan_mode;
+ u32 vlan_sel;
+};
+
+#endif /* NIC_NPU_CMD_DEFS_H */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
index 14d409b02..64139c49a 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
@@ -45,6 +45,7 @@ enum hinic3_service_type {
SERVICE_T_CUSTOM,
SERVICE_T_VROCE,
SERVICE_T_CRYPT,
+ SERVICE_T_BIFUR,
SERVICE_T_MAX,
/* Only used for interruption resource management,
@@ -76,6 +77,10 @@ struct ppa_service_cap {
u16 rsvd1;
};
+struct bifur_service_cap {
+ u8 rsvd;
+};
+
struct vbs_service_cap {
u16 vbs_max_volq;
u8 vbs_main_pf_enable;
@@ -785,6 +790,15 @@ bool hinic3_support_toe(void *hwdev, struct toe_service_cap *cap);
*/
bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap);
+/* *
+ * @brief hinic3_support_bifur - function support bifur
+ * @param hwdev: device pointer to hwdev
+ * @param cap: bifur service capability
+ * @retval true: the function supports bifur
+ * @retval false: the function does not support bifur
+ */
+bool hinic3_support_bifur(void *hwdev, struct bifur_service_cap *cap);
+
/* *
* @brief hinic3_support_migr - function support migrate
* @param hwdev: device pointer to hwdev
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index d07a1ba04..6f90c868a 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -89,6 +89,8 @@ enum hinic3_rx_buff_len {
#define CONVERT_UNIT 1024
+#define BIFUR_RESOURCE_PF_SSID 0x5a1
+
#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
static int hinic3_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr);
@@ -876,11 +878,13 @@ static int nic_probe(struct hinic3_lld_dev *lld_dev, void **uld_dev,
hinic3_register_notifier(nic_dev);
#endif
- err = register_netdev(netdev);
- if (err) {
- nic_err(&pdev->dev, "Failed to register netdev\n");
- err = -ENOMEM;
- goto netdev_err;
+ if (pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID) {
+ err = register_netdev(netdev);
+ if (err) {
+ nic_err(&pdev->dev, "Failed to register netdev\n");
+ err = -ENOMEM;
+ goto netdev_err;
+ }
}
queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
@@ -928,7 +932,9 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter)
netdev = nic_dev->netdev;
- unregister_netdev(netdev);
+ if (lld_dev->pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID)
+ unregister_netdev(netdev);
+
#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
hinic3_unregister_notifier(nic_dev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index 952133f10..18a85f489 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -1091,9 +1091,18 @@ struct hinic3_bond_mask_cmd {
u8 rsvd[3];
};
+struct hinic3_func_er_value_cmd {
+ struct hinic3_mgmt_msg_head msg_head;
+ u16 vf_id;
+ u16 er_fwd_id;
+};
+
#define HINIC3_TX_SET_PROMISC_SKIP 0
#define HINIC3_TX_GET_PROMISC_SKIP 1
+#define HINIC3_GET_TRAFFIC_BIFUR_STATE 0
+#define HINIC3_SET_TRAFFIC_BIFUR_STATE 1
+
struct hinic3_tx_promisc_cfg {
struct hinic3_mgmt_msg_head msg_head;
u8 port_id;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
index b5bcd8ae8..94e606e96 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
@@ -132,7 +132,8 @@ enum module_name {
SEND_TO_MIGRATE_DRIVER,
SEND_TO_PPA_DRIVER,
SEND_TO_CUSTOM_DRIVER = SEND_TO_SRV_DRV_BASE + 11,
- SEND_TO_DRIVER_MAX = SEND_TO_SRV_DRV_BASE + 15, /* reserved */
+ SEND_TO_BIFUR_DRIVER = SEND_TO_SRV_DRV_BASE + 15,
+ SEND_TO_DRIVER_MAX = SEND_TO_SRV_DRV_BASE + 16, /* reserved */
};
enum driver_cmd_type {
@@ -208,12 +209,16 @@ enum driver_cmd_type {
GET_XSFP_PRESENT = 0x51,
GET_XSFP_INFO = 0x52,
DEV_NAME_TEST = 0x53,
+ GET_XSFP_INFO_COMP_CMIS = 0x54,
GET_WIN_STAT = 0x60,
WIN_CSR_READ = 0x61,
WIN_CSR_WRITE = 0x62,
WIN_API_CMD_RD = 0x63,
+ BIFUR_SET_ENABLE = 0xc0,
+ BIFUR_GET_ENABLE = 0xc1,
+
VM_COMPAT_TEST = 0xFF
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
index 9154a1d27..5e0139aa9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
@@ -622,6 +622,9 @@ void hinic3_get_card_info(const void *hwdev, void *bufin)
if (hinic3_func_for_mgmt(fun_hwdev))
strscpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ);
+ if (dev->lld_dev.pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID)
+ strscpy(info->pf[i].name, "bifur", IFNAMSIZ);
+
strscpy(info->pf[i].bus_info, pci_name(dev->pcidev),
sizeof(info->pf[i].bus_info));
info->pf_num++;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
index bb7f5bb67..5c7c7cdd8 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
@@ -20,6 +20,8 @@
#define ULD_LOCK_MIN_USLEEP_TIME 900
#define ULD_LOCK_MAX_USLEEP_TIME 1000
+#define BIFUR_RESOURCE_PF_SSID 0x05a1
+
#define HINIC3_IS_VF_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_VF)
#define HINIC3_IS_SPU_DEV(pdev) ((pdev)->device == HINIC3_DEV_ID_SPU)
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
index 819c56f42..030ef6df7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
@@ -1172,6 +1172,20 @@ bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap)
}
EXPORT_SYMBOL(hinic3_support_ppa);
+/* Report whether @hwdev advertises the bifur service (@cap is currently unused). */
+bool hinic3_support_bifur(void *hwdev, struct bifur_service_cap *cap)
+{
+ struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev;
+
+ return dev && IS_BIFUR_TYPE(dev);
+}
+EXPORT_SYMBOL(hinic3_support_bifur);
+
bool hinic3_support_migr(void *hwdev, struct migr_service_cap *cap)
{
struct hinic3_hwdev *dev = hwdev;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
index db6e3cab6..671eed69c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
@@ -322,6 +322,8 @@ struct cfg_mgmt_info {
(((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_PPA)
#define IS_MIGR_TYPE(dev) \
(((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MIGRATE)
+#define IS_BIFUR_TYPE(dev) \
+ (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_BIFUR)
int init_cfg_mgmt(struct hinic3_hwdev *dev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
index 0b1c995bd..6ab1b92e7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
@@ -73,7 +73,7 @@ void hinic3_uld_lock_init(void)
static const char *s_uld_name[SERVICE_T_MAX] = {
"nic", "ovs", "roce", "toe", "ioe",
- "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "custom"};
+ "fc", "vbs", "ipsec", "virtio", "migrate", "ppa", "bifur", "custom"};
const char **hinic3_get_uld_names(void)
{
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
index 4cdc6f6ea..5e4804a96 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
@@ -550,8 +550,17 @@ static struct hinic3_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg)
{
struct hinic3_lld_dev *lld_dev = NULL;
- if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && nt_msg->module < SEND_TO_DRIVER_MAX &&
- nt_msg->module != SEND_TO_HW_DRIVER && nt_msg->msg_formate != GET_DRV_VERSION) {
+ if (nt_msg->module == SEND_TO_NIC_DRIVER && (nt_msg->msg_formate == GET_XSFP_INFO ||
+ nt_msg->msg_formate == GET_XSFP_PRESENT ||
+ nt_msg->msg_formate == GET_XSFP_INFO_COMP_CMIS)) {
+ lld_dev = hinic3_get_lld_dev_by_chip_and_port(nt_msg->device_name, nt_msg->port_id);
+ } else if (nt_msg->module == SEND_TO_CUSTOM_DRIVER &&
+ nt_msg->msg_formate == CMD_CUSTOM_BOND_GET_CHIP_NAME) {
+ lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX);
+ } else if (nt_msg->module == SEND_TO_VBS_DRIVER || nt_msg->module == SEND_TO_BIFUR_DRIVER) {
+ lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name);
+ } else if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && nt_msg->module < SEND_TO_DRIVER_MAX &&
+ nt_msg->msg_formate != GET_DRV_VERSION) {
lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name,
nt_msg->module - SEND_TO_SRV_DRV_BASE);
} else {
@@ -561,15 +570,6 @@ static struct hinic3_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg)
SERVICE_T_MAX);
}
- if (nt_msg->module == SEND_TO_NIC_DRIVER && (nt_msg->msg_formate == GET_XSFP_INFO ||
- nt_msg->msg_formate == GET_XSFP_PRESENT))
- lld_dev = hinic3_get_lld_dev_by_chip_and_port(nt_msg->device_name,
- nt_msg->port_id);
-
- if (nt_msg->module == SEND_TO_CUSTOM_DRIVER &&
- nt_msg->msg_formate == CMD_CUSTOM_BOND_GET_CHIP_NAME)
- lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX);
-
return lld_dev;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
index f56df083a..e7e16fca4 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
@@ -21,6 +21,7 @@ enum servic_bit_define {
SERVICE_BIT_PPA = 11,
SERVICE_BIT_MIGRATE = 12,
SERVICE_BIT_VROCE = 13,
+ SERVICE_BIT_BIFUR = 14,
SERVICE_BIT_MAX
};
@@ -38,6 +39,7 @@ enum servic_bit_define {
#define CFG_SERVICE_MASK_PPA (0x1 << SERVICE_BIT_PPA)
#define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE)
#define CFG_SERVICE_MASK_VROCE (0x1 << SERVICE_BIT_VROCE)
+#define CFG_SERVICE_MASK_BIFUR (0x1 << SERVICE_BIT_BIFUR)
/* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. */
enum scenes_id_define {
--
2.45.1.windows.1
2
1