Kernel
Threads by month
- ----- 2025 -----
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- 60 participants
- 21438 discussions
[openeuler:openEuler-1.0-LTS] BUILD REGRESSION 4ffe43c9c280969676fa933f022ebf1a8aaebcdb
by kernel test robot 26 Nov '25
by kernel test robot 26 Nov '25
26 Nov '25
tree/branch: https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
branch HEAD: 4ffe43c9c280969676fa933f022ebf1a8aaebcdb !19055 scsi: ses: Fix possible addl_desc_ptr out-of-bounds accesses
Error/Warning (recently discovered and may have been fixed):
https://lore.kernel.org/oe-kbuild-all/202511251827.iPD7MiMU-lkp@intel.com
block/blk-mq-debugfs-zoned.o: warning: objtool: missing symbol for section .text
crypto/sm4_generic.o: warning: objtool: missing symbol for section .text
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.o: warning: objtool: missing symbol for section .text
drivers/net/ethernet/huawei/bma/kbox_drv/kbox_dump.o: warning: objtool: missing symbol for section .text
drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.o: warning: objtool: missing symbol for section .text
drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_image.o: warning: objtool: missing symbol for section .text
drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.o: warning: objtool: missing symbol for section .text
drivers/net/wireless/intel/iwlwifi/fw/acpi.o: warning: objtool: missing symbol for section .text
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.o: warning: objtool: missing symbol for section .text
drivers/net/wireless/intel/iwlwifi/fw/smem.o: warning: objtool: missing symbol for section .text
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.o: warning: objtool: missing symbol for section .text
drivers/net/wireless/intel/iwlwifi/mvm/nvm.o: warning: objtool: missing symbol for section .text
include/linux/mempolicy.h:329:13: warning: '__do_mbind' defined but not used [-Wunused-function]
Unverified Error/Warning (likely false positive, kindly check if interested):
(.text+0x26a4): undefined reference to `_init'
(.text+0x31): undefined reference to `_DYNAMIC'
(.text+0x41): undefined reference to `_fini'
/usr/include/bits/floatn.h:97:9: error: __float128 is not supported on this target
crypto/ecc.c:1112:9: warning: 'priv' may be used uninitialized [-Wmaybe-uninitialized]
include/linux/list.h:63:20: warning: storing the address of local variable 'wait' in '((struct list_head *)x)[1].prev' [-Wdangling-pointer=]
include/linux/list.h:63:20: warning: storing the address of local variable 'waiter' in '*(struct list_head *)((char *)sem + 8).prev' [-Wdangling-pointer=]
include/linux/printk.h:346:9: warning: this statement may fall through [-Wimplicit-fallthrough=]
include/linux/signal.h:180:29: warning: this statement may fall through [-Wimplicit-fallthrough=]
include/linux/skbuff.h:1869:9: warning: array subscript 'struct sk_buff[0]' is partly outside array bounds of 'struct ieee80211_tx_data[1]' [-Warray-bounds]
include/linux/skbuff.h:1875:9: warning: array subscript 'struct sk_buff[0]' is partly outside array bounds of 'struct ieee80211_tx_data[1]' [-Warray-bounds=]
include/linux/string.h:333:16: warning: '__builtin_memset' offset [48, 55] is out of the bounds [0, 48] [-Warray-bounds]
Error/Warning ids grouped by kconfigs:
recent_errors
|-- arm64-allnoconfig
| |-- include-linux-list.h:warning:storing-the-address-of-local-variable-wait-in-((struct-list_head-)x)-.prev
| |-- include-linux-list.h:warning:storing-the-address-of-local-variable-waiter-in-(struct-list_head-)((char-)sem-).prev
| |-- include-linux-mempolicy.h:warning:__do_mbind-defined-but-not-used
| |-- include-linux-printk.h:warning:this-statement-may-fall-through
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| |-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
| |-- mm-rmap.c:warning:no-previous-prototype-for-is_vma_temporary_stack
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- arm64-defconfig
| |-- crypto-ecc.c:warning:priv-may-be-used-uninitialized
| |-- include-asm-generic-io.h:warning:this-statement-may-fall-through
| |-- include-linux-list.h:warning:storing-the-address-of-local-variable-waiter-in-(struct-list_head-)((char-)sem-).prev
| |-- include-linux-printk.h:warning:this-statement-may-fall-through
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| |-- include-linux-skbuff.h:warning:array-subscript-struct-sk_buff-is-partly-outside-array-bounds-of-struct-ieee80211_tx_data
| |-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
| `-- mm-memcontrol.c:warning:bad-line:otherwise.
|-- arm64-randconfig-001-20251125
| |-- crypto-ecc.c:warning:priv-may-be-used-uninitialized
| |-- crypto-lrw.c:warning:conflicting-types-for-built-in-function-free-expected-void(void-)
| |-- crypto-xts.c:warning:conflicting-types-for-built-in-function-free-expected-void(void-)
| |-- include-asm-generic-io.h:warning:reg-may-be-used-uninitialized-in-this-function
| |-- include-asm-generic-io.h:warning:this-statement-may-fall-through
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| |-- include-linux-skbuff.h:warning:array-subscript-struct-sk_buff-is-partly-outside-array-bounds-of-struct-ieee80211_tx_data
| |-- include-linux-string.h:warning:__builtin_memset-offset-is-out-of-the-bounds
| |-- include-media-v4l2-mediabus.h:warning:mbus_fmt-may-be-used-uninitialized
| `-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
|-- arm64-randconfig-002-20251125
| |-- crypto-xts.c:warning:conflicting-types-for-built-in-function-free-expected-void(void-)
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| |-- include-linux-unaligned-access_ok.h:warning:array-subscript-is-outside-array-bounds-of-struct-retrieve_data_struct_cmd
| `-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
|-- arm64-randconfig-003-20251125
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| |-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
| |-- mm-memcontrol.c:warning:bad-line:otherwise.
| |-- mm-rmap.c:warning:no-previous-prototype-for-is_vma_temporary_stack
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- arm64-randconfig-004-20251125
| |-- crypto-lrw.c:warning:conflicting-types-for-built-in-function-free-expected-void(void-)
| |-- crypto-xts.c:warning:conflicting-types-for-built-in-function-free-expected-void(void-)
| |-- include-linux-signal.h:warning:this-statement-may-fall-through
| `-- init-calibrate.c:warning:no-previous-prototype-for-calibration_delay_done
|-- x86_64-allnoconfig
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-allnoconfig-bpf
| |-- (.text):undefined-reference-to-_DYNAMIC
| |-- (.text):undefined-reference-to-_fini
| |-- (.text):undefined-reference-to-_init
| `-- usr-include-bits-floatn.h:error:__float128-is-not-supported-on-this-target
|-- x86_64-buildonly-randconfig-004-20251125
| |-- block-bfq-wf2q.o:warning:objtool:missing-symbol-for-section-.text
| |-- block-blk-lib.o:warning:objtool:missing-symbol-for-section-.text
| |-- block-blk-mq-debugfs-zoned.o:warning:objtool:missing-symbol-for-section-.text
| |-- block-blk-mq-pci.o:warning:objtool:missing-symbol-for-section-.text
| |-- block-cmdline-parser.o:warning:objtool:missing-symbol-for-section-.text
| |-- crypto-sm4_generic.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-infiniband-hw-usnic-usnic_ib_qp_grp.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-ethernet-huawei-bma-kbox_drv-kbox_dump.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-ethernet-huawei-bma-kbox_drv-kbox_panic.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-ethernet-huawei-bma-kbox_drv-kbox_ram_image.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-ethernet-huawei-bma-kbox_drv-kbox_ram_op.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-ethernet-mellanox-mlx5-core-en_dim.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-wireless-intel-iwlwifi-fw-acpi.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-wireless-intel-iwlwifi-fw-notif-wait.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-wireless-intel-iwlwifi-fw-smem.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-wireless-intel-iwlwifi-iwl-nvm-parse.o:warning:objtool:missing-symbol-for-section-.text
| |-- drivers-net-wireless-intel-iwlwifi-mvm-nvm.o:warning:objtool:missing-symbol-for-section-.text
| |-- kernel-sched-core.c:error:use-of-undeclared-identifier-root_task_group
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-002-20251125
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-randconfig-003-20251125
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-randconfig-004-20251125
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| |-- mm-memcontrol.c:warning:bad-line:otherwise.
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-012-20251125
| |-- fs-ext4-mballoc.o:warning:objtool:ext4_mb_complex_scan_group:unreachable-instruction
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-015-20251125
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-randconfig-071-20251125
| |-- mm-memcontrol.c:warning:bad-line:otherwise.
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-072-20251125
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-randconfig-073-20251125
| |-- mm-memcontrol.c:warning:bad-line:otherwise.
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
|-- x86_64-randconfig-074-20251125
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-104-20251125
| |-- arch-x86-events-zhaoxin-uncore.c:opportunity-for-str_enabled_disabled(uncore_enabled)
| `-- mm-rmap.c:warning:variable-cstart-set-but-not-used
|-- x86_64-randconfig-121-20251125
| |-- include-linux-backing-dev.h:sparse:sparse:incompatible-types-in-comparison-expression-(different-address-spaces):
| |-- mm-hugetlb.c:warning:no-previous-prototype-for-function-free_huge_page_to_dhugetlb_pool
| |-- mm-memcontrol.c:warning:bad-line:otherwise.
| |-- mm-rmap.c:warning:variable-cstart-set-but-not-used
| `-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
`-- x86_64-randconfig-122-20251125
|-- include-linux-backing-dev.h:sparse:sparse:incompatible-types-in-comparison-expression-(different-address-spaces):
|-- mm-memcontrol.c:warning:bad-line:otherwise.
|-- mm-rmap.c:warning:variable-cstart-set-but-not-used
`-- mm-vmscan.c:error:implicit-declaration-of-function-kernel_swap_enabled-Werror-Wimplicit-function-declaration
elapsed time: 1558m
configs tested: 32
configs skipped: 105
tested configs:
arm64 allnoconfig gcc-15.1.0
arm64 defconfig gcc-15.1.0
arm64 randconfig-001-20251125 gcc-11.5.0
arm64 randconfig-002-20251125 gcc-13.4.0
arm64 randconfig-003-20251125 gcc-8.5.0
arm64 randconfig-004-20251125 gcc-11.5.0
x86_64 allnoconfig clang-22
x86_64 buildonly-randconfig-001-20251125 gcc-12
x86_64 buildonly-randconfig-002-20251125 gcc-14
x86_64 buildonly-randconfig-003-20251125 gcc-14
x86_64 buildonly-randconfig-004-20251125 clang-22
x86_64 buildonly-randconfig-005-20251125 gcc-14
x86_64 buildonly-randconfig-006-20251125 gcc-14
x86_64 defconfig gcc-14
x86_64 randconfig-001-20251125 clang-22
x86_64 randconfig-002-20251125 clang-22
x86_64 randconfig-003-20251125 clang-22
x86_64 randconfig-004-20251125 clang-22
x86_64 randconfig-005-20251125 clang-22
x86_64 randconfig-006-20251125 clang-22
x86_64 randconfig-011-20251125 gcc-12
x86_64 randconfig-012-20251125 clang-22
x86_64 randconfig-013-20251125 gcc-14
x86_64 randconfig-014-20251125 gcc-14
x86_64 randconfig-015-20251125 clang-22
x86_64 randconfig-016-20251125 gcc-14
x86_64 randconfig-071-20251125 clang-22
x86_64 randconfig-072-20251125 clang-22
x86_64 randconfig-073-20251125 clang-22
x86_64 randconfig-074-20251125 clang-22
x86_64 randconfig-075-20251125 gcc-14
x86_64 randconfig-076-20251125 clang-22
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
1
0
[openeuler:OLK-6.6 3316/3316] kernel/xsched/cfs.c:22:6: warning: no previous prototype for 'xs_rq_add'
by kernel test robot 25 Nov '25
by kernel test robot 25 Nov '25
25 Nov '25
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 0fa9557473e665984ed5e97515969035324b2ec5
commit: 024b851138509252da4531dc2e69b1e8df50fd3b [3316/3316] xsched: Add xsched CFS class
config: x86_64-buildonly-randconfig-002-20251125 (https://download.01.org/0day-ci/archive/20251125/202511252147.paqMgpCw-lkp@…)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251125/202511252147.paqMgpCw-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511252147.paqMgpCw-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> kernel/xsched/cfs.c:22:6: warning: no previous prototype for 'xs_rq_add' [-Wmissing-prototypes]
22 | void xs_rq_add(struct xsched_entity_cfs *xse)
| ^~~~~~~~~
>> kernel/xsched/cfs.c:45:6: warning: no previous prototype for 'xs_rq_remove' [-Wmissing-prototypes]
45 | void xs_rq_remove(struct xsched_entity_cfs *xse)
| ^~~~~~~~~~~~
>> kernel/xsched/cfs.c:159:6: warning: no previous prototype for 'rq_init_fair' [-Wmissing-prototypes]
159 | void rq_init_fair(struct xsched_cu *xcu)
| ^~~~~~~~~~~~
>> kernel/xsched/cfs.c:164:6: warning: no previous prototype for 'xse_init_fair' [-Wmissing-prototypes]
164 | void xse_init_fair(struct xsched_entity *xse)
| ^~~~~~~~~~~~~
>> kernel/xsched/cfs.c:169:6: warning: no previous prototype for 'xse_deinit_fair' [-Wmissing-prototypes]
169 | void xse_deinit_fair(struct xsched_entity *xse)
| ^~~~~~~~~~~~~~~
--
>> kernel/xsched/cfs.c:56: warning: Function parameter or member 'xse_cfs' not described in 'xs_cfs_rq_update'
>> kernel/xsched/cfs.c:56: warning: Function parameter or member 'new_xrt' not described in 'xs_cfs_rq_update'
vim +/xs_rq_add +22 kernel/xsched/cfs.c
18
19 #define CFS_INNER_RQ_EMPTY(cfs_xse) \
20 ((cfs_xse)->xruntime == XSCHED_TIME_INF)
21
> 22 void xs_rq_add(struct xsched_entity_cfs *xse)
23 {
24 struct xsched_rq_cfs *cfs_rq = xse->cfs_rq;
25 struct rb_node **link = &cfs_rq->ctx_timeline.rb_root.rb_node;
26 struct rb_node *parent = NULL;
27 struct xsched_entity_cfs *entry;
28 bool leftmost = true;
29
30 while (*link) {
31 parent = *link;
32 entry = rb_entry(parent, struct xsched_entity_cfs, run_node);
33 if (xse->xruntime <= entry->xruntime) {
34 link = &parent->rb_left;
35 } else {
36 link = &parent->rb_right;
37 leftmost = false;
38 }
39 }
40
41 rb_link_node(&xse->run_node, parent, link);
42 rb_insert_color_cached(&xse->run_node, &cfs_rq->ctx_timeline, leftmost);
43 }
44
> 45 void xs_rq_remove(struct xsched_entity_cfs *xse)
46 {
47 struct xsched_rq_cfs *cfs_rq = xse->cfs_rq;
48
49 rb_erase_cached(&xse->run_node, &cfs_rq->ctx_timeline);
50 }
51
52 /**
53 * xs_cfs_rq_update() - Update entity's runqueue position with new xruntime
54 */
55 static void xs_cfs_rq_update(struct xsched_entity_cfs *xse_cfs, u64 new_xrt)
> 56 {
57 xs_rq_remove(xse_cfs);
58 xse_cfs->xruntime = new_xrt;
59 xs_rq_add(xse_cfs);
60 }
61
62 static inline struct xsched_entity_cfs *
63 xs_pick_first(struct xsched_rq_cfs *cfs_rq)
64 {
65 struct xsched_entity_cfs *xse_cfs;
66 struct rb_node *left = rb_first_cached(&cfs_rq->ctx_timeline);
67
68 if (!left)
69 return NULL;
70
71 xse_cfs = rb_entry(left, struct xsched_entity_cfs, run_node);
72 return xse_cfs;
73 }
74
75 /**
76 * xs_update() - Account xruntime and runtime metrics.
77 * @xse_cfs: Point to CFS scheduling entity.
78 * @delta: Execution time in last period
79 */
80 static void xs_update(struct xsched_entity_cfs *xse_cfs, u64 delta)
81 {
82 u64 new_xrt = xse_cfs->xruntime + delta * xse_cfs->weight;
83
84 xs_cfs_rq_update(xse_cfs, new_xrt);
85 xse_cfs->sum_exec_runtime += delta;
86 }
87
88 /*
89 * Xsched Fair class methods
90 * For rq manipulation we rely on root runqueue lock already acquired in core.
91 * Access xsched_group_xcu_priv requires no locks because one thread per XCU.
92 */
93 static void dequeue_ctx_fair(struct xsched_entity *xse)
94 {
95 struct xsched_cu *xcu = xse->xcu;
96 struct xsched_entity_cfs *first;
97 struct xsched_entity_cfs *xse_cfs = &xse->cfs;
98
99 xs_rq_remove(xse_cfs);
100
101 first = xs_pick_first(&xcu->xrq.cfs);
102 xcu->xrq.cfs.min_xruntime = (first) ? first->xruntime : XSCHED_TIME_INF;
103 }
104
105 /**
106 * enqueue_ctx_fair() - Add context to the runqueue
107 * @xse: xsched entity of context
108 * @xcu: executor
109 *
110 * In contrary to enqueue_task it is called once on context init.
111 * Although groups reside in tree, their nodes not counted in nr_running.
112 * The xruntime of a group xsched entitry represented by min xruntime inside.
113 */
114 static void enqueue_ctx_fair(struct xsched_entity *xse, struct xsched_cu *xcu)
115 {
116 struct xsched_entity_cfs *first;
117 struct xsched_rq_cfs *rq;
118 struct xsched_entity_cfs *xse_cfs = &xse->cfs;
119
120 rq = xse_cfs->cfs_rq = &xcu->xrq.cfs;
121
122 /* If no XSE of only empty groups */
123 if (xs_pick_first(rq) == NULL || rq->min_xruntime == XSCHED_TIME_INF)
124 rq->min_xruntime = xse_cfs->xruntime;
125 else
126 xse_cfs->xruntime = max(xse_cfs->xruntime, rq->min_xruntime);
127
128 xs_rq_add(xse_cfs);
129
130 first = xs_pick_first(&xcu->xrq.cfs);
131 xcu->xrq.cfs.min_xruntime = (first) ? first->xruntime : XSCHED_TIME_INF;
132 }
133
134 static struct xsched_entity *pick_next_ctx_fair(struct xsched_cu *xcu)
135 {
136 struct xsched_entity_cfs *xse;
137 struct xsched_rq_cfs *rq = &xcu->xrq.cfs;
138
139 xse = xs_pick_first(rq);
140 if (!xse)
141 return NULL;
142
143 return container_of(xse, struct xsched_entity, cfs);
144 }
145
146 static inline bool
147 xs_should_preempt_fair(struct xsched_entity *xse)
148 {
149 return (atomic_read(&xse->submitted_one_kick) >= XSCHED_CFS_KICK_SLICE);
150 }
151
152 static void put_prev_ctx_fair(struct xsched_entity *xse)
153 {
154 struct xsched_entity_cfs *prev = &xse->cfs;
155
156 xs_update(prev, xse->last_exec_runtime);
157 }
158
> 159 void rq_init_fair(struct xsched_cu *xcu)
160 {
161 xcu->xrq.cfs.ctx_timeline = RB_ROOT_CACHED;
162 }
163
> 164 void xse_init_fair(struct xsched_entity *xse)
165 {
166 xse->cfs.weight = XSCHED_CFS_WEIGHT_DFLT;
167 }
168
> 169 void xse_deinit_fair(struct xsched_entity *xse)
170 {
171 /* TODO Cgroup exit */
172 }
173
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
1
0
25 Nov '25
From: zhoubin <zhoubin120(a)h-partners.com>
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID86QM?from=project-issue&search_…
CVE: NA
--------------------------------
Add PF/VF traffic split support for the semi-offloaded bonding scenario.
Add quick fault localization for network port packet loss issues.
Add LRO configuration optimization function.
Add NIC RX CQE aggregation and sending.
Add XDP DROP/PASS, TX/REDIRECT/ABORTED.
Delete BJ driver tool code.
Refactor the enablement mechanism for NIC traffic split scenarios.
The NIC driver has fixed the following bugs:
Fix bugs related to ethtool.
Fix bugs related to DPDK managing bond.
Fix the bug where packets captured on RX were out of order.
Fix bond-related bugs.
Signed-off-by: zhoubin <zhoubin120(a)h-partners.com>
Signed-off-by: zhuyikai <zhuyikai1(a)h-partners.com>
Signed-off-by: shijing <shijing34(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c | 76 +-
drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c | 120 ++-
drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h | 6 +
drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c | 702 +++++++++++++++---
drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h | 29 +
drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c | 7 +-
drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h | 11 +-
drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c | 4 +-
drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c | 3 +-
drivers/net/ethernet/huawei/hinic3/hinic3_crm.h | 16 +-
drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c | 83 ++-
drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c | 95 +--
drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c | 96 ++-
drivers/net/ethernet/huawei/hinic3/hinic3_filter.c | 2 +-
drivers/net/ethernet/huawei/hinic3/hinic3_hw.h | 15 +
drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c | 134 +++-
drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 104 ++-
drivers/net/ethernet/huawei/hinic3/hinic3_mt.h | 29 +-
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c | 166 ++---
drivers/net/ethernet/huawei/hinic3/hinic3_nic.h | 3 +-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c | 92 ++-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h | 17 +-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h | 22 -
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c | 2 +-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h | 8 +-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c | 310 ++------
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h | 248 ++++++-
drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c | 1 -
drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h | 8 +
drivers/net/ethernet/huawei/hinic3/hinic3_rss.c | 13 +-
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c | 360 ++++++---
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h | 27 +-
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c | 189 ++++-
drivers/net/ethernet/huawei/hinic3/hinic3_tx.h | 48 ++
drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c | 111 ++-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h | 8 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 61 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h | 4 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c | 68 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c | 11 +
drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h | 6 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c | 34 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c | 6 +-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c | 123 ++-
drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h | 6 +
drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h | 16 +
.../include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h | 2 +
drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h | 7 +
drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h | 4 +
drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h | 6 +
drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h | 16 +-
drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h | 6 +
drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h | 65 +-
drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h | 21 +-
drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h | 86 ++-
55 files changed, 2861 insertions(+), 852 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c
index 11634d9..5376519 100644
--- a/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/adapter/hw_cmdq/hw_cmdq_ops.c
@@ -3,6 +3,80 @@
#include "hinic3_nic_cmdq.h"
#include "hw_cmdq_ops.h"
+#include "hinic3_nic_io.h"
+
+static void hinic3_hw_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt)
+{
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+ u16 wqe_type = rq->wqe_type;
+
+ /* RQ depth is in unit of 8Bytes */
+ ci_start = (u16)((u32)hinic3_get_rq_local_ci(rq) << wqe_type);
+ pi_start = (u16)((u32)hinic3_get_rq_local_pi(rq) << wqe_type);
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi =
+ RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX);
+
+ rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR);
+
+ rq_ctxt->wq_pfn_hi_type_owner =
+ RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER);
+
+ switch (wqe_type) {
+ case HINIC3_EXTEND_RQ_WQE:
+ /* use 32Byte WQE with SGE for CQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE);
+ break;
+ case HINIC3_NORMAL_RQ_WQE:
+ /* use 16Byte WQE with 32Bytes SGE for CQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE);
+ rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN);
+ break;
+ case HINIC3_COMPACT_RQ_WQE:
+ /* use 8Byte WQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ RQ_CTXT_WQ_PAGE_SET(3, WQE_TYPE);
+ break;
+ default:
+ pr_err("Invalid rq wqe type: %u", wqe_type);
+ }
+
+ rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pref_cache =
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ rq_ctxt->pref_ci_owner =
+ RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER);
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW);
+
+ rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+
+ rq_ctxt->wq_block_pfn_hi =
+ RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic3_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_header *qp_ctxt_hdr,
enum hinic3_qp_ctxt_type ctxt_type, u16 num_queues,
@@ -32,7 +106,7 @@ static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic3_nic_io *nic_io,
for (i = 0; i < max_ctxts; i++) {
if (ctxt_type == HINIC3_QP_CTXT_TYPE_RQ)
- hinic3_rq_prepare_ctxt(&nic_io->rq[start_qid + i],
+ hinic3_hw_rq_prepare_ctxt(&nic_io->rq[start_qid + i],
&qp_ctxt_block->rq_ctxt[i]);
else
hinic3_sq_prepare_ctxt(&nic_io->sq[start_qid + i],
diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c
index 440fea6..b24e108 100644
--- a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.c
@@ -5,6 +5,118 @@
#include "nic_npu_cmd.h"
#include "hinic3_nic_cmdq.h"
#include "sw_cmdq_ops.h"
+#include "hinic3_nic_io.h"
+
+void hinic3_get_cqe_coalesce_info(void *hwdev, u8 *state, u8 *max_num)
+{
+ struct hinic3_cmd_cqe_coalesce_offload cmd_func_tbl;
+ u16 out_size = sizeof(cmd_func_tbl);
+ int err;
+
+ (void)memset(&cmd_func_tbl, 0, sizeof(cmd_func_tbl));
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.opcode = HINIC3_CMD_OP_GET;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev,
+ HINIC3_NIC_CMD_CFG_CQE_COALESCE_OFFLOAD,
+ &cmd_func_tbl, sizeof(cmd_func_tbl),
+ &cmd_func_tbl, &out_size);
+ if ((err != 0) || (cmd_func_tbl.msg_head.status != 0) ||
+ (out_size == 0)) {
+ *state = 0;
+ *max_num = 0;
+ } else {
+ *state = cmd_func_tbl.state;
+ *max_num = cmd_func_tbl.max_num;
+ }
+}
+
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt,
+ u8 cqe_coal_state, u8 cqe_coal_max_num)
+{
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u16 pi_start, ci_start;
+ u16 wqe_type = rq->wqe_type;
+
+ /* RQ depth is in unit of 8Bytes */
+ ci_start = (u16)((u32)hinic3_get_rq_local_ci(rq) << wqe_type);
+ pi_start = (u16)((u32)hinic3_get_rq_local_pi(rq) << wqe_type);
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi = RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX);
+ /* set ceq_en enable and ceq_arm in case of CQE coalesce */
+ rq_ctxt->ceq_attr = (cqe_coal_state == 0) ?
+ (RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR)) :
+ (RQ_CTXT_CEQ_ATTR_SET(1, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR) |
+ RQ_CTXT_CEQ_ATTR_SET(1, ARM));
+ rq_ctxt->wq_pfn_hi_type_owner =
+ RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER);
+
+ switch (wqe_type) {
+ case HINIC3_EXTEND_RQ_WQE:
+ /* use 32Byte WQE with SGE for CQE */
+ rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(0,
+ WQE_TYPE);
+
+ break;
+ case HINIC3_NORMAL_RQ_WQE:
+ /* use 16Byte WQE with 32Bytes SGE for CQE */
+ rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(2,
+ WQE_TYPE);
+ /* set Max_len in case of CQE coalesce */
+ rq_ctxt->cqe_sge_len = (cqe_coal_state == 0) ?
+ RQ_CTXT_CQE_LEN_SET(1, CQE_LEN) :
+ RQ_CTXT_CQE_LEN_SET(1, CQE_LEN) |
+ RQ_CTXT_CQE_LEN_SET(cqe_coal_max_num,
+ MAX_COUNT);
+ break;
+ default:
+ pr_err("Invalid rq wqe type: %u", wqe_type);
+ }
+
+ rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+ rq_ctxt->pref_cache =
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+ rq_ctxt->pref_ci_owner =
+ RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER);
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW);
+
+ rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+ if (cqe_coal_state == 0) {
+ rq_ctxt->pi_paddr_hi = upper_32_bits(rq->rx.pi_dma_addr);
+ rq_ctxt->pi_paddr_lo = lower_32_bits(rq->rx.pi_dma_addr);
+ } else {
+ rq_ctxt->pi_paddr_hi = upper_32_bits(rq->cqe_start_paddr);
+ rq_ctxt->pi_paddr_lo = lower_32_bits(rq->cqe_start_paddr);
+
+ rq_ctxt->ci_paddr_hi = upper_32_bits(rq->rx_ci_paddr);
+ rq_ctxt->ci_paddr_lo = lower_32_bits(rq->rx_ci_paddr);
+ }
+
+ rq_ctxt->wq_block_pfn_hi =
+ RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+ rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+ hinic3_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_header *qp_ctxt_hdr,
enum hinic3_qp_ctxt_type ctxt_type, u16 num_queues,
@@ -24,17 +136,21 @@ static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic3_nic_io *nic_io,
u16 start_qid, u16 max_ctxts)
{
struct hinic3_qp_ctxt_block *qp_ctxt_block = NULL;
+ u8 cqe_coal_state, cqe_coal_max_num;
u16 i;
-
qp_ctxt_block = cmd_buf->buf;
+ hinic3_get_cqe_coalesce_info(nic_io->hwdev, &cqe_coal_state,
+ &cqe_coal_max_num);
hinic3_qp_prepare_cmdq_header(&qp_ctxt_block->cmdq_hdr, ctxt_type,
max_ctxts, start_qid);
for (i = 0; i < max_ctxts; i++) {
if (ctxt_type == HINIC3_QP_CTXT_TYPE_RQ)
hinic3_rq_prepare_ctxt(&nic_io->rq[start_qid + i],
- &qp_ctxt_block->rq_ctxt[i]);
+ &qp_ctxt_block->rq_ctxt[i],
+ cqe_coal_state,
+ cqe_coal_max_num);
else
hinic3_sq_prepare_ctxt(&nic_io->sq[start_qid + i], start_qid + i,
&qp_ctxt_block->sq_ctxt[i]);
diff --git a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h
index ea68b9f..2d6b5fe 100644
--- a/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h
+++ b/drivers/net/ethernet/huawei/hinic3/adapter/sw_cmdq/sw_cmdq_ops.h
@@ -14,6 +14,11 @@ struct hinic3_qp_ctxt_header {
u16 rsvd;
};
+struct hinic3_rq_ctxt_block {
+ struct hinic3_qp_ctxt_header cmdq_hdr;
+ struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
struct hinic3_clean_queue_ctxt {
struct hinic3_qp_ctxt_header cmdq_hdr;
u32 rsvd;
@@ -35,4 +40,5 @@ struct hinic3_vlan_ctx {
u32 vlan_sel;
};
+void hinic3_get_cqe_coalesce_info(void *hwdev, u8 *state, u8 *max_num);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c
index dd0699b..30f9ef5 100644
--- a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c
+++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c
@@ -9,12 +9,13 @@
#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
-#include <linux/version.h>
#include "hinic3_lld.h"
#include "hinic3_srv_nic.h"
#include "hinic3_nic_dev.h"
+#include "hinic3_dev_mgmt.h"
#include "hinic3_hw.h"
+#include "mpu_board_defs.h"
#include "mpu_inband_cmd.h"
#include "hinic3_hwdev.h"
#include "hinic3_bond.h"
@@ -29,15 +30,23 @@ struct hinic3_bond_dev {
struct bond_attr new_attr;
struct bonding *bond;
void *ppf_hwdev;
+ struct card_node *chip_node;
struct kref ref;
#define BOND_DEV_STATUS_IDLE 0x0
#define BOND_DEV_STATUS_ACTIVATED 0x1
u8 status;
u8 slot_used[HINIC3_BOND_USER_NUM];
struct workqueue_struct *wq;
- struct delayed_work bond_work;
struct bond_tracker tracker;
spinlock_t lock; /* lock for change status */
+ /* function bitmap of bond offload */
+ u32 func_offload_bitmap[FUNC_OFFLOAD_BITMAP_LEN];
+};
+
+struct bond_work_item {
+ struct delayed_work bond_work;
+ struct hinic3_bond_dev *bond_dev;
+ struct bond_func_attr func_attr;
};
typedef void (*bond_service_func)(const char *bond_name, void *bond_attr,
@@ -56,6 +65,8 @@ struct hinic3_bond_mngr {
static struct hinic3_bond_mngr bond_mngr = { .cnt = 0 };
static DEFINE_MUTEX(g_bond_mutex);
+static void bond_try_do_work(struct work_struct *work);
+
static bool bond_dev_is_activated(const struct hinic3_bond_dev *bdev)
{
return bdev->status == BOND_DEV_STATUS_ACTIVATED;
@@ -71,13 +82,25 @@ static inline bool netif_is_bond_master(const struct net_device *dev)
}
#endif
+static u32 hinic3_get_dbdf(struct hinic3_nic_dev *nic_dev)
+{
+ u32 domain, bus, dev, func;
+ struct pci_dev *pdev = NULL;
+
+ pdev = nic_dev->pdev;
+ domain = (u32)pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev = PCI_SLOT(pdev->devfn);
+ func = PCI_FUNC(pdev->devfn);
+
+ return PCI_DBDF(domain, bus, dev, func);
+}
+
static u32 bond_gen_uplink_id(struct hinic3_bond_dev *bdev)
{
u32 uplink_id = 0;
u8 i;
struct hinic3_nic_dev *nic_dev = NULL;
- struct pci_dev *pdev = NULL;
- u32 domain, bus, dev, func;
spin_lock(&bdev->lock);
for (i = 0; i < BOND_PORT_MAX_NUM; i++) {
@@ -85,12 +108,7 @@ static u32 bond_gen_uplink_id(struct hinic3_bond_dev *bdev)
if (!bdev->tracker.ndev[i])
continue;
nic_dev = netdev_priv(bdev->tracker.ndev[i]);
- pdev = nic_dev->pdev;
- domain = (u32)pci_domain_nr(pdev->bus);
- bus = pdev->bus->number;
- dev = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- uplink_id = PCI_DBDF(domain, bus, dev, func);
+ uplink_id = hinic3_get_dbdf(nic_dev);
break;
}
}
@@ -171,12 +189,41 @@ static u8 bond_get_netdev_idx(const struct hinic3_bond_dev *bdev,
return PORT_INVALID_ID;
}
+static void bond_dev_track_port_multichip(struct bond_tracker *tracker,
+ struct hinic3_nic_dev *nic_dev)
+{
+ u8 hw_bus, i;
+ struct hinic3_pcidev *pci_adapter = NULL;
+ struct hinic3_lld_dev *lld_dev = NULL;
+
+ pci_adapter = pci_get_drvdata(nic_dev->lld_dev->pdev);
+ hw_bus = pci_adapter->chip_node->hw_bus_num;
+
+ for (i = 0; i < BOND_PORT_MAX_NUM; i++) {
+ if (tracker->ndev[i] != NULL) {
+ lld_dev = hinic3_get_lld_dev_by_netdev(
+ tracker->ndev[i]);
+ if (lld_dev == NULL || lld_dev->pdev == NULL)
+ continue;
+
+ pci_adapter = pci_get_drvdata(lld_dev->pdev);
+ if (pci_adapter->chip_node->hw_bus_num != hw_bus) {
+ pr_warn("hinic3_bond: track ndev:%s set multi chip bond.\n",
+ tracker->ndev[i]->name);
+ tracker->is_multichip = true;
+ break;
+ }
+ }
+ }
+}
+
static u8 bond_dev_track_port(struct hinic3_bond_dev *bdev,
struct net_device *ndev)
{
u8 port_id;
void *ppf_hwdev = NULL;
struct hinic3_nic_dev *nic_dev = NULL;
+ struct hinic3_pcidev *pci_adapter = NULL;
struct hinic3_lld_dev *ppf_lld_dev = NULL;
nic_dev = get_nic_dev_safe(ndev);
@@ -185,6 +232,8 @@ static u8 bond_dev_track_port(struct hinic3_bond_dev *bdev,
return PORT_INVALID_ID;
}
+ pci_adapter = pci_get_drvdata(nic_dev->pdev);
+ ppf_hwdev = nic_dev->hwdev;
ppf_lld_dev = hinic3_get_ppf_lld_dev_unsafe(nic_dev->lld_dev);
if (ppf_lld_dev)
ppf_hwdev = ppf_lld_dev->hwdev;
@@ -193,6 +242,7 @@ static u8 bond_dev_track_port(struct hinic3_bond_dev *bdev,
port_id = hinic3_physical_port_id(nic_dev->hwdev);
spin_lock(&bdev->lock);
+ bond_dev_track_port_multichip(&bdev->tracker, nic_dev);
/* attach netdev to the port position associated with it */
if (bdev->tracker.ndev[port_id]) {
pr_warn("hinic3_bond: Old ndev:%s is replaced\n",
@@ -203,8 +253,10 @@ static u8 bond_dev_track_port(struct hinic3_bond_dev *bdev,
bdev->tracker.ndev[port_id] = ndev;
bdev->tracker.netdev_state[port_id].link_up = 0;
bdev->tracker.netdev_state[port_id].tx_enabled = 0;
- if (!bdev->ppf_hwdev)
- bdev->ppf_hwdev = ppf_hwdev;
+ bdev->ppf_hwdev = ppf_hwdev;
+ if (pci_adapter && !bdev->chip_node)
+ bdev->chip_node = pci_adapter->chip_node;
+
pr_info("TRACK cnt: %d, slave_name(%s)\n",
bdev->tracker.cnt, ndev->name);
spin_unlock(&bdev->lock);
@@ -217,8 +269,10 @@ static void bond_dev_untrack_port(struct hinic3_bond_dev *bdev, u8 idx)
spin_lock(&bdev->lock);
if (bdev->tracker.ndev[idx]) {
- pr_info("hinic3_bond: untrack port:%u ndev:%s cnt:%d\n", idx,
- bdev->tracker.ndev[idx]->name, bdev->tracker.cnt);
+
+ pr_info("hinic3_bond: untrack port:%u ndev:%s cnt:%d\n",
+ idx, bdev->tracker.ndev[idx]->name,
+ bdev->tracker.cnt - 1);
bdev->tracker.ndev[idx] = NULL;
bdev->tracker.cnt--;
}
@@ -226,10 +280,16 @@ static void bond_dev_untrack_port(struct hinic3_bond_dev *bdev, u8 idx)
spin_unlock(&bdev->lock);
}
-static void bond_slave_event(struct hinic3_bond_dev *bdev, struct slave *slave)
+static void bond_slave_event(struct bond_work_item *work_item,
+ struct slave *slave)
{
+ struct hinic3_bond_dev *bdev = NULL;
u8 idx;
+ if (work_item == NULL)
+ return;
+
+ bdev = work_item->bond_dev;
idx = bond_get_netdev_idx(bdev, slave->dev);
if (idx == PORT_INVALID_ID)
idx = bond_dev_track_port(bdev, slave->dev);
@@ -242,7 +302,7 @@ static void bond_slave_event(struct hinic3_bond_dev *bdev, struct slave *slave)
bond_is_active_slave(slave);
spin_unlock(&bdev->lock);
- queue_delayed_work(bdev->wq, &bdev->bond_work, 0);
+ queue_delayed_work(bdev->wq, &work_item->bond_work, 0);
}
static bool bond_eval_bonding_stats(const struct hinic3_bond_dev *bdev,
@@ -261,14 +321,20 @@ static bool bond_eval_bonding_stats(const struct hinic3_bond_dev *bdev,
return bdev->tracker.cnt > 0;
}
-static void bond_master_event(struct hinic3_bond_dev *bdev,
+static void bond_master_event(struct bond_work_item *work_item,
struct bonding *bond)
{
+ struct hinic3_bond_dev *bdev = NULL;
+
+ if (work_item == NULL)
+ return;
+
+ bdev = work_item->bond_dev;
spin_lock(&bdev->lock);
bdev->tracker.is_bonded = bond_eval_bonding_stats(bdev, bond);
spin_unlock(&bdev->lock);
- queue_delayed_work(bdev->wq, &bdev->bond_work, 0);
+ queue_delayed_work(bdev->wq, &work_item->bond_work, 0);
}
static struct hinic3_bond_dev *bond_get_bdev(struct bonding *bond)
@@ -295,6 +361,7 @@ static struct hinic3_bond_dev *bond_get_bdev(struct bonding *bond)
if (strncmp(bond->dev->name,
bdev->name, BOND_NAME_MAX_LEN) == 0) {
bdev->bond = bond;
+ mutex_unlock(&g_bond_mutex);
return bdev;
}
}
@@ -337,12 +404,48 @@ bool hinic3_is_bond_dev_status_actived(struct net_device *ndev)
return bdev->status == BOND_DEV_STATUS_ACTIVATED;
}
EXPORT_SYMBOL(hinic3_is_bond_dev_status_actived);
-/*lint +e580 +e546*/
+
+static void get_default_func_attr(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr *func_attr)
+{
+ u32 zero_array[FUNC_OFFLOAD_BITMAP_LEN] = {};
+
+ if (memcmp(bdev->func_offload_bitmap, zero_array,
+ sizeof(zero_array)) != 0) {
+ spin_lock(&bdev->lock);
+ (void)memcpy(func_attr->func_offload_bitmap,
+ bdev->func_offload_bitmap,
+ sizeof(bdev->func_offload_bitmap));
+ spin_unlock(&bdev->lock);
+ func_attr->bond_to_func = TO_FUNCTION_TABLE;
+ func_attr->bond_bifur_en = true;
+ }
+}
+
+static struct bond_work_item *get_bond_work_item(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr)
+{
+ struct bond_work_item *work_item = NULL;
+
+ work_item = kzalloc(sizeof(struct bond_work_item), GFP_KERNEL);
+ if (work_item == NULL) {
+ pr_err("Failed to allocate work item\n");
+ return NULL;
+ }
+
+ work_item->bond_dev = bdev;
+ work_item->func_attr = func_attr;
+ INIT_DELAYED_WORK(&work_item->bond_work, bond_try_do_work);
+
+ return work_item;
+}
static void bond_handle_rtnl_event(struct net_device *ndev)
{
struct hinic3_bond_dev *bdev = NULL;
struct bonding *bond = NULL;
+ struct bond_func_attr func_attr = {};
+ struct bond_work_item *work_item = NULL;
struct slave *slave = NULL;
bond = get_bonding_by_netdev(ndev);
@@ -352,11 +455,14 @@ static void bond_handle_rtnl_event(struct net_device *ndev)
bond_update_attr(bdev, bond);
+ get_default_func_attr(bdev, &func_attr);
+
+ work_item = get_bond_work_item(bdev, func_attr);
if (netif_is_bond_slave(ndev)) {
slave = bond_slave_get_rtnl(ndev);
- bond_slave_event(bdev, slave);
+ bond_slave_event(work_item, slave);
} else {
- bond_master_event(bdev, bond);
+ bond_master_event(work_item, bond);
}
}
@@ -376,7 +482,7 @@ static void bond_rtnl_data_ready(struct sock *sk)
if (!hdr ||
!NLMSG_OK(hdr, skb->len) ||
hdr->nlmsg_type != RTM_NEWLINK ||
- !rtnl_is_locked()) {
+ rtnl_is_locked() == 0) {
goto free_skb;
}
@@ -428,8 +534,37 @@ static void bond_disable_netdev_event(void)
sock_release(bond_mngr.rtnl_sock);
}
+static u32 bond_get_user_bitmap(const struct hinic3_bond_dev *bdev)
+{
+ u32 user_bitmap = 0;
+ u8 user;
+
+ for (user = HINIC3_BOND_USER_OVS; user < HINIC3_BOND_USER_NUM; user++) {
+ if (bdev->slot_used[user] == 1)
+ BITMAP_SET(user_bitmap, user);
+ }
+ return user_bitmap;
+}
+
+static void *get_hwdev_by_chip_node(struct card_node *chip_node)
+{
+ struct hinic3_pcidev *pci_dev = NULL;
+
+ if (!chip_node)
+ return NULL;
+
+ list_for_each_entry(pci_dev, &chip_node->func_list, node) {
+ if (!pci_dev)
+ continue;
+
+ return pci_dev->lld_dev.hwdev;
+ }
+
+ return NULL;
+}
+
static int bond_send_upcmd(struct hinic3_bond_dev *bdev, struct bond_attr *attr,
- u8 cmd_type)
+ struct bond_func_attr func_attr, u8 cmd_type)
{
int err, ret, len;
struct hinic3_bond_cmd cmd = {0};
@@ -437,6 +572,7 @@ static int bond_send_upcmd(struct hinic3_bond_dev *bdev, struct bond_attr *attr,
cmd.sub_cmd = 0;
cmd.ret_status = 0;
+ cmd.func_attr = func_attr;
if (attr) {
memcpy(&cmd.attr, attr, sizeof(*attr));
@@ -444,6 +580,7 @@ static int bond_send_upcmd(struct hinic3_bond_dev *bdev, struct bond_attr *attr,
cmd.attr.bond_id = bdev->bond_attr.bond_id;
cmd.attr.slaves = bdev->bond_attr.slaves;
}
+ cmd.attr.user_bitmap = bond_get_user_bitmap(bdev);
len = sizeof(cmd.bond_name);
if (cmd_type == MPU_CMD_BOND_CREATE) {
@@ -454,20 +591,22 @@ static int bond_send_upcmd(struct hinic3_bond_dev *bdev, struct bond_attr *attr,
}
err = hinic3_msg_to_mgmt_sync(bdev->ppf_hwdev, HINIC3_MOD_OVS, cmd_type,
- &cmd, sizeof(cmd), &cmd, &out_size, 0,
- HINIC3_CHANNEL_NIC);
+ &cmd, sizeof(cmd), &cmd, &out_size, 0,
+ HINIC3_CHANNEL_NIC);
if (err != 0 || !out_size || cmd.ret_status != 0) {
- pr_err("hinic3_bond: uP cmd: %u failed, err: %d, sts: %u, out size: %u\n",
- cmd_type, err, cmd.ret_status, out_size);
+ pr_err("hinic3_bond:uP cmd:%u failed, err:%d, sts:%u, out size:%u\n",
+ cmd_type, err, cmd.ret_status, out_size);
err = -EIO;
}
return err;
}
-static int bond_upcmd_deactivate(struct hinic3_bond_dev *bdev)
+static int bond_upcmd_deactivate(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr)
{
- int err;
+ u32 user_bitmap = 0;
+ int err = 0;
u16 id_tmp;
if (bdev->status == BOND_DEV_STATUS_IDLE)
@@ -475,7 +614,24 @@ static int bond_upcmd_deactivate(struct hinic3_bond_dev *bdev)
pr_info("hinic3_bond: deactivate bond: %u\n", bdev->bond_attr.bond_id);
- err = bond_send_upcmd(bdev, NULL, MPU_CMD_BOND_DELETE);
+ user_bitmap = bond_get_user_bitmap(bdev);
+ if (bdev->slot_used[HINIC3_BOND_USER_BIFUR] != 0) {
+ err = bond_send_upcmd(bdev, NULL, func_attr,
+ MPU_CMD_BOND_DELETE);
+ if (err == 0) {
+ spin_lock(&bdev->lock);
+ (void)memset(bdev->func_offload_bitmap,
+ 0, sizeof(bdev->func_offload_bitmap));
+ spin_unlock(&bdev->lock);
+ }
+ user_bitmap &= ~(1LU << HINIC3_BOND_USER_BIFUR);
+ }
+ if (user_bitmap != 0) {
+ (void)memset(&func_attr, 0, sizeof(func_attr));
+ err += bond_send_upcmd(bdev, NULL, func_attr,
+ MPU_CMD_BOND_DELETE);
+ }
+
if (err == 0) {
id_tmp = bdev->bond_attr.bond_id;
memset(&bdev->bond_attr, 0, sizeof(bdev->bond_attr));
@@ -540,21 +696,32 @@ static void bond_update_slave_info(struct hinic3_bond_dev *bdev,
} else if (ndev && (ndev == bdev->tracker.ndev[i])) {
/* BOND_MODE_ACTIVEBACKUP */
BITMAP_SET(attr->active_slaves, i);
- break;
}
}
}
static int bond_upcmd_config(struct hinic3_bond_dev *bdev,
- struct bond_attr *attr)
+ struct bond_attr *attr,
+ struct bond_func_attr func_attr)
{
- int err;
+ int err = 0;
+ u32 zeroArr[FUNC_OFFLOAD_BITMAP_LEN] = {0};
+ u16 i;
+ u32 user_bitmap;
bond_update_slave_info(bdev, attr);
attr->bond_pf_bitmap = bdev->new_attr.bond_pf_bitmap;
- if (memcmp(&bdev->bond_attr, attr, sizeof(struct bond_attr)) == 0)
+ if (memcmp(&bdev->bond_attr, attr, sizeof(struct bond_attr)) == 0 &&
+ (memcmp(func_attr.func_offload_bitmap, zeroArr,
+ sizeof(zeroArr)) == 0 ||
+ memcmp(bdev->func_offload_bitmap, func_attr.func_offload_bitmap,
+ sizeof(func_attr.func_offload_bitmap)) == 0)) {
return 0;
+ }
+
+ // 下发时去掉bond的成员func
+ func_attr.func_offload_bitmap[0] &= ~attr->bond_pf_bitmap;
pr_info("hinic3_bond: Config bond: %u\n", attr->bond_id);
pr_info("mode:%u, up_d:%u, down_d:%u, hash:%u, slaves:%u, ap:%u, cs:%u\n",
@@ -568,7 +735,26 @@ static int bond_upcmd_config(struct hinic3_bond_dev *bdev,
pr_info("bond_pf_bitmap: 0x%x\n", attr->bond_pf_bitmap);
pr_info("bond user_bitmap 0x%x\n", attr->user_bitmap);
- err = bond_send_upcmd(bdev, attr, MPU_CMD_BOND_SET_ATTR);
+ user_bitmap = attr->user_bitmap;
+ if (bdev->slot_used[HINIC3_BOND_USER_BIFUR] != 0) {
+ err = bond_send_upcmd(bdev, attr, func_attr,
+ MPU_CMD_BOND_SET_ATTR);
+ if (err == 0) {
+ spin_lock(&bdev->lock);
+ for (i = 0; i < FUNC_OFFLOAD_BITMAP_LEN; i++) {
+ bdev->func_offload_bitmap[i] |=
+ func_attr.func_offload_bitmap[i];
+ }
+ spin_unlock(&bdev->lock);
+ }
+ user_bitmap &= ~(1LU << HINIC3_BOND_USER_BIFUR);
+ }
+ if (user_bitmap != 0) {
+ (void)memset(&func_attr, 0, sizeof(func_attr));
+ err += bond_send_upcmd(bdev, attr, func_attr,
+ MPU_CMD_BOND_SET_ATTR);
+ }
+
if (!err)
memcpy(&bdev->bond_attr, attr, sizeof(*attr));
@@ -576,7 +762,8 @@ static int bond_upcmd_config(struct hinic3_bond_dev *bdev,
}
static int bond_upcmd_activate(struct hinic3_bond_dev *bdev,
- struct bond_attr *attr)
+ struct bond_attr *attr,
+ struct bond_func_attr func_attr)
{
int err;
@@ -585,11 +772,11 @@ static int bond_upcmd_activate(struct hinic3_bond_dev *bdev,
pr_info("hinic3_bond: active bond: %u\n", bdev->bond_attr.bond_id);
- err = bond_send_upcmd(bdev, attr, MPU_CMD_BOND_CREATE);
+ err = bond_send_upcmd(bdev, attr, func_attr, MPU_CMD_BOND_CREATE);
if (err == 0) {
bdev->status = BOND_DEV_STATUS_ACTIVATED;
bdev->bond_attr.bond_mode = attr->bond_mode;
- err = bond_upcmd_config(bdev, attr);
+ err = bond_upcmd_config(bdev, attr, func_attr);
}
return err;
@@ -613,19 +800,8 @@ static void bond_call_service_func(struct hinic3_bond_dev *bdev,
mutex_unlock(&g_bond_service_func_mutex);
}
-static u32 bond_get_user_bitmap(struct hinic3_bond_dev *bdev)
-{
- u32 user_bitmap = 0;
- u8 user;
-
- for (user = HINIC3_BOND_USER_OVS; user < HINIC3_BOND_USER_NUM; user++) {
- if (bdev->slot_used[user] == 1)
- BITMAP_SET(user_bitmap, user);
- }
- return user_bitmap;
-}
-
-static void bond_do_work(struct hinic3_bond_dev *bdev)
+static void bond_do_work(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr)
{
bool is_bonded = 0;
struct bond_attr attr;
@@ -640,38 +816,31 @@ static void bond_do_work(struct hinic3_bond_dev *bdev)
/* is_bonded indicates whether bond should be activated. */
if (is_bonded && !bond_dev_is_activated(bdev)) {
bond_call_service_func(bdev, &attr, BOND_BEFORE_ACTIVE, 0);
- err = bond_upcmd_activate(bdev, &attr);
+ err = bond_upcmd_activate(bdev, &attr, func_attr);
bond_call_service_func(bdev, &attr, BOND_AFTER_ACTIVE, err);
} else if (is_bonded && bond_dev_is_activated(bdev)) {
bond_call_service_func(bdev, &attr, BOND_BEFORE_MODIFY, 0);
- err = bond_upcmd_config(bdev, &attr);
+ err = bond_upcmd_config(bdev, &attr, func_attr);
bond_call_service_func(bdev, &attr, BOND_AFTER_MODIFY, err);
} else if (!is_bonded && bond_dev_is_activated(bdev)) {
bond_call_service_func(bdev, &attr, BOND_BEFORE_DEACTIVE, 0);
- err = bond_upcmd_deactivate(bdev);
+ err = bond_upcmd_deactivate(bdev, func_attr);
bond_call_service_func(bdev, &attr, BOND_AFTER_DEACTIVE, err);
}
if (err)
- pr_err("hinic3_bond: Do bond failed\n");
+ pr_err("hinic3_bond: Do bond failed, err: %d.\n", err);
}
-#define MIN_BOND_SLAVE_CNT 2
static void bond_try_do_work(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
- struct hinic3_bond_dev *bdev =
- container_of(delayed_work, struct hinic3_bond_dev, bond_work);
- int status;
-
- status = mutex_trylock(&g_bond_mutex);
- if (status == 0) {
- /* Delay 1 sec and retry */
- queue_delayed_work(bdev->wq, &bdev->bond_work, HZ);
- } else {
- bond_do_work(bdev);
- mutex_unlock(&g_bond_mutex);
- }
+ struct bond_work_item *work_item =
+ container_of(delayed_work, struct bond_work_item, bond_work);
+
+ bond_do_work(work_item->bond_dev, work_item->func_attr);
+
+ kfree(work_item);
}
static int bond_dev_init(struct hinic3_bond_dev *bdev, const char *name)
@@ -684,12 +853,11 @@ static int bond_dev_init(struct hinic3_bond_dev *bdev, const char *name)
return -ENODEV;
}
- INIT_DELAYED_WORK(&bdev->bond_work, bond_try_do_work);
bdev->status = BOND_DEV_STATUS_IDLE;
err = strscpy(bdev->name, name, strlen(name));
if (err < 0) {
pr_err("hinic3_bond: Failed to init bond dev\n");
- cancel_delayed_work_sync(&bdev->bond_work);
+ flush_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
return err;
}
@@ -699,13 +867,41 @@ static int bond_dev_init(struct hinic3_bond_dev *bdev, const char *name)
return 0;
}
+static struct bonding *bond_get_knl_bonding(const char *name)
+{
+ struct net_device *ndev_tmp = NULL;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ndev_tmp) {
+ if (netif_is_bond_master(ndev_tmp) &&
+ (strcmp(ndev_tmp->name, name) == 0)) {
+ dev_hold(ndev_tmp);
+ rtnl_unlock();
+ return netdev_priv(ndev_tmp);
+ }
+ }
+ rtnl_unlock();
+ return NULL;
+}
+
+static inline void bond_put_knl_bonding(struct bonding *bond)
+{
+ dev_put(bond->dev);
+}
+
static int bond_dev_release(struct hinic3_bond_dev *bdev)
{
+ struct bond_func_attr func_attr = {};
int err;
u8 i;
u32 bond_cnt;
- err = bond_upcmd_deactivate(bdev);
+ get_default_func_attr(bdev, &func_attr);
+
+ mutex_unlock(&g_bond_mutex);
+ flush_workqueue(bdev->wq);
+ mutex_lock(&g_bond_mutex);
+ err = bond_upcmd_deactivate(bdev, func_attr);
if (err) {
pr_err("hinic3_bond: Failed to deactivate dev\n");
mutex_unlock(&g_bond_mutex);
@@ -727,8 +923,10 @@ static int bond_dev_release(struct hinic3_bond_dev *bdev)
if (!bond_cnt)
bond_disable_netdev_event();
- cancel_delayed_work_sync(&bdev->bond_work);
+ flush_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
+ if (bdev->bond != NULL)
+ bond_put_knl_bonding(bdev->bond);
kfree(bdev);
return err;
@@ -818,7 +1016,7 @@ static void update_bond_info(struct hinic3_bond_dev *bdev, struct bonding *bond)
rtnl_unlock();
/* In case user queries info before bonding is complete */
- flush_delayed_work(&bdev->bond_work);
+ flush_workqueue(bdev->wq);
rtnl_lock();
while (i)
@@ -842,25 +1040,72 @@ static struct hinic3_bond_dev *bond_dev_by_name(const char *name)
return bdev;
}
-static void bond_dev_user_attach(struct hinic3_bond_dev *bdev,
+static void queue_bond_work_item(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr,
enum hinic3_bond_user user)
{
+ struct bond_work_item *work_item = NULL;
u32 user_bitmap;
+ work_item = get_bond_work_item(bdev, func_attr);
+ if (work_item == NULL) {
+ pr_err("hinic3_bond: failed to malloc bond work item memory.\n");
+ return;
+ }
+
+ user_bitmap = bond_get_user_bitmap(bdev);
+ pr_info("hinic3_bond: user %u attach bond %s, user_bitmap %#x\n", user,
+ bdev->name, user_bitmap);
+ queue_delayed_work(bdev->wq, &work_item->bond_work, 0);
+}
+
+static inline void vf_lag_bond_work_item(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr,
+ enum hinic3_bond_user user)
+{
+ u32 user_bitmap = bond_get_user_bitmap(bdev);
+
+ pr_info("Vf_lag sync hinic3_bond: user %u attach bond %s, user_bitmap %#x\n",
+ user, bdev->name, user_bitmap);
+ bond_do_work(bdev, func_attr);
+}
+
+static void vf_lag_user_attach(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr,
+ enum hinic3_bond_user user)
+{
if (user < 0 || user >= HINIC3_BOND_USER_NUM)
return;
- if (bdev->slot_used[user])
+ if (bdev->slot_used[user] == 0) {
+ bdev->slot_used[user] = 1;
+ if (kref_get_unless_zero(&bdev->ref) == 0)
+ kref_init(&bdev->ref);
+ else
+ vf_lag_bond_work_item(bdev, func_attr, user);
+ } else {
+ if (func_attr.bond_to_func == 1)
+ vf_lag_bond_work_item(bdev, func_attr, user);
+ }
+}
+
+static void bond_dev_user_attach(struct hinic3_bond_dev *bdev,
+ struct bond_func_attr func_attr,
+ enum hinic3_bond_user user)
+{
+
+ if (user < 0 || user >= HINIC3_BOND_USER_NUM)
return;
- bdev->slot_used[user] = 1;
- if (!kref_get_unless_zero(&bdev->ref)) {
- kref_init(&bdev->ref);
+ if (bdev->slot_used[user] == 0) {
+ bdev->slot_used[user] = 1;
+ if (!kref_get_unless_zero(&bdev->ref))
+ kref_init(&bdev->ref);
+ else
+ queue_bond_work_item(bdev, func_attr, user);
} else {
- user_bitmap = bond_get_user_bitmap(bdev);
- pr_info("hinic3_bond: user %u attach bond %s, user_bitmap %#x\n",
- user, bdev->name, user_bitmap);
- queue_delayed_work(bdev->wq, &bdev->bond_work, 0);
+ if (func_attr.bond_to_func == 1)
+ queue_bond_work_item(bdev, func_attr, user);
}
}
@@ -868,42 +1113,72 @@ static void bond_dev_user_detach(struct hinic3_bond_dev *bdev,
enum hinic3_bond_user user, bool *freed)
{
if (bdev->slot_used[user]) {
- bdev->slot_used[user] = 0;
if (kref_read(&bdev->ref) == 1)
*freed = true;
kref_put(&bdev->ref, bond_dev_free);
+ if (!*freed)
+ bdev->slot_used[user] = 0;
}
}
-static struct bonding *bond_get_knl_bonding(const char *name)
+void hinic3_bond_set_user_bitmap(struct bond_attr *attr,
+ enum hinic3_bond_user user)
{
+ if (BITMAP_JUDGE(attr->user_bitmap, user) == 0)
+ BITMAP_SET(attr->user_bitmap, user);
+}
+EXPORT_SYMBOL(hinic3_bond_set_user_bitmap);
+
+struct bonding *hinic3_get_bond_by_port(u32 port_id,
+ struct hinic3_lld_dev *lld_dev)
+{
+ struct card_node *chip_node = NULL;
+ struct card_node *slave_chip_node = NULL;
struct net_device *ndev_tmp = NULL;
+ struct hinic3_lld_dev *slave_lld_dev = NULL;
+ struct slave *slave = NULL;
+ struct bonding *bond = NULL;
+ u8 slaves_bitmap = 0;
- rcu_read_lock();
+ chip_node = hinic3_get_chip_node_by_lld(lld_dev);
+ if (!chip_node)
+ return NULL;
+
+ rtnl_lock();
for_each_netdev(&init_net, ndev_tmp) {
- if (netif_is_bond_master(ndev_tmp) &&
- !strcmp(ndev_tmp->name, name)) {
- rcu_read_unlock();
- return netdev_priv(ndev_tmp);
+ if (netif_is_bond_slave(ndev_tmp)) {
+ slave_lld_dev = hinic3_get_lld_dev_by_netdev(ndev_tmp);
+ slave_chip_node = hinic3_get_chip_node_by_lld(
+ slave_lld_dev);
+ if (!slave_chip_node)
+ continue;
+
+ slave = bond_slave_get_rtnl(ndev_tmp);
+ if (slave) {
+ bond = bond_get_bond_by_slave(slave);
+ slaves_bitmap = bond_get_slaves_bitmap(NULL,
+ bond);
}
+
+ if (chip_node == slave_chip_node &&
+ (slaves_bitmap & (0x1 << port_id)) != 0) {
+ rtnl_unlock();
+ return bond;
+ }
+ }
}
- rcu_read_unlock();
- return NULL;
-}
+ rtnl_unlock();
-void hinic3_bond_set_user_bitmap(struct bond_attr *attr,
- enum hinic3_bond_user user)
-{
- if (!BITMAP_JUDGE(attr->user_bitmap, user))
- BITMAP_SET(attr->user_bitmap, user);
+ return NULL;
}
-EXPORT_SYMBOL(hinic3_bond_set_user_bitmap);
+EXPORT_SYMBOL(hinic3_get_bond_by_port);
int hinic3_bond_attach(const char *name, enum hinic3_bond_user user,
u16 *bond_id)
{
struct hinic3_bond_dev *bdev = NULL;
struct bonding *bond = NULL;
+ struct bond_func_attr func_attr = {};
bool new_dev = false;
if (!name || !bond_id)
@@ -911,35 +1186,109 @@ int hinic3_bond_attach(const char *name, enum hinic3_bond_user user,
bond = bond_get_knl_bonding(name);
if (!bond) {
- pr_warn("hinic3_bond: Kernel bond %s not exist.\n", name);
+ pr_warn("hinic3_bond: Kernel bond not exist.\n");
return -ENODEV;
}
mutex_lock(&g_bond_mutex);
bdev = bond_dev_by_name(name);
- if (!bdev) {
+ if (bdev == NULL) {
bdev = bond_dev_alloc(name);
new_dev = true;
} else {
- pr_info("hinic3_bond: %s already exist\n", name);
+ pr_info("hinic3_bond: already exist\n");
}
- if (!bdev) {
+ if (bdev == NULL) {
+		// lock has been released in bond_dev_alloc
+ bond_put_knl_bonding(bond);
return -ENODEV;
}
- bond_dev_user_attach(bdev, user);
+ bond_dev_user_attach(bdev, func_attr, user);
mutex_unlock(&g_bond_mutex);
if (new_dev)
update_bond_info(bdev, bond);
+ else
+ bond_put_knl_bonding(bond);
+ if ((new_dev == true) && (bdev->tracker.cnt == 0)) {
+ hinic3_bond_detach(bdev->bond_attr.bond_id, user);
+ bdev = NULL;
+ pr_info("hinic3_bond: no slave dev, no need attach bond\n");
+ return -ENODEV;
+ }
*bond_id = bdev->bond_attr.bond_id;
return 0;
}
EXPORT_SYMBOL(hinic3_bond_attach);
+int hinic3_bond_attach_with_func(const char *name, enum hinic3_bond_user user,
+ struct bond_func_attr func_attr, u16 *bond_id)
+{
+ int ret = 0;
+ struct hinic3_bond_dev *bdev = NULL;
+ struct bonding *bond = NULL;
+ u32 zeroArr[FUNC_OFFLOAD_BITMAP_LEN] = {0};
+ bool new_dev = false;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ if (func_attr.bond_to_func != TO_FUNCTION_TABLE ||
+ user != HINIC3_BOND_USER_BIFUR) {
+ pr_warn("hinic3_bond: Invalid bond_to_func: %u or user: %u.\n",
+ func_attr.bond_to_func, user);
+ return -EINVAL;
+ }
+
+ if (memcmp(func_attr.func_offload_bitmap, zeroArr,
+ sizeof(zeroArr)) == 0) {
+ return 0;
+ }
+
+ bond = bond_get_knl_bonding(name);
+ if (!bond) {
+ pr_warn("hinic3_bond: Kernel bond not exist.\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&g_bond_mutex);
+ bdev = bond_dev_by_name(name);
+ if (!bdev) {
+ bdev = bond_dev_alloc(name);
+ if (!bdev) {
+			// lock has been released in bond_dev_alloc
+ bond_put_knl_bonding(bond);
+ return -ENOMEM;
+ }
+ (void)memcpy(bdev->func_offload_bitmap,
+ func_attr.func_offload_bitmap,
+ sizeof(func_attr.func_offload_bitmap));
+ new_dev = true;
+ } else {
+ pr_info("hinic3_bond: Already exist.\n");
+ }
+
+ if (func_attr.sync_flag == 1)
+ vf_lag_user_attach(bdev, func_attr, user);
+ else
+ bond_dev_user_attach(bdev, func_attr, user);
+
+ mutex_unlock(&g_bond_mutex);
+
+ if (new_dev)
+ update_bond_info(bdev, bond);
+ else
+ bond_put_knl_bonding(bond);
+
+ *bond_id = bdev->bond_attr.bond_id;
+
+ return ret;
+}
+EXPORT_SYMBOL(hinic3_bond_attach_with_func);
+
int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user)
{
int err = 0;
@@ -964,16 +1313,74 @@ int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user)
}
EXPORT_SYMBOL(hinic3_bond_detach);
+int hinic3_bond_detach_with_func(const char *name, enum hinic3_bond_user user,
+ struct bond_func_attr func_attr, u16 *bond_id)
+{
+ int ret = 0;
+ struct hinic3_bond_dev *bdev = NULL;
+ bool lock_freed = false;
+ u8 i;
+ u32 zeroArr[FUNC_OFFLOAD_BITMAP_LEN] = {0};
+
+ if (name == NULL) {
+ pr_warn("hinic3_bond: Invalid bond user: %d.\n", user);
+ return -EINVAL;
+ }
+
+ if (func_attr.bond_to_func != TO_FUNCTION_TABLE ||
+ user != HINIC3_BOND_USER_BIFUR) {
+ pr_warn("hinic3_bond: Invalid bond_to_func: %u or user: %u.\n",
+ func_attr.bond_to_func, user);
+ return -EINVAL;
+ }
+
+ mutex_lock(&g_bond_mutex);
+ bdev = bond_dev_by_name(name);
+ if (bdev == NULL) {
+ pr_warn("hinic3_bond: Bond dev does not exist, name: %s.\n",
+ name);
+ mutex_unlock(&g_bond_mutex);
+ return 0;
+ }
+
+ if ((memcmp(bdev->func_offload_bitmap, zeroArr, sizeof(zeroArr)) == 0)
+ || (memcmp(bdev->func_offload_bitmap,
+ func_attr.func_offload_bitmap,
+ sizeof(func_attr.func_offload_bitmap)) == 0)) {
+ bond_dev_user_detach(bdev, user, &lock_freed);
+
+ if (!lock_freed)
+ mutex_unlock(&g_bond_mutex);
+ } else {
+ ret = bond_send_upcmd(bdev, NULL, func_attr,
+ MPU_CMD_BOND_DELETE);
+ if (ret == 0) {
+ spin_lock(&bdev->lock);
+ for (i = 0; i < FUNC_OFFLOAD_BITMAP_LEN; i++) {
+ bdev->func_offload_bitmap[i] &=
+ (~func_attr.func_offload_bitmap[i]);
+ }
+ spin_unlock(&bdev->lock);
+ }
+ mutex_unlock(&g_bond_mutex);
+ }
+ *bond_id = bdev->bond_attr.bond_id;
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic3_bond_detach_with_func);
+
void hinic3_bond_clean_user(enum hinic3_bond_user user)
{
int i = 0;
+ struct hinic3_bond_dev *bdev = NULL;
bool lock_freed = false;
mutex_lock(&g_bond_mutex);
for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) {
- if (bond_mngr.bond_dev[i]) {
- bond_dev_user_detach(bond_mngr.bond_dev[i],
- user, &lock_freed);
+ bdev = bond_mngr.bond_dev[i];
+ if (bdev != NULL) {
+ bond_dev_user_detach(bdev, user, &lock_freed);
if (lock_freed) {
mutex_lock(&g_bond_mutex);
lock_freed = false;
@@ -1140,3 +1547,86 @@ int hinic3_get_bond_tracker_by_name(const char *name,
return -ENODEV;
}
EXPORT_SYMBOL(hinic3_get_bond_tracker_by_name);
+
+int hinic3_get_func_offload_bitmap(const char *bond_name,
+ u32 *func_offload_bitmap, u8 len)
+{
+ struct hinic3_bond_dev *bdev = NULL;
+
+ mutex_lock(&g_bond_mutex);
+ bdev = bond_dev_by_name(bond_name);
+ if (!bdev) {
+ mutex_unlock(&g_bond_mutex);
+ return -ENODEV;
+ }
+ mutex_unlock(&g_bond_mutex);
+
+ (void)memcpy(func_offload_bitmap, bdev->func_offload_bitmap,
+ sizeof(bdev->func_offload_bitmap));
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic3_get_func_offload_bitmap);
+
+bool hinic3_is_bond_offload(struct hinic3_lld_dev *lld_dev)
+{
+ struct card_node *chip_node = NULL;
+ u16 port_id;
+ u8 i;
+ struct hinic3_bond_dev *bdev = NULL;
+ u32 zero_array[FUNC_OFFLOAD_BITMAP_LEN] = {};
+
+ chip_node = hinic3_get_chip_node_by_lld(lld_dev);
+ if (!chip_node)
+ return false;
+
+ port_id = hinic3_physical_port_id(lld_dev->hwdev);
+
+ mutex_lock(&g_bond_mutex);
+ for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) {
+ if (bond_mngr.bond_dev[i]) {
+ bdev = bond_mngr.bond_dev[i];
+ spin_lock(&bdev->lock);
+ if (bdev->chip_node == chip_node &&
+ (bdev->bond_attr.slaves & (0x1 << port_id)) != 0
+ && memcmp(bdev->func_offload_bitmap, zero_array,
+ sizeof(zero_array)) != 0) {
+ spin_unlock(&bdev->lock);
+ mutex_unlock(&g_bond_mutex);
+ return true;
+ }
+ spin_unlock(&bdev->lock);
+ }
+ }
+ mutex_unlock(&g_bond_mutex);
+
+ return false;
+}
+EXPORT_SYMBOL(hinic3_is_bond_offload);
+
+void hinic3_bond_flush_workqueue(void *hwdev)
+{
+ u8 i;
+ struct hinic3_bond_dev *bdev = NULL;
+ void *new_hwdev = NULL;
+
+ mutex_lock(&g_bond_mutex);
+ for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) {
+ bdev = bond_mngr.bond_dev[i];
+ if (!bdev)
+ continue;
+
+ if (hwdev == bdev->ppf_hwdev) {
+ rtnl_lock();
+ flush_workqueue(bdev->wq);
+ rtnl_unlock();
+ new_hwdev = get_hwdev_by_chip_node(bdev->chip_node);
+ spin_lock(&bdev->lock);
+ bdev->ppf_hwdev = new_hwdev;
+ spin_unlock(&bdev->lock);
+ }
+ }
+
+ mutex_unlock(&g_bond_mutex);
+}
+EXPORT_SYMBOL(hinic3_bond_flush_workqueue);
diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h
index 5ab36f7..54a4069 100644
--- a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h
+++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h
@@ -4,15 +4,18 @@
#ifndef HINIC3_BOND_H
#define HINIC3_BOND_H
+#include <net/bonding.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include "mpu_inband_cmd_defs.h"
#include "bond_common_defs.h"
+#include "hinic3_lld.h"
enum hinic3_bond_user {
HINIC3_BOND_USER_OVS,
HINIC3_BOND_USER_TOE,
HINIC3_BOND_USER_ROCE,
+ HINIC3_BOND_USER_BIFUR,
HINIC3_BOND_USER_NUM
};
@@ -26,6 +29,9 @@ enum bond_service_proc_pos {
BOND_POS_MAX
};
+#define TO_GLOBAL_TABLE 0
+#define TO_FUNCTION_TABLE 1
+
#define BITMAP_SET(bm, bit) ((bm) |= (typeof(bm))(1U << (bit)))
#define BITMAP_CLR(bm, bit) ((bm) &= ~((typeof(bm))(1U << (bit))))
#define BITMAP_JUDGE(bm, bit) ((bm) & (typeof(bm))(1U << (bit)))
@@ -58,6 +64,7 @@ struct bond_tracker {
struct net_device *ndev[BOND_PORT_MAX_NUM];
u8 cnt;
bool is_bonded;
+ bool is_multichip;
};
struct bond_attr {
@@ -74,18 +81,35 @@ struct bond_attr {
u32 user_bitmap;
};
+/* Control fields for delivering pre-provisioned bond info to the function table */
+struct bond_func_attr {
+ u32 func_offload_bitmap[FUNC_OFFLOAD_BITMAP_LEN];
+ /* bond_id and bond_mode dispatch to: 0: global_tbl; 1: func_tbl */
+ u8 bond_to_func;
+ u8 bond_bifur_en;
+ u8 sync_flag;
+ u8 rsvd0;
+};
+
struct hinic3_bond_cmd {
u8 ret_status;
u8 version;
u16 sub_cmd;
struct bond_attr attr;
char bond_name[16];
+ struct bond_func_attr func_attr;
};
bool hinic3_is_bond_dev_status_actived(struct net_device *ndev);
+struct bonding *hinic3_get_bond_by_port(u32 port_id,
+ struct hinic3_lld_dev *lld_dev);
void hinic3_bond_set_user_bitmap(struct bond_attr *attr, enum hinic3_bond_user user);
int hinic3_bond_attach(const char *name, enum hinic3_bond_user user, u16 *bond_id);
+int hinic3_bond_attach_with_func(const char *name, enum hinic3_bond_user user,
+ struct bond_func_attr func_attr, u16 *bond_id);
int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user);
+int hinic3_bond_detach_with_func(const char *name, enum hinic3_bond_user user,
+ struct bond_func_attr func_attr, u16 *bond_id);
void hinic3_bond_clean_user(enum hinic3_bond_user user);
int hinic3_bond_get_uplink_id(u16 bond_id, u32 *uplink_id);
int hinic3_bond_register_service_func(enum hinic3_bond_user user, void (*func)
@@ -96,4 +120,9 @@ int hinic3_bond_get_slaves(u16 bond_id, struct hinic3_bond_info_s *info);
struct net_device *hinic3_bond_get_netdev_by_portid(const char *bond_name, u8 port_id);
int hinic3_get_hw_bond_infos(void *hwdev, struct hinic3_hw_bond_infos *infos, u16 channel);
int hinic3_get_bond_tracker_by_name(const char *name, struct bond_tracker *tracker);
+int hinic3_get_func_offload_bitmap(const char *bond_name,
+ u32 *func_offload_bitmap, u8 len);
+bool hinic3_is_bond_offload(struct hinic3_lld_dev *lld_dev);
+void hinic3_bond_flush_workqueue(void *hwdev);
+
#endif /* HINIC3_BOND_H */
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c
index 1f1235c..59aa35a 100644
--- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c
@@ -488,6 +488,7 @@ static void cqm_service_capability_init_roce(struct tag_cqm_handle *cqm_handle,
func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64;
}
func_cap->qpc_alloc_static = true;
+ func_cap->scqc_alloc_static = true;
func_cap->scqc_number += roce_own_cap->max_cqs;
func_cap->scqc_basic_size = GET_MAX(rdma_cap->cqc_entry_sz,
func_cap->scqc_basic_size);
@@ -898,12 +899,6 @@ static int cqm_capability_init_timer(struct hinic3_hwdev *handle)
func_cap->timer_vf_num, func_cap->timer_vf_id_start);
total_timer_num = func_cap->timer_pf_num + func_cap->timer_vf_num;
- if (IS_SLAVE_HOST(handle)) {
- total_timer_num *= CQM_TIMER_NUM_MULTI;
- cqm_info(handle->dev_hdl,
- "timer init: need double tw resources, total_timer_num=0x%x\n",
- total_timer_num);
- }
}
func_cap->timer_enable = service_capability->timer_en;
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h
index 8d1e481..915a74e 100644
--- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h
+++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h
@@ -367,11 +367,14 @@ s32 cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg);
#define CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__
#define CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x)
-#define cqm_err(dev, format, ...) dev_err(dev, "[CQM]" format, ##__VA_ARGS__)
-#define cqm_warn(dev, format, ...) dev_warn(dev, "[CQM]" format, ##__VA_ARGS__)
+#define cqm_err(dev, format, ...) \
+ dev_err_ratelimited(dev, "[CQM]" format, ##__VA_ARGS__)
+#define cqm_warn(dev, format, ...) \
+ dev_warn_ratelimited(dev, "[CQM]" format, ##__VA_ARGS__)
#define cqm_notice(dev, format, ...) \
- dev_notice(dev, "[CQM]" format, ##__VA_ARGS__)
-#define cqm_info(dev, format, ...) dev_info(dev, "[CQM]" format, ##__VA_ARGS__)
+ dev_notice_ratelimited(dev, "[CQM]" format, ##__VA_ARGS__)
+#define cqm_info(dev, format, ...) \
+ dev_info_ratelimited(dev, "[CQM]" format, ##__VA_ARGS__)
#ifdef __CQM_DEBUG__
#define cqm_dbg(format, ...) pr_info("[CQM]" format, ##__VA_ARGS__)
#else
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c
index 7d1bd35..3f2e928 100644
--- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c
+++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c
@@ -73,7 +73,9 @@ struct tag_cqm_qpc_mpt *cqm_object_qpc_mpt_create(void *ex_handle, u32 service_t
fake_func_id = index_num / cqm_handle->func_capability.fake_vf_qpc_number;
relative_index = index_num % cqm_handle->func_capability.fake_vf_qpc_number;
- if ((s32)fake_func_id >= cqm_get_child_func_number(cqm_handle)) {
+ if (((s32)fake_func_id >=
+ cqm_get_child_func_number(cqm_handle)) ||
+ (fake_func_id >= CQM_FAKE_FUNC_MAX)) {
cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(fake_func_id));
return NULL;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c
index 92c19c4..1af2673 100644
--- a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c
+++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c
@@ -627,7 +627,8 @@ void cqm_qpc_mpt_delete(struct tag_cqm_object *object)
* Services ensure that the QPC is referenced
* when the QPC is deleted.
*/
- if (!cla_table->alloc_static)
+ if (!cla_table->alloc_static ||
+ object->service_type == CQM_SERVICE_T_ROCE)
wait_for_completion(&object->free);
/* VMware FC need explicitly deinit spin_lock in completion */
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
index 833345a..faf36f9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h
@@ -8,7 +8,7 @@
#include "mpu_cmd_base_defs.h"
-#define HINIC3_DRV_VERSION "17.7.8.101"
+#define HINIC3_DRV_VERSION "17.12.2.102"
#define HINIC3_DRV_DESC "Intelligent Network Interface Card Driver"
#define HIUDK_DRV_DESC "Intelligent Network Unified Driver"
@@ -422,7 +422,9 @@ struct card_node {
u32 rsvd1;
atomic_t channel_busy_cnt;
void *priv_data;
- u64 rsvd2;
+ u8 hw_bus_num;
+ u8 board_type;
+ u8 rsvd[3];
};
#define HINIC3_SYNFW_TIME_PERIOD (60 * 60 * 1000)
@@ -1044,6 +1046,13 @@ u16 hinic3_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */
*/
u8 hinic3_max_pf_num(void *hwdev);
+/* *
+ * @brief hinic3_ppf_hwdev - get ppf hwdev
+ * @param hwdev: device pointer to hwdev
+ * @retval ppf device pointer to hwdev
+ */
+void *hinic3_ppf_hwdev(void *hwdev);
+
/* *
* @brief hinic3_host_pf_num - get current host pf number
* @param hwdev: device pointer to hwdev
@@ -1274,6 +1283,9 @@ int hinic3_mbox_to_host_sync(void *hwdev, enum hinic3_mod_type mod,
int hinic3_get_func_vroce_enable(void *hwdev, u16 glb_func_idx, u8 *en);
+void hinic3_set_bifur_link_status(void *hwdev, u8 port_id, u8 status);
+u8 hinic3_get_bifur_link_status(void *hwdev, u8 port_id);
+
void hinic3_module_get(void *hwdev, enum hinic3_service_type type);
void hinic3_module_put(void *hwdev, enum hinic3_service_type type);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c
index 1191653..2f2f3bf 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c
@@ -22,6 +22,7 @@
#include "nic_mpu_cmd_defs.h"
#include "mag_mpu_cmd.h"
#include "mag_mpu_cmd_defs.h"
+#include "hinic3_nictool.h"
typedef int (*nic_driv_module)(struct hinic3_nic_dev *nic_dev,
const void *buf_in, u32 in_size,
@@ -48,7 +49,7 @@ static int get_nic_drv_version(void *buf_out, const u32 *out_size)
}
snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s",
- HINIC3_NIC_DRV_VERSION, "2025-05-08_00:00:08");
+ HINIC3_NIC_DRV_VERSION, "2025-11-17_00:00:00");
return 0;
}
@@ -1026,6 +1027,81 @@ static int get_xsfp_info(struct hinic3_nic_dev *nic_dev, const void *buf_in,
return 0;
}
+static int set_mac_speed_status(struct hinic3_nic_dev *nic_dev,
+ const void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ enum mac_speed_status *status = (enum mac_speed_status *)buf_in;
+
+ if (buf_in == NULL) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Do set mac speed status failed for invalid param.\n");
+ return -EINVAL;
+ }
+
+ if (in_size != (u32)sizeof(*status)) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Unexpect buf size from user, in_size: %u, expect: %lu\n",
+ in_size, sizeof(*status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_netdev_func_id(struct hinic3_nic_dev *nic_dev,
+ const void *buf_in,
+ u32 in_size, void *buf_out, u32 *out_size)
+{
+ if ((buf_out == NULL) || (out_size == NULL))
+ return -EINVAL;
+
+ if (*out_size != sizeof(u16))
+ return -EINVAL;
+
+ *((u16 *)buf_out) = hinic3_global_func_id(nic_dev->hwdev);
+
+ return 0;
+}
+
+static int bond_default_offload(struct hinic3_nic_dev *nic_dev,
+				const void *buf_in,
+				u32 in_size, void *buf_out, u32 *out_size)
+{
+	struct mag_cmd_bond_default_offload *offload_in =
+		(struct mag_cmd_bond_default_offload *)buf_in;
+	struct mag_cmd_bond_default_offload *offload_out =
+		(struct mag_cmd_bond_default_offload *)buf_out;
+	int ret = 0;
+
+	if ((buf_in == NULL) || (buf_out == NULL) || (out_size == NULL)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Do bond default offload failed for invalid param.\n");
+		return -EINVAL;
+	}
+
+	if (*out_size != sizeof(*offload_out) ||
+	    in_size != sizeof(*offload_in)) {
+		nicif_err(nic_dev, drv, nic_dev->netdev,
+			  "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %zu\n",
+			  in_size, *out_size, sizeof(*offload_in));
+		return -EINVAL;
+	}
+
+	/* memcpy() returns the destination pointer, never an error code */
+	(void)memcpy(offload_out, offload_in, sizeof(*offload_in));
+
+	if (ret == -ENODEV) {
+		offload_out->head.status = MT_EIO;
+		return 0;
+	}
+	if (ret == -EXDEV) {
+		offload_out->head.status = MT_EINVAL;
+		return 0;
+	}
+	return ret;
+}
+
static const struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
{TX_INFO, get_tx_info},
{Q_NUM, get_q_num},
@@ -1051,7 +1127,10 @@ static const struct nic_drv_module_handle nic_driv_module_cmd_handle[] = {
{GET_XSFP_PRESENT, get_xsfp_present},
{GET_XSFP_INFO, get_xsfp_info},
{GET_XSFP_INFO_COMP_CMIS, get_xsfp_tlv_info},
- {SET_RX_PF_BW_LIMIT, set_rx_pf_bw_limit}
+ {SET_RX_PF_BW_LIMIT, set_rx_pf_bw_limit},
+ {SET_MAC_SPEED_STATUS, set_mac_speed_status},
+ {GET_FUNC_ID, get_netdev_func_id},
+ {BOND_DEFAULT_OFFLOAD, bond_default_offload}
};
static int send_to_nic_driver(struct hinic3_nic_dev *nic_dev,
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c
index e5e5578..00d4a28 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c
@@ -22,6 +22,7 @@
#include "hinic3_tx.h"
#include "hinic3_rx.h"
#include "hinic3_rss.h"
+#include "hinic3_bond.h"
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_PENDING_LIMIT_UNIT 8
@@ -946,9 +947,10 @@ static int hinic3_set_force_link_flag(struct net_device *netdev, u32 priv_flags)
netif_carrier_on(netdev);
nicif_info(nic_dev, link, netdev, "Set link up\n");
- if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
+ if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev) &&
+ !hinic3_is_bond_offload(nic_dev->lld_dev))
hinic3_notify_all_vfs_link_changed(nic_dev->hwdev,
- nic_dev->link_status);
+ nic_dev->link_status);
} else {
if (!test_and_clear_bit(HINIC3_FORCE_LINK_UP, &nic_dev->flags))
return 0;
@@ -980,7 +982,7 @@ static int hinic3_set_force_link_flag(struct net_device *netdev, u32 priv_flags)
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
hinic3_notify_all_vfs_link_changed(nic_dev->hwdev,
- nic_dev->link_status);
+ nic_dev->link_status);
}
return 0;
@@ -1018,17 +1020,20 @@ static int hinic3_run_lp_test(struct hinic3_nic_dev *nic_dev, u32 test_time)
u8 j;
skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
- if (!skb_tmp)
+ if (!skb_tmp) {
+ nicif_err(nic_dev, drv, netdev,
+ "Alloc xmit skb template failed for loopback test\n");
return -ENOMEM;
+ }
- eth_hdr = __skb_put(skb_tmp, ETH_HLEN);
+ eth_hdr = (struct ethhdr *)__skb_put(skb_tmp, ETH_HLEN);
eth_hdr->h_proto = htons(ETH_P_ARP);
ether_addr_copy(eth_hdr->h_dest, nic_dev->netdev->dev_addr);
eth_zero_addr(eth_hdr->h_source);
skb_reset_mac_header(skb_tmp);
test_data = __skb_put(skb_tmp, LP_PKT_LEN - ETH_HLEN);
- for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
+ for (i = 0; i < LP_PKT_LEN - ETH_HLEN; i++)
test_data[i] = i & 0xFF;
skb_tmp->queue_mapping = 0;
@@ -1037,7 +1042,7 @@ static int hinic3_run_lp_test(struct hinic3_nic_dev *nic_dev, u32 test_time)
for (i = 0; i < cnt; i++) {
nic_dev->lb_test_rx_idx = 0;
- memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
+ (void)memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
for (j = 0; j < LP_PKT_CNT; j++) {
skb = pskb_copy(skb_tmp, GFP_ATOMIC);
@@ -1201,13 +1206,6 @@ static int hinic3_get_fecparam(struct net_device *netdev,
u8 supported_fec = 0;
int err;
- if (fecparam->cmd != ETHTOOL_GFECPARAM) {
- nicif_err(nic_dev, drv, netdev,
- "get fecparam cmd err.exp:0x%x,real:0x%x\n",
- ETHTOOL_GFECPARAM, fecparam->cmd);
- return -EINVAL;
- }
-
err = get_fecparam(nic_dev->hwdev, &advertised_fec, &supported_fec);
if (err) {
nicif_err(nic_dev, drv, netdev, "Get fec param failed\n");
@@ -1225,14 +1223,6 @@ static int hinic3_set_fecparam(struct net_device *netdev,
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
int err;
-
- if (fecparam->cmd != ETHTOOL_SFECPARAM) {
- nicif_err(nic_dev, drv, netdev,
- "Set fecparam cmd err.exp:0x%x,real:0x%x\n",
- ETHTOOL_SFECPARAM, fecparam->cmd);
- return -EINVAL;
- }
-
err = set_fecparam(nic_dev->hwdev, (u8)fecparam->fec);
if (err) {
nicif_err(nic_dev, drv, netdev, "Set fec param failed\n");
@@ -1282,12 +1272,10 @@ static const struct ethtool_ops hinic3_ethtool_ops = {
.self_test = hinic3_diag_test,
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
.set_phys_id = hinic3_set_phys_id,
#else
.phys_id = hinic3_phys_id,
-#endif
#endif
.get_coalesce = hinic3_get_coalesce,
@@ -1306,7 +1294,6 @@ static const struct ethtool_ops hinic3_ethtool_ops = {
.get_priv_flags = hinic3_get_priv_flags,
.set_priv_flags = hinic3_set_priv_flags,
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
.get_channels = hinic3_get_channels,
.set_channels = hinic3_set_channels,
@@ -1328,36 +1315,8 @@ static const struct ethtool_ops hinic3_ethtool_ops = {
.set_rxfh_indir = hinic3_set_rxfh_indir,
#endif
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
};
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-static const struct ethtool_ops_ext hinic3_ethtool_ops_ext = {
- .size = sizeof(struct ethtool_ops_ext),
- .set_phys_id = hinic3_set_phys_id,
- .get_channels = hinic3_get_channels,
- .set_channels = hinic3_set_channels,
-#ifdef ETHTOOL_GMODULEEEPROM
- .get_module_info = hinic3_get_module_info,
- .get_module_eeprom = hinic3_get_module_eeprom,
-#endif
-
-#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE
- .get_rxfh_indir_size = hinic3_get_rxfh_indir_size,
-#endif
-
-#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
- .get_rxfh_key_size = hinic3_get_rxfh_key_size,
- .get_rxfh = hinic3_get_rxfh,
- .set_rxfh = hinic3_set_rxfh,
-#else
- .get_rxfh_indir = hinic3_get_rxfh_indir,
- .set_rxfh_indir = hinic3_set_rxfh_indir,
-#endif
-
-};
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-
static const struct ethtool_ops hinic3vf_ethtool_ops = {
#ifdef SUPPORTED_COALESCE_PARAMS
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -1401,29 +1360,6 @@ static const struct ethtool_ops hinic3vf_ethtool_ops = {
.get_priv_flags = hinic3_get_priv_flags,
.set_priv_flags = hinic3_set_priv_flags,
-#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
- .get_channels = hinic3_get_channels,
- .set_channels = hinic3_set_channels,
-
-#ifndef NOT_HAVE_GET_RXFH_INDIR_SIZE
- .get_rxfh_indir_size = hinic3_get_rxfh_indir_size,
-#endif
-
-#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)
- .get_rxfh_key_size = hinic3_get_rxfh_key_size,
- .get_rxfh = hinic3_get_rxfh,
- .set_rxfh = hinic3_set_rxfh,
-#else
- .get_rxfh_indir = hinic3_get_rxfh_indir,
- .set_rxfh_indir = hinic3_set_rxfh_indir,
-#endif
-
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
-};
-
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
-static const struct ethtool_ops_ext hinic3vf_ethtool_ops_ext = {
- .size = sizeof(struct ethtool_ops_ext),
.get_channels = hinic3_get_channels,
.set_channels = hinic3_set_channels,
@@ -1441,21 +1377,14 @@ static const struct ethtool_ops_ext hinic3vf_ethtool_ops_ext = {
#endif
};
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
void hinic3_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &hinic3_ethtool_ops);
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
- set_ethtool_ops_ext(netdev, &hinic3_ethtool_ops_ext);
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
}
void hinic3vf_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &hinic3vf_ethtool_ops);
-#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
- set_ethtool_ops_ext(netdev, &hinic3vf_ethtool_ops_ext);
-#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c
index 7f2537a..2379354 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool_stats.c
@@ -68,6 +68,16 @@ static struct hinic3_stats hinic3_netdev_link_count[] = {
HINIC3_NETDEV_LINK_COUNT(link_down_events_phy),
};
+#define HINIC3_CIR_DRP(_stat_item) { \
+ .name = #_stat_item, \
+ .size = FIELD_SIZEOF(struct hinic3_cir_drop, _stat_item), \
+ .offset = offsetof(struct hinic3_cir_drop, _stat_item) \
+}
+
+static struct hinic3_stats hinic3_cir_drp[] = {
+ HINIC3_CIR_DRP(rx_discard_phy),
+};
+
#define HINIC3_NETDEV_STAT(_stat_item) { \
.name = #_stat_item, \
.size = FIELD_SIZEOF(struct rtnl_link_stats64, _stat_item), \
@@ -135,14 +145,16 @@ static struct hinic3_stats hinic3_rx_queue_stats[] = {
HINIC3_RXQ_STAT(dropped),
#ifdef HAVE_XDP_SUPPORT
HINIC3_RXQ_STAT(xdp_dropped),
+ HINIC3_RXQ_STAT(xdp_redirected),
#endif
HINIC3_RXQ_STAT(rx_buf_empty),
};
-
static struct hinic3_stats hinic3_rx_queue_stats_extern[] = {
HINIC3_RXQ_STAT(alloc_skb_err),
HINIC3_RXQ_STAT(alloc_rx_buf_err),
+#ifdef HAVE_XDP_SUPPORT
HINIC3_RXQ_STAT(xdp_large_pkt),
+#endif
HINIC3_RXQ_STAT(restore_drop_sge),
HINIC3_RXQ_STAT(rsvd2),
};
@@ -153,6 +165,10 @@ static struct hinic3_stats hinic3_tx_queue_stats[] = {
HINIC3_TXQ_STAT(busy),
HINIC3_TXQ_STAT(wake),
HINIC3_TXQ_STAT(dropped),
+#ifdef HAVE_XDP_SUPPORT
+ HINIC3_TXQ_STAT(xdp_dropped),
+ HINIC3_TXQ_STAT(xdp_xmits),
+#endif
};
static struct hinic3_stats hinic3_tx_queue_stats_extern[] = {
@@ -448,14 +464,14 @@ int hinic3_get_sset_count(struct net_device *netdev, int sset)
ARRAY_LEN(hinic3_nic_dev_stats) +
ARRAY_LEN(hinic3_netdev_link_count) +
ARRAY_LEN(hinic3_function_stats) +
+ ARRAY_LEN(hinic3_cir_drp) +
(ARRAY_LEN(hinic3_tx_queue_stats) +
- ARRAY_LEN(hinic3_rx_queue_stats)) * q_num;
+ ARRAY_LEN(hinic3_rx_queue_stats)) * q_num;
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) {
count += ARRAY_LEN(hinic3_port_stats);
count += ARRAY_LEN(g_hinic3_rsfec_stats);
}
-
return count;
case ETH_SS_PRIV_FLAGS:
return ARRAY_LEN(g_hinic_priv_flags_strings);
@@ -534,6 +550,47 @@ static u16 get_ethtool_port_stats(struct hinic3_nic_dev *nic_dev, u64 *data)
return i;
}
+static u16 get_ethtool_cir_drop(struct hinic3_nic_dev *nic_dev, u64 *data)
+{
+ struct hinic3_cir_drop *port_stats = NULL;
+ char *p = NULL;
+ u16 i = 0, j = 0;
+ int err;
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to malloc port stats\n");
+ (void)memset(&data[i],
+ 0, ARRAY_LEN(hinic3_cir_drp) * sizeof(*data));
+ i = ARRAY_LEN(hinic3_cir_drp);
+ return i;
+ }
+
+ err = hinic3_get_cir_drop(nic_dev->hwdev,
+ hinic3_global_func_id(nic_dev->hwdev),
+ port_stats);
+ if (err) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get CPB cir drops from fw\n");
+ (void)memset(&data[i],
+ 0, ARRAY_LEN(hinic3_cir_drp) * sizeof(*data));
+ i = ARRAY_LEN(hinic3_cir_drp);
+ kfree(port_stats);
+ return i;
+ }
+
+ for (j = 0; j < ARRAY_LEN(hinic3_cir_drp); j++, i++) {
+ p = (char *)(port_stats) + hinic3_cir_drp[j].offset;
+ data[i] = (hinic3_cir_drp[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ kfree(port_stats);
+
+ return i;
+}
+
static u16 get_ethtool_rsfec_stats(struct hinic3_nic_dev *nic_dev, u64 *data)
{
struct mag_cmd_rsfec_stats *port_stats = NULL;
@@ -545,10 +602,10 @@ static u16 get_ethtool_rsfec_stats(struct hinic3_nic_dev *nic_dev, u64 *data)
if (!port_stats) {
nicif_err(nic_dev, drv, nic_dev->netdev,
"Failed to malloc port stats\n");
- memset(&data[i], 0,
- ARRAY_LEN(g_hinic3_rsfec_stats) * sizeof(*data));
- i += ARRAY_LEN(g_hinic3_rsfec_stats);
- return i;
+ memset(&data[i], 0,
+ ARRAY_LEN(g_hinic3_rsfec_stats) * sizeof(*data));
+ i += ARRAY_LEN(g_hinic3_rsfec_stats);
+ return i;
}
err = hinic3_get_phy_rsfec_stats(nic_dev->hwdev, port_stats);
@@ -579,7 +636,7 @@ void hinic3_get_ethtool_stats(struct net_device *netdev,
#endif
struct hinic3_nic_stats *nic_stats = NULL;
- struct hinic3_vport_stats vport_stats = { 0 };
+ struct hinic3_vport_stats vport_stats = {0};
u16 i = 0, j = 0;
char *p = NULL;
int err;
@@ -615,14 +672,15 @@ void hinic3_get_ethtool_stats(struct net_device *netdev,
hinic3_global_func_id(nic_dev->hwdev),
&vport_stats);
if (err)
- nicif_err(nic_dev, drv, netdev,
- "Failed to get function stats from fw\n");
+ nicif_err(nic_dev, drv, netdev, "Failed to get function stats from fw\n");
for (j = 0; j < ARRAY_LEN(hinic3_function_stats); j++, i++) {
p = (char *)(&vport_stats) + hinic3_function_stats[j].offset;
data[i] = get_value_of_ptr(hinic3_function_stats[j].size, p);
}
+ i += get_ethtool_cir_drop(nic_dev, data + i);
+
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) {
i += get_ethtool_port_stats(nic_dev, data + i);
i += get_ethtool_rsfec_stats(nic_dev, data + i);
@@ -661,20 +719,30 @@ static u16 get_hw_stats_strings(struct hinic3_nic_dev *nic_dev, char *p)
u16 i, cnt = 0;
for (i = 0; i < ARRAY_LEN(hinic3_function_stats); i++) {
- memcpy(p, hinic3_function_stats[i].name, ETH_GSTRING_LEN);
+ (void)memcpy(p, hinic3_function_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ cnt++;
+ }
+
+ for (i = 0; i < ARRAY_LEN(hinic3_cir_drp); i++) {
+ (void)memcpy(p, hinic3_cir_drp[i].name,
+ ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
cnt++;
}
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) {
for (i = 0; i < ARRAY_LEN(hinic3_port_stats); i++) {
- memcpy(p, hinic3_port_stats[i].name, ETH_GSTRING_LEN);
+ (void)memcpy(p, hinic3_port_stats[i].name,
+ ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
cnt++;
}
+
for (i = 0; i < ARRAY_LEN(g_hinic3_rsfec_stats); i++) {
- memcpy(p, g_hinic3_rsfec_stats[i].name,
- ETH_GSTRING_LEN);
+ (void)memcpy(p, g_hinic3_rsfec_stats[i].name,
+ ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
cnt++;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
index 2daa7f9..262f42c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_filter.c
@@ -261,7 +261,7 @@ static int hinic3_mac_filter_sync(struct hinic3_nic_dev *nic_dev,
/* there are errors when add mac to hw, delete all mac in hw */
hinic3_undo_add_filter_entries(mac_filter_list, &tmp_add_list);
- /* VF don't support to enter promisc mode,
+ /* VF doesn't support to enter promisc mode,
* so we can't delete any other uc mac
*/
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev) || !uc) {
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h
index a3136ce..e09c8e3 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h
@@ -837,6 +837,21 @@ int hinic3_get_link_event_stats(void *dev, int *link_state);
int hinic3_get_hw_pf_infos(void *hwdev, struct hinic3_hw_pf_infos *infos,
u16 channel);
+/**
+ * @brief hinic3_get_pf_by_func - get pf by func
+ * @param hwdev: device pointer to hwdev
+ * @param func_id: func id
+ * @param pf_id: pf id
+ */
+int hinic3_get_pf_by_func(void *hwdev, u16 func_id, u8 *pf_id);
+
+/**
+ * @brief hinic3_get_pf_bus_by_dev - get pf bus by dev
+ * @param hwdev: device pointer to hwdev
+ * @param bus_num: pf bus num
+ */
+int hinic3_get_pf_bus_by_dev(void *hwdev, u8 *bus_num);
+
/**
* @brief hinic3_func_reset - reset func
* @param hwdev: device pointer to hwdev
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c
index 688bb7d..856c673 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c
@@ -26,10 +26,13 @@
#include "hinic3_common.h"
#include "mag_mpu_cmd_defs.h"
-#define BIFUR_RESOURCE_PF_SSID 0x5a1
+#ifndef __UEFI__
+#include "hinic3_bond.h"
+#include "hinic3_dev_mgmt.h"
+#endif
+
#define CAP_INFO_MAX_LEN 512
#define DEVICE_VENDOR_MAX_LEN 17
-#define READ_RSFEC_REGISTER_DELAY_TIME_MS 500
struct parse_tlv_info g_page_info = {0};
struct drv_tag_mag_cmd_get_xsfp_tlv_rsp g_xsfp_tlv_info = {0};
@@ -117,10 +120,67 @@ out:
}
EXPORT_SYMBOL(hinic3_get_phy_port_stats);
+int hinic3_get_phy_port_speed(void *hwdev, struct mag_port_speed *speed,
+ struct mag_speed_info *info)
+{
+ struct mag_cmd_get_port_speed *port_speed = NULL;
+ struct mag_cmd_port_speed_info speed_info = {};
+ u16 out_size;
+ struct hinic3_nic_io *nic_io = NULL;
+ int err;
+
+ if (hwdev == NULL) {
+ pr_err("Do get mac speed cmd failed for invalid param\n");
+ return -EINVAL;
+ }
+
+ nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+ if (!nic_io) {
+ pr_err("Do get nic io cmd failed for invalid param, hwdev:0x%llx\n",
+ (u64)hwdev);
+ return -EINVAL;
+ }
+
+ out_size = sizeof(struct mag_cmd_get_port_speed) +
+ sizeof(struct mag_port_speed) * info->length;
+ port_speed = kzalloc(out_size, GFP_KERNEL);
+ if (!port_speed) {
+ nic_err(nic_io->dev_hdl,
+ "Failed to malloc mag_cmd_get_port_speed\n");
+ return -ENOMEM;
+ }
+
+ speed_info.port_id = hinic3_physical_port_id(hwdev);
+ memcpy(&(speed_info.info), info, sizeof(*info));
+
+ err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_PORT_SPEED,
+ &speed_info, sizeof(speed_info),
+ port_speed, &out_size);
+ if (err != 0 || out_size == 0 || port_speed->head.status != 0) {
+ nic_err(nic_io->dev_hdl,
+ "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_speed->head.status, out_size);
+ err = -EIO;
+ goto out;
+ }
+
+ port_speed->speed = (struct mag_port_speed *)
+ ((char *)port_speed +
+ sizeof(struct mag_cmd_get_port_speed));
+ memcpy(speed, port_speed->speed,
+ sizeof(struct mag_port_speed) * info->length);
+
+out:
+ kfree(port_speed);
+
+ return err;
+}
+EXPORT_SYMBOL(hinic3_get_phy_port_speed);
+
int hinic3_get_phy_rsfec_stats(void *hwdev, struct mag_cmd_rsfec_stats *stats)
{
- struct mag_cmd_get_mag_cnt *port_stats = NULL;
- struct mag_cmd_get_mag_cnt stats_info;
+ struct mag_cmd_get_rsfec_cnt *port_stats = NULL;
+ struct mag_cmd_get_rsfec_cnt stats_info;
u16 out_size = sizeof(*port_stats);
struct hinic3_nic_io *nic_io = NULL;
int err;
@@ -138,25 +198,12 @@ int hinic3_get_phy_rsfec_stats(void *hwdev, struct mag_cmd_rsfec_stats *stats)
goto out;
}
- memset(&stats_info, 0, sizeof(stats_info));
+ (void)memset(&stats_info, 0, sizeof(stats_info));
stats_info.port_id = hinic3_physical_port_id(hwdev);
- err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_MAG_CNT,
- &stats_info, sizeof(stats_info),
- port_stats, &out_size);
- if (err || !out_size || port_stats->head.status) {
- nic_err(nic_io->dev_hdl,
- "Failed to get rsfec statistics, err: %d, status: 0x%x, out size: 0x%x\n",
- err, port_stats->head.status, out_size);
- err = -EIO;
- goto out;
- }
- /* 读2遍, 清除误码残留 */
- msleep(READ_RSFEC_REGISTER_DELAY_TIME_MS);
-
- err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_MAG_CNT, &stats_info,
+ err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_RSFEC_CNT, &stats_info,
sizeof(stats_info),
- port_stats, &out_size);
+ port_stats, &out_size);
if (err || !out_size || port_stats->head.status) {
nic_err(nic_io->dev_hdl,
"Failed to get rsfec statistics, err: %d, status: 0x%x, out size: 0x%x\n",
@@ -165,8 +212,7 @@ int hinic3_get_phy_rsfec_stats(void *hwdev, struct mag_cmd_rsfec_stats *stats)
goto out;
}
- memcpy(stats, &port_stats->mag_csr[MAG_RX_RSFEC_ERR_CW_CNT],
- sizeof(u32));
+ stats->rx_err_lane_phy = port_stats->rx_err_lane;
out:
kfree(port_stats);
@@ -643,6 +689,33 @@ void print_port_info(struct hinic3_nic_io *nic_io,
port_info->cur_link_machine_state);
}
+#ifndef __UEFI__
+#define BIFUR_MAX_PORT_ID 2
+void hinic3_get_link_state_in_bifur_scene(
+ struct mag_cmd_get_link_status *get_link,
+ struct hinic3_nic_io *nic_io,
+ struct mag_cmd_get_link_status *in_param)
+{
+ bool in_bifur_scene = false;
+ struct pci_dev *pdev = NULL;
+
+ if (nic_io->pcidev_hdl != NULL) {
+ pdev = nic_io->pcidev_hdl;
+ if (pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID)
+ in_bifur_scene = true;
+
+ }
+
+ if (in_bifur_scene != true ||
+ in_param == NULL ||
+ in_param->port_id >= BIFUR_MAX_PORT_ID) {
+ return;
+ }
+ get_link->status = hinic3_get_bifur_link_status(nic_io->hwdev,
+ in_param->port_id);
+}
+#endif
+
static int hinic3_get_vf_link_status_msg_handler(struct hinic3_nic_io *nic_io,
u16 vf_id, void *buf_in,
u16 in_size, void *buf_out,
@@ -658,8 +731,13 @@ static int hinic3_get_vf_link_status_msg_handler(struct hinic3_nic_io *nic_io,
if (link_forced)
get_link->status = link_up ?
HINIC3_LINK_UP : HINIC3_LINK_DOWN;
- else
+ else {
get_link->status = nic_io->link_status;
+#ifndef __UEFI__
+ hinic3_get_link_state_in_bifur_scene(get_link, nic_io,
+ (struct mag_cmd_get_link_status *)buf_in);
+#endif
+ }
get_link->head.status = 0;
*out_size = sizeof(*get_link);
@@ -707,12 +785,13 @@ static void link_status_event_handler(void *hwdev, void *buf_in,
{
struct mag_cmd_get_link_status *link_status = NULL;
struct mag_cmd_get_link_status *ret_link_status = NULL;
- struct hinic3_event_info event_info = {0};
+ struct hinic3_event_info event_info = {};
struct hinic3_event_link_info *link_info = (void *)event_info.event_data;
struct hinic3_nic_io *nic_io = NULL;
#ifndef __UEFI__
struct pci_dev *pdev = NULL;
#endif
+
/* Ignore link change event */
if (hinic3_is_bm_slave_host(hwdev))
return;
@@ -734,16 +813,15 @@ static void link_status_event_handler(void *hwdev, void *buf_in,
event_info.service = EVENT_SRV_NIC;
event_info.type = link_status->status ?
- EVENT_NIC_LINK_UP : EVENT_NIC_LINK_DOWN;
+ EVENT_NIC_LINK_UP : EVENT_NIC_LINK_DOWN;
hinic3_event_callback(hwdev, &event_info);
#ifndef __UEFI__
- if (nic_io->pcidev_hdl) {
+ if (nic_io->pcidev_hdl != NULL) {
pdev = nic_io->pcidev_hdl;
- if (pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID) {
+ if (pdev->subsystem_device == BIFUR_RESOURCE_PF_SSID)
return;
- }
}
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 7327ee5..b973b98 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -85,6 +85,14 @@ MODULE_PARM_DESC(page_pool_enabled, "enable/disable page_pool feature for rxq pa
#define HINIC3_SQ_DEPTH 1024
#define HINIC3_RQ_DEPTH 1024
+static u32 rq_depth = HINIC3_RQ_DEPTH;
+module_param(rq_depth, uint, 0444);
+MODULE_PARM_DESC(rq_depth, "Set rq_depth, must be [128-16384], default is 1024");
+
+static u32 sq_depth = HINIC3_SQ_DEPTH;
+module_param(sq_depth, uint, 0444);
+MODULE_PARM_DESC(sq_depth, "Set sq_depth, must be [128-65536], default is 1024");
+
#define LRO_ENABLE 1
enum hinic3_rx_buff_len {
@@ -185,13 +193,8 @@ static int hinic3_netdev_event(struct notifier_block *notifier,
ndev->vlan_features &= (~HINIC3_VLAN_CLEAR_OFFLOAD);
} else if (vlan_depth > HINIC3_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) {
#ifdef HAVE_NDO_SET_FEATURES
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- set_netdev_hw_features(ndev,
- get_netdev_hw_features(ndev) &
- (~HINIC3_VLAN_CLEAR_OFFLOAD));
-#else
+
ndev->hw_features &= (~HINIC3_VLAN_CLEAR_OFFLOAD);
-#endif
#endif
ndev->features &= (~HINIC3_VLAN_CLEAR_OFFLOAD);
}
@@ -293,19 +296,10 @@ static void netdev_feature_init(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_LRO;
}
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- hw_features |= get_netdev_hw_features(netdev);
-#else
hw_features |= netdev->hw_features;
-#endif
-
hw_features |= netdev->features;
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- set_netdev_hw_features(netdev, hw_features);
-#else
netdev->hw_features = hw_features;
-#endif
#ifdef IFF_UNICAST_FLT
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -451,6 +445,9 @@ static void hinic3_sw_deinit(struct hinic3_nic_dev *nic_dev)
hinic3_global_func_id(nic_dev->hwdev),
HINIC3_CHANNEL_NIC);
+ hinic3_cmd_vf_lag(nic_dev->hwdev, hinic3_global_func_id(nic_dev->hwdev),
+ HINIC3_CHANNEL_NIC);
+
hinic3_clear_rss_config(nic_dev);
hinic3_dcb_deinit(nic_dev);
@@ -476,7 +473,7 @@ static int hinic3_set_default_mac(struct hinic3_nic_dev *nic_dev)
u8 mac_addr[ETH_ALEN];
int err = 0;
- err = hinic3_get_default_mac(nic_dev->hwdev, mac_addr);
+ err = hinic3_get_default_mac(nic_dev->hwdev, mac_addr, ETH_ALEN);
if (err) {
nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n");
return err;
@@ -486,13 +483,13 @@ static int hinic3_set_default_mac(struct hinic3_nic_dev *nic_dev)
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) {
- nic_err(&nic_dev->pdev->dev, "Invalid MAC address %pM\n",
- netdev->dev_addr);
+ nic_err(&nic_dev->pdev->dev,
+ "Invalid MAC address %pM\n",
+ netdev->dev_addr);
return -EIO;
- }
+ }
- nic_info(&nic_dev->pdev->dev,
- "Invalid MAC address %pM, using random\n",
+ nic_info(&nic_dev->pdev->dev, "Invalid MAC address %pM, using random\n",
netdev->dev_addr);
eth_hw_addr_random(netdev);
}
@@ -506,13 +503,39 @@ static int hinic3_set_default_mac(struct hinic3_nic_dev *nic_dev)
*/
if (err && err != HINIC3_PF_SET_VF_ALREADY)
nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n");
-
if (err == HINIC3_PF_SET_VF_ALREADY)
return 0;
return err;
}
+static void hinic3_set_sq_rq_depth(struct hinic3_nic_dev *nic_dev)
+{
+ u32 new_sq_depth, new_rq_depth;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+ if (sq_depth > HINIC3_MAX_TX_QUEUE_DEPTH ||
+ sq_depth < HINIC3_MIN_QUEUE_DEPTH) {
+ nic_warn(&nic_dev->pdev->dev,
+ "tx queue depth out of range tx[%d-%d], use default value\n",
+ HINIC3_MIN_QUEUE_DEPTH, HINIC3_MAX_TX_QUEUE_DEPTH);
+ } else {
+ new_sq_depth = (u32)(1U << (u16)ilog2(sq_depth));
+ nic_dev->q_params.sq_depth = new_sq_depth;
+ }
+
+ if (rq_depth > HINIC3_MAX_RX_QUEUE_DEPTH ||
+ rq_depth < HINIC3_MIN_QUEUE_DEPTH) {
+ nic_warn(&nic_dev->pdev->dev,
+ "rx queue depth out of range rx[%d-%d], use default value\n",
+ HINIC3_MIN_QUEUE_DEPTH, HINIC3_MAX_RX_QUEUE_DEPTH);
+ } else {
+ new_rq_depth = (u32)(1U << (u16)ilog2(rq_depth));
+ nic_dev->q_params.rq_depth = new_rq_depth;
+ }
+}
+
static void hinic3_outband_cfg_init(struct hinic3_nic_dev *nic_dev)
{
u16 outband_default_vid = 0;
@@ -550,8 +573,7 @@ static int hinic3_sw_init(struct hinic3_nic_dev *nic_dev)
return -EFAULT;
}
- nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
- nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+ hinic3_set_sq_rq_depth(nic_dev);
hinic3_try_to_enable_rss(nic_dev);
@@ -1142,16 +1164,31 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter)
{
struct hinic3_nic_dev *nic_dev = adapter;
struct net_device *netdev = NULL;
+#ifdef HIUDK_SDK
+ int is_use_vram = get_use_vram_flag();
+#endif
if (!nic_dev || !hinic3_support_nic(lld_dev->hwdev, NULL))
return;
nic_info(&lld_dev->pdev->dev, "NIC service remove begin\n");
+#ifdef HAVE_XDP_SUPPORT
+ nic_dev->remove_flag = true;
+#endif
netdev = nic_dev->netdev;
- if (lld_dev->pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID)
+ if (lld_dev->pdev->subsystem_device != BIFUR_RESOURCE_PF_SSID) {
+ /* The kernel function deregisters the network device and
+ * releases related resources such as queues and mounted XDP
+ * programs.
+ */
unregister_netdev(netdev);
+ }
+
+#ifdef HAVE_XDP_SUPPORT
+ nic_dev->remove_flag = false;
+#endif
#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN
hinic3_unregister_notifier(nic_dev);
@@ -1159,6 +1196,7 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter)
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
cancel_delayed_work_sync(&nic_dev->vport_stats_work);
+
cancel_delayed_work_sync(&nic_dev->periodic_work);
cancel_delayed_work_sync(&nic_dev->rxq_check_work);
cancel_work_sync(&nic_dev->rx_mode_work);
@@ -1169,6 +1207,9 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter)
if (hinic3_get_bond_create_mode(lld_dev->hwdev) != 0)
hinic3_bond_deinit(nic_dev);
+ if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
+ hinic3_bond_flush_workqueue(nic_dev->hwdev);
+
hinic3_update_nic_feature(nic_dev->hwdev, 0);
hinic3_set_nic_feature_to_hw(nic_dev->hwdev);
@@ -1180,8 +1221,21 @@ static void nic_remove(struct hinic3_lld_dev *lld_dev, void *adapter)
kfree(nic_dev->vlan_bitmap);
nic_dev->vlan_bitmap = NULL;
+#ifdef HIUDK_SDK
+ if (is_use_vram != 0)
+ hi_vram_kfree((void *)nic_dev->nic_vram, nic_dev->nic_vram_name,
+ sizeof(struct hinic3_vram));
+ else
+ kfree(nic_dev->nic_vram);
+#endif
+
free_netdev(netdev);
+#ifdef HIUDK_SDK
+ if (is_use_vram != 0)
+ hiudk_unregister_flush_fn(lld_dev);
+#endif
+
nic_info(&lld_dev->pdev->dev, "NIC service removed\n");
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
index 5bd4c3d..59ab6e9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mt.h
@@ -210,6 +210,10 @@ enum driver_cmd_type {
PORT_ID,
SET_RX_PF_BW_LIMIT = 0x43,
+ MONITOR_MAC_SPEED,
+ GET_FUNC_ID,
+ SET_MAC_SPEED_STATUS,
+ BOND_DEFAULT_OFFLOAD,
GET_FUNC_CAP = 0x50,
GET_XSFP_PRESENT = 0x51,
@@ -229,6 +233,17 @@ enum driver_cmd_type {
BIFUR_SET_ENABLE = 0xc0,
BIFUR_GET_ENABLE = 0xc1,
+ ROCE_CMD_SET_DSCP = 0xd0,
+ ROCE_CMD_GET_DSCP = 0xd1,
+ ROCE_CMD_CLEAR_DSCP = 0xd2,
+ ROCE_CMD_GET_ECN = 0xd3,
+ ROCE_CMD_SET_ECN = 0xd4,
+ ROCE_CMD_CLEAR_ECN = 0xd5,
+
+ ROCE_CMD_SET_TSO = 0xe0,
+ ROCE_CMD_GET_TSO = 0xe1,
+ ROCE_CMD_CLEAR_TSO = 0xe2,
+
VM_COMPAT_TEST = 0xFF
};
@@ -325,6 +340,10 @@ struct hinic3_hw_stats {
#define IFNAMSIZ 16
#endif
+#ifndef IB_DEVICE_NAME_MAX
+#define IB_DEVICE_NAME_MAX 64
+#endif
+
struct pf_info {
char name[IFNAMSIZ];
char bus_info[BUSINFO_LEN];
@@ -477,7 +496,10 @@ struct hinic3_mt_qos_info { /* delete */
u16 op_code;
u8 valid_cos_bitmap;
u8 valid_up_bitmap;
- u32 rsvd1;
+	/* Use this buffer when the ib device name is too long and
+	 * exceeds the length of device_name.
+	 */
+ char ib_device_name[IB_DEVICE_NAME_MAX];
};
struct hinic3_mt_dcb_state {
@@ -581,7 +603,10 @@ struct msg_module {
int bus_num;
u8 port_id;
u8 rsvd1[3];
- u32 rsvd2[4];
+	/* Use this buffer when the ib device name is too long and
+	 * exceeds the length of device_name.
+	 */
+ char ib_device_name[IB_DEVICE_NAME_MAX];
};
struct hinic3_mt_qos_cos_cfg {
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index c4b3d5b..a3879aa 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -30,7 +30,8 @@
#include "hinic3_rx.h"
#include "hinic3_dcb.h"
#include "hinic3_nic_prof.h"
-
+#include "hinic3_bond.h"
+#include "sw_cmdq_ops.h"
#include "nic_npu_cmd.h"
#include "vram_common.h"
@@ -39,6 +40,10 @@
#define HINIC3_LRO_DEFAULT_COAL_PKT_SIZE 32
#define HINIC3_LRO_DEFAULT_TIME_LIMIT 16
+#define HINIC3_SOFT_LRO_ENABLE 0
+#define HINIC3_SOFT_LRO_DISABLE 1
+#define HINIC3_LRO_MAX_COL_NUM 15
+
#define HINIC3_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 100
static void hinic3_nic_set_rx_mode(struct net_device *netdev)
{
@@ -541,12 +546,13 @@ int hinic3_vport_up(struct hinic3_nic_dev *nic_dev)
queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task,
HINIC3_MODERATONE_DELAY);
if (test_bit(HINIC3_RXQ_RECOVERY, &nic_dev->flags))
- queue_delayed_work(nic_dev->workq,
- &nic_dev->rxq_check_work, HZ);
+ queue_delayed_work(nic_dev->workq, &nic_dev->rxq_check_work,
+ HZ);
hinic3_print_link_message(nic_dev, link_status);
- if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
+ if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev) &&
+ !hinic3_is_bond_offload(nic_dev->lld_dev))
hinic3_notify_all_vfs_link_changed(nic_dev->hwdev, link_status);
return 0;
@@ -618,10 +624,11 @@ void hinic3_vport_down(struct hinic3_nic_dev *nic_dev)
cancel_delayed_work_sync(&nic_dev->moderation_task);
if (hinic3_get_chip_present_flag(nic_dev->hwdev)) {
- if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev))
+ if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev) &&
+ !hinic3_is_bond_offload(nic_dev->lld_dev))
hinic3_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
- if (is_in_kexec != 0)
+ if (nic_dev->state != 0)
nicif_info(nic_dev, drv, nic_dev->netdev, "Skip changing mag status!\n");
else
hinic3_maybe_set_port_state(nic_dev, false);
@@ -631,15 +638,29 @@ void hinic3_vport_down(struct hinic3_nic_dev *nic_dev)
HINIC3_CHANNEL_NIC);
hinic3_flush_txqs(nic_dev->netdev);
-
if (is_in_kexec == 0)
msleep(HINIC3_WAIT_FLUSH_QP_RESOURCE_TIMEOUT);
else
(void)hinic3_flush_rq_and_check(nic_dev, glb_func_id);
+
hinic3_flush_qps_res(nic_dev->hwdev);
}
}
+static void hinic3_cqe_paddr_pass(struct hinic3_dyna_txrxq_params *q_params,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_dyna_rxq_res *rqres = NULL;
+ struct hinic3_io_queue *rq = NULL;
+ u32 idx;
+
+ for (idx = 0; idx < q_params->num_qps; idx++) {
+ rqres = &q_params->rxqs_res[idx];
+ rq = &qp_params->rqs[idx];
+ rq->cqe_start_paddr = rqres->cqe_start_paddr;
+ }
+}
+
int hinic3_change_channel_settings(struct hinic3_nic_dev *nic_dev,
struct hinic3_dyna_txrxq_params *trxq_params,
hinic3_reopen_handler reopen_handler,
@@ -683,6 +704,8 @@ int hinic3_change_channel_settings(struct hinic3_nic_dev *nic_dev,
if (reopen_handler)
reopen_handler(nic_dev, priv_data);
+ hinic3_cqe_paddr_pass(trxq_params, &new_qp_params);
+
err = hinic3_open_channel(nic_dev, &new_qp_params, trxq_params);
if (err)
goto open_channel_err;
@@ -705,10 +728,9 @@ open_channel_err:
return err;
}
-int hinic3_open(struct net_device *netdev)
+static int hinic3_pre_open(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
- struct hinic3_dyna_qp_params qp_params = {0};
int err;
if (test_bit(HINIC3_INTF_UP, &nic_dev->flags)) {
@@ -717,10 +739,21 @@ int hinic3_open(struct net_device *netdev)
}
err = hinic3_init_nicio_res(nic_dev->hwdev);
- if (err) {
+ if (err != 0)
nicif_err(nic_dev, drv, netdev, "Failed to init nicio resources\n");
+
+ return err;
+}
+
+int hinic3_open(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params = {0};
+ int err;
+
+ err = hinic3_pre_open(netdev);
+ if (err != 0)
return err;
- }
err = hinic3_setup_num_qps(nic_dev);
if (err) {
@@ -733,6 +766,8 @@ int hinic3_open(struct net_device *netdev)
if (err)
goto alloc_channel_res_err;
+ hinic3_cqe_paddr_pass(&nic_dev->q_params, &qp_params);
+
err = hinic3_open_channel(nic_dev, &qp_params, &nic_dev->q_params);
if (err)
goto open_channel_err;
@@ -785,6 +820,22 @@ static void hinic3_delete_napi(struct hinic3_nic_dev *nic_dev)
hinic3_free_irq_vram(nic_dev, &nic_dev->q_params);
}
+#ifdef HAVE_XDP_SUPPORT
+int hinic3_safe_switch_channels(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_dyna_txrxq_params q_params = {0};
+
+ q_params = nic_dev->q_params;
+ q_params.sq_depth = nic_dev->q_params.sq_depth;
+ q_params.rq_depth = nic_dev->q_params.rq_depth;
+ q_params.txqs_res = NULL;
+ q_params.rxqs_res = NULL;
+ q_params.irq_cfg = NULL;
+
+ return hinic3_change_channel_settings(nic_dev, &q_params, NULL, NULL);
+}
+#endif
+
int hinic3_close(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -1433,6 +1484,8 @@ static int set_feature_lro(struct hinic3_nic_dev *nic_dev,
netdev_features_t changed = wanted_features ^ features;
bool en = !!(wanted_features & NETIF_F_LRO);
int err;
+ u8 cqe_coal_state, cqe_coal_max_num;
+ u8 lro_soft_en = HINIC3_SOFT_LRO_ENABLE;
if (!(changed & NETIF_F_LRO))
return 0;
@@ -1445,9 +1498,18 @@ static int set_feature_lro(struct hinic3_nic_dev *nic_dev,
}
#endif
+ if (en) {
+ hinic3_get_cqe_coalesce_info(nic_dev->hwdev,
+ &cqe_coal_state, &cqe_coal_max_num);
+ lro_soft_en = (cqe_coal_state == 1) ? HINIC3_SOFT_LRO_DISABLE :
+ HINIC3_SOFT_LRO_ENABLE;
+ }
err = hinic3_set_rx_lro_state(nic_dev->hwdev, en,
HINIC3_LRO_DEFAULT_TIME_LIMIT,
- HINIC3_LRO_DEFAULT_COAL_PKT_SIZE);
+ HINIC3_LRO_DEFAULT_COAL_PKT_SIZE,
+ HINIC3_SOFT_LRO_ENABLE,
+ HINIC3_LRO_DEFAULT_COAL_PKT_SIZE,
+ HINIC3_LRO_MAX_COL_NUM);
if (err) {
hinic3_err(nic_dev, drv, "%s lro failed\n",
SET_FEATURES_OP_STR(en));
@@ -1560,12 +1622,8 @@ static int set_features(struct hinic3_nic_dev *nic_dev,
return 0;
}
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
-static int hinic3_set_features(struct net_device *netdev, u32 features)
-#else
static int hinic3_set_features(struct net_device *netdev,
netdev_features_t features)
-#endif
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -1580,12 +1638,8 @@ int hinic3_set_hw_features(struct hinic3_nic_dev *nic_dev)
nic_dev->netdev->features);
}
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
-static u32 hinic3_fix_features(struct net_device *netdev, u32 features)
-#else
static netdev_features_t hinic3_fix_features(struct net_device *netdev,
netdev_features_t features)
-#endif
{
netdev_features_t features_tmp = features;
@@ -1902,9 +1956,9 @@ static int hinic3_xdp_setup(struct hinic3_nic_dev *nic_dev,
int max_mtu = hinic3_xdp_max_mtu(nic_dev);
int q_id;
- if (nic_dev->netdev->mtu > max_mtu) {
+ if (nic_dev->netdev->mtu > (u32)max_mtu) {
nicif_err(nic_dev, drv, nic_dev->netdev,
- "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n",
+ "Failed to setup xdp program, the current MTU %u is larger than max allowed MTU %d\n",
nic_dev->netdev->mtu, max_mtu);
NL_SET_ERR_MSG_MOD(extack,
"MTU too large for loading xdp program");
@@ -1926,6 +1980,9 @@ static int hinic3_xdp_setup(struct hinic3_nic_dev *nic_dev,
if (old_prog)
bpf_prog_put(old_prog);
+ if (!nic_dev->remove_flag)
+ return hinic3_safe_switch_channels(nic_dev);
+
return 0;
}
@@ -1940,12 +1997,6 @@ static int hinic3_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return hinic3_xdp_setup(nic_dev, xdp->prog, xdp->extack);
-#ifdef HAVE_XDP_QUERY_PROG
- case XDP_QUERY_PROG:
- xdp->prog_id = nic_dev->xdp_prog ?
- nic_dev->xdp_prog->aux->id : 0;
- return 0;
-#endif
default:
return -EINVAL;
}
@@ -1965,11 +2016,7 @@ static const struct net_device_ops hinic3_netdev_ops = {
.ndo_tx_timeout = hinic3_tx_timeout,
.ndo_select_queue = hinic3_select_queue,
-#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU
- .extended.ndo_change_mtu = hinic3_change_mtu,
-#else
.ndo_change_mtu = hinic3_change_mtu,
-#endif
.ndo_set_mac_address = hinic3_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1978,15 +2025,6 @@ static const struct net_device_ops hinic3_netdev_ops = {
.ndo_vlan_rx_kill_vid = hinic3_vlan_rx_kill_vid,
#endif
-#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
- /* RHEL7 requires this to be defined to enable extended ops. RHEL7
- * uses the function get_ndo_ext to retrieve offsets for extended
- * fields from with the net_device_ops struct and ndo_size is checked
- * to determine whether or not the offset is valid.
- */
- .ndo_size = sizeof(const struct net_device_ops),
-#endif
-
#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = hinic3_ndo_set_vf_mac,
#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN
@@ -2004,11 +2042,7 @@ static const struct net_device_ops hinic3_netdev_ops = {
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
-#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
- .extended.ndo_set_vf_trust = hinic3_ndo_set_vf_trust,
-#else
.ndo_set_vf_trust = hinic3_ndo_set_vf_trust,
-#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */
#endif /* HAVE_NDO_SET_VF_TRUST */
.ndo_get_vf_config = hinic3_ndo_get_vf_config,
@@ -2021,19 +2055,13 @@ static const struct net_device_ops hinic3_netdev_ops = {
.ndo_set_rx_mode = hinic3_nic_set_rx_mode,
#ifdef HAVE_XDP_SUPPORT
+ .ndo_xdp_xmit = hinic3_xdp_xmit_frames,
#ifdef HAVE_NDO_BPF_NETDEV_BPF
.ndo_bpf = hinic3_xdp,
#else
.ndo_xdp = hinic3_xdp,
#endif
#endif
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
-};
-
-/* RHEL6 keeps these operations in a separate structure */
-static const struct net_device_ops_ext hinic3_netdev_ops_ext = {
- .size = sizeof(struct net_device_ops_ext),
-#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
#ifdef HAVE_NDO_SET_VF_LINK_STATE
.ndo_set_vf_link_state = hinic3_ndo_set_vf_link_state,
@@ -2059,20 +2087,7 @@ static const struct net_device_ops hinic3vf_netdev_ops = {
.ndo_tx_timeout = hinic3_tx_timeout,
.ndo_select_queue = hinic3_select_queue,
-#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT
- /* RHEL7 requires this to be defined to enable extended ops. RHEL7
- * uses the function get_ndo_ext to retrieve offsets for extended
- * fields from with the net_device_ops struct and ndo_size is checked
- * to determine whether or not the offset is valid.
- */
- .ndo_size = sizeof(const struct net_device_ops),
-#endif
-
-#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_CHANGE_MTU
- .extended.ndo_change_mtu = hinic3_change_mtu,
-#else
.ndo_change_mtu = hinic3_change_mtu,
-#endif
.ndo_set_mac_address = hinic3_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -2087,39 +2102,22 @@ static const struct net_device_ops hinic3vf_netdev_ops = {
.ndo_set_rx_mode = hinic3_nic_set_rx_mode,
-#ifdef HAVE_XDP_SUPPORT
#ifdef HAVE_NDO_BPF_NETDEV_BPF
- .ndo_bpf = hinic3_xdp,
+ .ndo_bpf = hinic3_xdp,
#else
- .ndo_xdp = hinic3_xdp,
-#endif
+ .ndo_xdp = hinic3_xdp,
#endif
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
-};
-/* RHEL6 keeps these operations in a separate structure */
-static const struct net_device_ops_ext hinic3vf_netdev_ops_ext = {
- .size = sizeof(struct net_device_ops_ext),
-#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
-
-#ifdef HAVE_NDO_SET_FEATURES
.ndo_fix_features = hinic3_fix_features,
.ndo_set_features = hinic3_set_features,
-#endif /* HAVE_NDO_SET_FEATURES */
};
void hinic3_set_netdev_ops(struct hinic3_nic_dev *nic_dev)
{
if (!HINIC3_FUNC_IS_VF(nic_dev->hwdev)) {
nic_dev->netdev->netdev_ops = &hinic3_netdev_ops;
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- set_netdev_ops_ext(nic_dev->netdev, &hinic3_netdev_ops_ext);
-#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
} else {
nic_dev->netdev->netdev_ops = &hinic3vf_netdev_ops;
-#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
- set_netdev_ops_ext(nic_dev->netdev, &hinic3vf_netdev_ops_ext);
-#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
}
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h
index d8c5419..1a1e03f 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h
@@ -156,7 +156,8 @@ struct hinic3_nic_io {
u8 __iomem *rqs_db_addr;
u16 max_vfs;
- u16 rsvd3;
+ u8 cqe_coal_en;
+ u8 rsvd3;
u32 rsvd4;
struct vf_data_storage *vf_infos;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index fc3c90a..bab9ff5 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -351,12 +351,11 @@ int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id,
if (!hwdev || !old_mac || !new_mac)
return -EINVAL;
- memset(&mac_info, 0, sizeof(mac_info));
+ (void)memset(&mac_info, 0, sizeof(mac_info));
nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
if (!nic_io)
return -EINVAL;
-
if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n",
(vlan_id & HINIC_VLAN_ID_MASK));
@@ -382,7 +381,7 @@ int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id,
if (PF_SET_VF_MAC(hwdev, mac_info.msg_head.status)) {
nic_warn(nic_io->dev_hdl, "PF has already set VF MAC. Ignore update operation\n");
- return HINIC3_PF_SET_VF_ALREADY;
+ return 0;
}
if (mac_info.msg_head.status == HINIC3_MGMT_STATUS_EXIST) {
@@ -393,7 +392,7 @@ int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id,
return 0;
}
-int hinic3_get_default_mac(void *hwdev, u8 *mac_addr)
+int hinic3_get_default_mac(void *hwdev, u8 *mac_addr, int ether_len)
{
struct hinic3_port_mac_set mac_info;
u16 out_size = sizeof(mac_info);
@@ -403,7 +402,7 @@ int hinic3_get_default_mac(void *hwdev, u8 *mac_addr)
if (!hwdev || !mac_addr)
return -EINVAL;
- memset(&mac_info, 0, sizeof(mac_info));
+ (void)memset(&mac_info, 0, sizeof(mac_info));
nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
if (!nic_io)
@@ -903,6 +902,41 @@ int hinic3_get_vport_stats(void *hwdev, u16 func_id,
return 0;
}
+int hinic3_get_cir_drop(void *hwdev, u16 func_id, struct hinic3_cir_drop *stats)
+{
+ struct hinic3_port_stats_info stats_info;
+ struct hinic3_cmd_get_dp_info_resp vport_stats;
+ u16 out_size = sizeof(vport_stats);
+ struct hinic3_nic_io *nic_io = NULL;
+ int err;
+
+ if (!hwdev || !stats)
+ return -EINVAL;
+
+ (void)memset(&stats_info, 0, sizeof(stats_info));
+ (void)memset(&vport_stats, 0, sizeof(vport_stats));
+
+ nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+ if (!nic_io)
+ return -EINVAL;
+
+ stats_info.func_id = func_id;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_GET_CIR_DROP,
+ &stats_info, sizeof(stats_info),
+ &vport_stats, &out_size);
+ if (err || !out_size || vport_stats.head.status) {
+ nic_err(nic_io->dev_hdl,
+ "Failed to get CPB cir drop, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vport_stats.head.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &vport_stats.value, sizeof(struct hinic3_cir_drop));
+
+ return 0;
+}
+
static int hinic3_set_function_table(struct hinic3_nic_io *nic_io,
u32 cfg_bitmap,
const struct hinic3_func_tbl_cfg *cfg)
@@ -1556,8 +1590,42 @@ static int hinic3_set_rx_lro_timer(void *hwdev, u32 timer_value)
return 0;
}
+static int hinic3_set_lro_cfg(void *hwdev, u8 data, u8 data_type)
+{
+ struct hinic3_nic_io *nic_io = NULL;
+ struct hinic3_cmd_lro_cfg lro_cfg;
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+ if (!nic_io)
+ return -EINVAL;
+
+ (void)memset(&lro_cfg, 0, sizeof(lro_cfg));
+ lro_cfg.func_id = hinic3_global_func_id(hwdev);
+ lro_cfg.opcode = HINIC3_CMD_OP_SET;
+ lro_cfg.data = data;
+ lro_cfg.data_type = data_type;
+
+ err = l2nic_msg_to_mgmt_sync(hwdev, HINIC3_NIC_CMD_LRO_CFG,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err != 0 || out_size == 0 || lro_cfg.msg_head.status != 0) {
+ nic_err(nic_io->dev_hdl, "Failed to set soft lro cfg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, lro_cfg.msg_head.status, out_size);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int hinic3_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer,
- u32 lro_max_pkt_len)
+ u32 lro_max_pkt_len, u8 soft_lro_disable,
+ u8 hw_lro_max_len, u8 hw_lro_max_num)
{
struct hinic3_nic_io *nic_io = NULL;
u8 ipv4_en = 0, ipv6_en = 0;
@@ -1580,6 +1648,18 @@ int hinic3_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer,
if (err != 0)
return err;
+ err = hinic3_set_lro_cfg(hwdev, soft_lro_disable, NIC_SOFT_LRO_DISABLE);
+ if (err != 0)
+ nic_warn(nic_io->dev_hdl, "Set soft LRO state failed, please check fw version first\n");
+
+ err = hinic3_set_lro_cfg(hwdev, hw_lro_max_len, NIC_HW_LRO_MAX_LEN);
+ if (err != 0)
+ nic_warn(nic_io->dev_hdl, "Set hw LRO max len failed, please check fw version first\n");
+
+ err = hinic3_set_lro_cfg(hwdev, hw_lro_max_num, NIC_HW_LRO_MAX_NUM);
+ if (err != 0)
+ nic_warn(nic_io->dev_hdl, "Set hw LRO max num failed, please check fw version first\n");
+
/* we don't set LRO timer for VF */
if (hinic3_func_type(hwdev) == TYPE_VF)
return 0;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index 60caf68..c6dcd43 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -228,10 +228,11 @@ int hinic3_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id,
* @brief hinic3_get_default_mac - get default mac address
* @param hwdev: device pointer to hwdev
* @param mac_addr: mac address from hardware
+ * @param ether_len: the length of mac address
* @retval zero: success
* @retval non-zero: failure
*/
-int hinic3_get_default_mac(void *hwdev, u8 *mac_addr);
+int hinic3_get_default_mac(void *hwdev, u8 *mac_addr, int ether_len);
/* *
* @brief hinic3_set_port_mtu - set function mtu
@@ -261,6 +262,17 @@ int hinic3_get_link_state(void *hwdev, u8 *link_state);
*/
int hinic3_get_vport_stats(void *hwdev, u16 func_id, struct hinic3_vport_stats *stats);
+/* *
+ * @brief hinic3_get_cir_drop - get CPB cir drop counter
+ * @param hwdev: device pointer to hwdev
+ * @param func_id: function index
+ * @param stats: function stats
+ * @retval zero: success
+ * @retval non-zero: failure
+ */
+int hinic3_get_cir_drop(void *hwdev, u16 func_id,
+ struct hinic3_cir_drop *stats);
+
/* *
* @brief hinic3_notify_all_vfs_link_changed - notify to all vfs link changed
* @param hwdev: device pointer to hwdev
@@ -304,7 +316,8 @@ int hinic3_set_rx_vlan_offload(void *hwdev, u8 en);
* @retval non-zero: failure
*/
int hinic3_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer,
- u32 lro_max_pkt_len);
+ u32 lro_max_pkt_len, u8 soft_lro_disable,
+ u8 hw_lro_max_len, u8 hw_lro_max_num);
/* *
* @brief hinic3_set_vf_spoofchk - set vf spoofchk
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h
index 461768d..b6fb28f 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cmdq.h
@@ -8,7 +8,6 @@
#include "hinic3_hw.h"
#include "hinic3_nic.h"
-#define HINIC3_Q_CTXT_MAX 31U /* (2048 - 8) / 64 */
#define HINIC3_QP_CTXT_HEADER_SIZE 16U
enum hinic3_qp_ctxt_type {
@@ -56,30 +55,9 @@ struct hinic3_sq_ctxt {
u32 wq_block_pfn_lo;
};
-struct hinic3_rq_ctxt {
- u32 ci_pi;
- u32 ceq_attr;
- u32 wq_pfn_hi_type_owner;
- u32 wq_pfn_lo;
-
- u32 rsvd[3];
- u32 cqe_sge_len;
-
- u32 pref_cache;
- u32 pref_ci_owner;
- u32 pref_wq_pfn_hi_ci;
- u32 pref_wq_pfn_lo;
-
- u32 pi_paddr_hi;
- u32 pi_paddr_lo;
- u32 wq_block_pfn_hi;
- u32 wq_block_pfn_lo;
-};
-
struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_sw_ops(void);
struct hinic3_nic_cmdq_ops *hinic3_nic_cmdq_get_hw_ops(void);
void hinic3_nic_cmdq_adapt_init(struct hinic3_nic_io *nic_io);
void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id, struct hinic3_sq_ctxt *sq_ctxt);
-void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *rq_ctxt);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c
index b45c875..6c0684b 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dbg.c
@@ -96,7 +96,7 @@ int hinic3_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info,
sq_info->q_depth = sq->wq.q_depth;
sq_info->wqebb_size = sq->wq.wqebb_size;
- sq_info->ci_addr = sq->cons_idx_addr;
+ sq_info->ci_addr = sq->tx.cons_idx_addr;
sq_info->cla_addr = sq->wq.wq_block_paddr;
sq_info->slq_handle = sq;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index e1a9d22..1680cda 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -18,7 +18,7 @@
#include "vram_common.h"
#define HINIC3_NIC_DRV_NAME "hinic3"
-#define HINIC3_NIC_DRV_VERSION "17.7.8.101"
+#define HINIC3_NIC_DRV_VERSION "17.12.2.102"
#define HINIC3_FUNC_IS_VF(hwdev) (hinic3_func_type(hwdev) == TYPE_VF)
@@ -259,6 +259,9 @@ struct hinic3_nic_dev {
struct hinic3_lld_dev *lld_dev;
void *hwdev;
+ /* Currently, 1 indicates is_in_kexec. */
+ u32 state;
+
int poll_weight;
u32 rsvd1;
unsigned long *vlan_bitmap;
@@ -311,6 +314,7 @@ struct hinic3_nic_dev {
struct hinic3_txq *txqs;
struct hinic3_rxq *rxqs;
struct hinic3_dyna_txrxq_params q_params;
+ u8 cqe_coal_en; /* use in rx */
u8 cqe_mode; /* rx_cqe */
u16 num_qp_irq;
@@ -336,6 +340,7 @@ struct hinic3_nic_dev {
#ifdef HAVE_XDP_SUPPORT
struct bpf_prog *xdp_prog;
+ bool remove_flag;
#endif
struct delayed_work periodic_work;
@@ -447,6 +452,7 @@ void hinic3_link_status_change(struct hinic3_nic_dev *nic_dev, bool status);
#ifdef HAVE_XDP_SUPPORT
bool hinic3_is_xdp_enable(struct hinic3_nic_dev *nic_dev);
int hinic3_xdp_max_mtu(struct hinic3_nic_dev *nic_dev);
+int hinic3_safe_switch_channels(struct hinic3_nic_dev *nic_dev);
#endif
#ifdef HAVE_UDP_TUNNEL_NIC_INFO
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index f3bb4c5..661a52b 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -20,6 +20,7 @@
#include "nic_npu_cmd.h"
#include "hinic3_nic_cmdq.h"
#include "hinic3_nic_io.h"
+#include "sw_cmdq_ops.h"
#define HINIC3_DEAULT_TX_CI_PENDING_LIMIT 1
#define HINIC3_DEAULT_TX_CI_COALESCING_TIME 1
@@ -58,180 +59,6 @@ MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)");
#define HINIC3_CI_PADDR(base_paddr, q_id) ((base_paddr) + \
(q_id) * HINIC3_CI_Q_ADDR_SIZE)
-#define WQ_PREFETCH_MAX 4
-#define WQ_PREFETCH_MIN 1
-#define WQ_PREFETCH_THRESHOLD 256
-
-#define CI_IDX_HIGH_SHIFH 12
-
-#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH)
-
-#define SQ_CTXT_PI_IDX_SHIFT 0
-#define SQ_CTXT_CI_IDX_SHIFT 16
-
-#define SQ_CTXT_PI_IDX_MASK 0xFFFFU
-#define SQ_CTXT_CI_IDX_MASK 0xFFFFU
-
-#define SQ_CTXT_CI_PI_SET(val, member) (((val) & \
- SQ_CTXT_##member##_MASK) \
- << SQ_CTXT_##member##_SHIFT)
-
-#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0
-#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1
-
-#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U
-#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U
-
-#define SQ_CTXT_MODE_SET(val, member) (((val) & \
- SQ_CTXT_MODE_##member##_MASK) \
- << SQ_CTXT_MODE_##member##_SHIFT)
-
-#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23
-
-#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
-#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U
-
-#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
- SQ_CTXT_WQ_PAGE_##member##_MASK) \
- << SQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0
-#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16
-
-#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU
-#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU
-
-#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & \
- SQ_CTXT_PKT_DROP_##member##_MASK) \
- << SQ_CTXT_PKT_DROP_##member##_SHIFT)
-
-#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0
-
-#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU
-
-#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & \
- SQ_CTXT_##member##_MASK) \
- << SQ_CTXT_##member##_SHIFT)
-
-#define SQ_CTXT_VLAN_TAG_SHIFT 0
-#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16
-#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19
-#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23
-
-#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU
-#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U
-#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U
-#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U
-
-#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & \
- SQ_CTXT_VLAN_##member##_MASK) \
- << SQ_CTXT_VLAN_##member##_SHIFT)
-
-#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
-#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
-#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
-
-#define SQ_CTXT_PREF_CI_HI_SHIFT 0
-#define SQ_CTXT_PREF_OWNER_SHIFT 4
-
-#define SQ_CTXT_PREF_CI_HI_MASK 0xFU
-#define SQ_CTXT_PREF_OWNER_MASK 0x1U
-
-#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
-#define SQ_CTXT_PREF_CI_LOW_SHIFT 20
-
-#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
-#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU
-
-#define SQ_CTXT_PREF_SET(val, member) (((val) & \
- SQ_CTXT_PREF_##member##_MASK) \
- << SQ_CTXT_PREF_##member##_SHIFT)
-
-#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
-
-#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
-
-#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
- SQ_CTXT_WQ_BLOCK_##member##_MASK) \
- << SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define RQ_CTXT_PI_IDX_SHIFT 0
-#define RQ_CTXT_CI_IDX_SHIFT 16
-
-#define RQ_CTXT_PI_IDX_MASK 0xFFFFU
-#define RQ_CTXT_CI_IDX_MASK 0xFFFFU
-
-#define RQ_CTXT_CI_PI_SET(val, member) (((val) & \
- RQ_CTXT_##member##_MASK) \
- << RQ_CTXT_##member##_SHIFT)
-
-#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21
-#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31
-
-#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU
-#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
-
-#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \
- RQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28
-#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31
-
-#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
-#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 0x3U
-#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U
-
-#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
- RQ_CTXT_WQ_PAGE_##member##_MASK) << \
- RQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define RQ_CTXT_CQE_LEN_SHIFT 28
-
-#define RQ_CTXT_CQE_LEN_MASK 0x3U
-
-#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & \
- RQ_CTXT_##member##_MASK) << \
- RQ_CTXT_##member##_SHIFT)
-
-#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
-#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
-#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
-
-#define RQ_CTXT_PREF_CI_HI_SHIFT 0
-#define RQ_CTXT_PREF_OWNER_SHIFT 4
-
-#define RQ_CTXT_PREF_CI_HI_MASK 0xFU
-#define RQ_CTXT_PREF_OWNER_MASK 0x1U
-
-#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
-#define RQ_CTXT_PREF_CI_LOW_SHIFT 20
-
-#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
-#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU
-
-#define RQ_CTXT_PREF_SET(val, member) (((val) & \
- RQ_CTXT_PREF_##member##_MASK) << \
- RQ_CTXT_PREF_##member##_SHIFT)
-
-#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
-
-#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
-
-#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
- RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
- RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4)
#define WQ_PAGE_PFN_SHIFT 12
@@ -287,7 +114,8 @@ int hinic3_get_rq_wqe_type(void *hwdev)
return rq_wqe_type;
}
-static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue *rq,
+static int hinic3_create_rq(struct hinic3_nic_io *nic_io,
+ struct hinic3_io_queue *rq,
u16 q_id, u32 rq_depth, u16 rq_msix_idx)
{
int err;
@@ -306,19 +134,50 @@ static int hinic3_create_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue
rq->msix_entry_idx = rq_msix_idx;
err = hinic3_wq_create(nic_io->hwdev, &rq->wq, rq_depth,
- (u16)BIT(HINIC3_RQ_WQEBB_SHIFT + rq->wqe_type));
+ (u16)BIT(HINIC3_RQ_WQEBB_SHIFT + rq_wqe_type));
if (err != 0) {
sdk_err(nic_io->dev_hdl, "Failed to create rx queue(%u) wq\n",
q_id);
return err;
}
+ rq->rx.pi_virt_addr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE,
+ &rq->rx.pi_dma_addr,
+ GFP_KERNEL);
+ if (!rq->rx.pi_virt_addr) {
+ hinic3_wq_destroy(&rq->wq);
+ nic_err(nic_io->dev_hdl, "Failed to allocate rq pi virt addr\n");
+ return -ENOMEM;
+ }
+
+ rq->rx_ci_vaddr = dma_zalloc_coherent(nic_io->dev_hdl, PAGE_SIZE,
+ &rq->rx_ci_paddr, GFP_KERNEL);
+ if (!rq->rx_ci_vaddr) {
+ hinic3_wq_destroy(&rq->wq);
+
+ dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr,
+ rq->rx.pi_dma_addr);
+ nic_err(nic_io->dev_hdl, "Failed to allocate rq ci vaddr\n");
+ return -ENOMEM;
+ }
+
return 0;
}
-static void hinic3_destroy_rq(struct hinic3_nic_io *nic_io, struct hinic3_io_queue *rq)
+static void hinic3_destroy_rq(struct hinic3_nic_io *nic_io,
+ struct hinic3_io_queue *rq)
{
+ dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx_ci_vaddr,
+ rq->rx_ci_paddr);
+
+ dma_free_coherent(nic_io->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr,
+ rq->rx.pi_dma_addr);
+
+#ifdef HIUDK_ULD
+ hinic3_wq_destroy(nic_io->hwdev, &rq->wq);
+#else
hinic3_wq_destroy(&rq->wq);
+#endif
}
static int create_qp(struct hinic3_nic_io *nic_io, struct hinic3_io_queue *sq,
@@ -542,13 +401,15 @@ static void init_qps_info(struct hinic3_nic_io *nic_io,
nic_io->sq = qp_params->sqs;
nic_io->rq = qp_params->rqs;
for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
- sqs[q_id].cons_idx_addr = HINIC3_CI_VADDR(nic_io->sq_ci_vaddr_base, q_id);
+ sqs[q_id].tx.cons_idx_addr =
+ HINIC3_CI_VADDR(nic_io->sq_ci_vaddr_base, q_id);
/* clear ci value */
- *(u16 *)sqs[q_id].cons_idx_addr = 0;
+ *(u16 *)sqs[q_id].tx.cons_idx_addr = 0;
sqs[q_id].db_addr = nic_io->sqs_db_addr;
- rqs[q_id].cons_idx_addr = HINIC3_CI_VADDR(nic_io->rq_ci_vaddr_base, q_id);
- *(u32 *)rqs[q_id].cons_idx_addr = 0;
+ rqs[q_id].rx_cons_idx_addr =
+ HINIC3_CI_VADDR(nic_io->rq_ci_vaddr_base, q_id);
+ *(u32 *)rqs[q_id].rx_cons_idx_addr = 0;
/* The first num_qps doorbell is used by sq */
rqs[q_id].db_addr = nic_io->rqs_db_addr;
}
@@ -736,7 +597,7 @@ void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
hinic3_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
}
-static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
u32 *wq_page_pfn_hi, u32 *wq_page_pfn_lo,
u32 *wq_block_pfn_hi, u32 *wq_block_pfn_lo)
{
@@ -754,77 +615,6 @@ static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
*wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
}
-void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq, struct hinic3_rq_ctxt *rq_ctxt)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo;
- u32 wq_block_pfn_hi, wq_block_pfn_lo;
- u16 pi_start, ci_start;
- u16 wqe_type = rq->wqe_type;
-
- /* RQ depth is in unit of 8Bytes */
- ci_start = (u16)((u32)hinic3_get_rq_local_ci(rq) << wqe_type);
- pi_start = (u16)((u32)hinic3_get_rq_local_pi(rq) << wqe_type);
-
- hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
- &wq_block_pfn_hi, &wq_block_pfn_lo);
-
- rq_ctxt->ci_pi =
- RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
- RQ_CTXT_CI_PI_SET(pi_start, PI_IDX);
-
- rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) |
- RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR);
-
- rq_ctxt->wq_pfn_hi_type_owner =
- RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
- RQ_CTXT_WQ_PAGE_SET(1, OWNER);
-
- switch (wqe_type) {
- case HINIC3_EXTEND_RQ_WQE:
- /* use 32Byte WQE with SGE for CQE */
- rq_ctxt->wq_pfn_hi_type_owner |=
- RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE);
- break;
- case HINIC3_NORMAL_RQ_WQE:
- /* use 16Byte WQE with 32Bytes SGE for CQE */
- rq_ctxt->wq_pfn_hi_type_owner |=
- RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE);
- rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN);
- break;
- case HINIC3_COMPACT_RQ_WQE:
- /* use 8Byte WQE */
- rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(3, WQE_TYPE);
- break;
- default:
- pr_err("Invalid rq wqe type: %u", wqe_type);
- }
-
- rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
-
- rq_ctxt->pref_cache =
- RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- rq_ctxt->pref_ci_owner =
- RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) |
- RQ_CTXT_PREF_SET(1, OWNER);
-
- rq_ctxt->pref_wq_pfn_hi_ci =
- RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
- RQ_CTXT_PREF_SET(ci_start, CI_LOW);
-
- rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
-
-
- rq_ctxt->wq_block_pfn_hi =
- RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
-
- rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
-
- hinic3_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
-}
-
static inline u16 hinic3_get_max_ctxts(u16 num_qps, u16 cmd_buf_size)
{
u16 max_ctxts = (cmd_buf_size - HINIC3_QP_CTXT_HEADER_SIZE) / sizeof(struct hinic3_rq_ctxt);
@@ -872,6 +662,20 @@ static int init_sq_ctxts(struct hinic3_nic_io *nic_io)
return err;
}
+u8 hinic3_get_nic_io_cqe_coal_state(void *hwdev)
+{
+ struct hinic3_nic_io *nic_io = NULL;
+
+ if (!hwdev)
+ return 0;
+
+ nic_io = hinic3_get_service_adapter(hwdev, SERVICE_T_NIC);
+ if (!nic_io)
+ return 0;
+
+ return nic_io->cqe_coal_en;
+}
+
static int init_rq_ctxts(struct hinic3_nic_io *nic_io)
{
struct hinic3_cmd_buf *cmd_buf = NULL;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
index c5e02ad..2cf2c30 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -19,12 +19,233 @@
#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
#define HINIC3_CQE_SIZE_SHIFT 4
+#define HINIC3_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */
+
+#define RQ_CTXT_PI_IDX_SHIFT 0
+#define RQ_CTXT_CI_IDX_SHIFT 16
+
+#define RQ_CTXT_PI_IDX_MASK 0xFFFFU
+#define RQ_CTXT_CI_IDX_MASK 0xFFFFU
+
+#define RQ_CTXT_CI_PI_SET(val, member) (((val) & \
+ RQ_CTXT_##member##_MASK) \
+ << RQ_CTXT_##member##_SHIFT)
+
+#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct hinic3_qp_ctxt_header) \
+ + (num_sqs) * sizeof(struct hinic3_sq_ctxt)))
+
+#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct hinic3_qp_ctxt_header) \
+ + (num_rqs) * sizeof(struct hinic3_rq_ctxt)))
+
+#define CI_IDX_HIGH_SHIFH 12
+
+#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH)
+
+#define SQ_CTXT_PI_IDX_SHIFT 0
+#define SQ_CTXT_CI_IDX_SHIFT 16
+
+#define SQ_CTXT_PI_IDX_MASK 0xFFFFU
+#define SQ_CTXT_CI_IDX_MASK 0xFFFFU
+
+#define SQ_CTXT_CI_PI_SET(val, member) (((val) & \
+ SQ_CTXT_##member##_MASK) \
+ << SQ_CTXT_##member##_SHIFT)
+
+#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0
+#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U
+#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U
+
+#define SQ_CTXT_MODE_SET(val, member) (((val) & \
+ SQ_CTXT_MODE_##member##_MASK) \
+ << SQ_CTXT_MODE_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U
+
+#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
+ SQ_CTXT_WQ_PAGE_##member##_MASK) \
+ << SQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0
+#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU
+
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & \
+ SQ_CTXT_PKT_DROP_##member##_MASK) \
+ << SQ_CTXT_PKT_DROP_##member##_SHIFT)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU
+
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & \
+ SQ_CTXT_##member##_MASK) \
+ << SQ_CTXT_##member##_SHIFT)
+
+#define SQ_CTXT_VLAN_TAG_SHIFT 0
+#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16
+#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19
+#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23
+
+#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU
+#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U
+#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U
+
+#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & \
+ SQ_CTXT_VLAN_##member##_MASK) \
+ << SQ_CTXT_VLAN_##member##_SHIFT)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define SQ_CTXT_PREF_CI_HI_SHIFT 0
+#define SQ_CTXT_PREF_OWNER_SHIFT 4
+
+#define SQ_CTXT_PREF_CI_HI_MASK 0xFU
+#define SQ_CTXT_PREF_OWNER_MASK 0x1U
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define SQ_CTXT_PREF_CI_LOW_SHIFT 20
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU
+
+#define SQ_CTXT_PREF_SET(val, member) (((val) & \
+ SQ_CTXT_PREF_##member##_MASK) \
+ << SQ_CTXT_PREF_##member##_SHIFT)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
+ SQ_CTXT_WQ_BLOCK_##member##_MASK) \
+ << SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define RQ_CTXT_PI_IDX_SHIFT 0
+#define RQ_CTXT_CI_IDX_SHIFT 16
+
+#define RQ_CTXT_PI_IDX_MASK 0xFFFFU
+#define RQ_CTXT_CI_IDX_MASK 0xFFFFU
+
+#define RQ_CTXT_CI_PI_SET(val, member) (((val) & \
+ RQ_CTXT_##member##_MASK) \
+ << RQ_CTXT_##member##_SHIFT)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21
+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU
+#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U
+
+#define RQ_CTXT_CEQ_ATTR_ARM_SHIFT 30
+#define RQ_CTXT_CEQ_ATTR_ARM_MASK 0x1U
+
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \
+ RQ_CTXT_CEQ_ATTR_##member##_MASK) \
+ << RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28
+#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 0x3U
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U
+
+#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \
+ RQ_CTXT_WQ_PAGE_##member##_MASK) << \
+ RQ_CTXT_WQ_PAGE_##member##_SHIFT)
+
+#define RQ_CTXT_CQE_LEN_SHIFT 28
+
+#define RQ_CTXT_CQE_LEN_MASK 0x3U
+
+#define RQ_CTXT_MAX_COUNT_SHIFT 18
+#define RQ_CTXT_MAX_COUNT_MASK 0x3FFU
+
+#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & \
+ RQ_CTXT_##member##_MASK) << \
+ RQ_CTXT_##member##_SHIFT)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU
+#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU
+#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU
+
+#define RQ_CTXT_PREF_CI_HI_SHIFT 0
+#define RQ_CTXT_PREF_OWNER_SHIFT 4
+
+#define RQ_CTXT_PREF_CI_HI_MASK 0xFU
+#define RQ_CTXT_PREF_OWNER_MASK 0x1U
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0
+#define RQ_CTXT_PREF_CI_LOW_SHIFT 20
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU
+#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU
+
+#define RQ_CTXT_PREF_SET(val, member) (((val) & \
+ RQ_CTXT_PREF_##member##_MASK) << \
+ RQ_CTXT_PREF_##member##_SHIFT)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU
+
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \
+ RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
+ RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
+
+#define WQ_PREFETCH_MAX 4
+#define WQ_PREFETCH_MIN 1
+#define WQ_PREFETCH_THRESHOLD 256
+
enum hinic3_rq_wqe_type {
HINIC3_COMPACT_RQ_WQE,
HINIC3_NORMAL_RQ_WQE,
HINIC3_EXTEND_RQ_WQE,
};
+struct hinic3_rq_ctxt {
+ u32 ci_pi;
+ u32 ceq_attr;
+ u32 wq_pfn_hi_type_owner;
+ u32 wq_pfn_lo;
+
+ u32 ci_paddr_hi;
+ u32 ci_paddr_lo;
+
+ u32 rsvd;
+ u32 cqe_sge_len;
+
+ u32 pref_cache;
+ u32 pref_ci_owner;
+ u32 pref_wq_pfn_hi_ci;
+ u32 pref_wq_pfn_lo;
+
+ u32 pi_paddr_hi;
+ u32 pi_paddr_lo;
+ u32 wq_block_pfn_hi;
+ u32 wq_block_pfn_lo;
+};
+
struct hinic3_io_queue {
struct hinic3_wq wq;
union {
@@ -38,7 +259,22 @@ struct hinic3_io_queue {
u16 msix_entry_idx;
u8 __iomem *db_addr;
- void *cons_idx_addr;
+
+ union {
+ struct {
+ void *cons_idx_addr;
+ } tx;
+
+ struct {
+ u16 *pi_virt_addr;
+ dma_addr_t pi_dma_addr;
+ } rx;
+ };
+
+ void *rx_cons_idx_addr;
+ void *rx_ci_vaddr;
+ dma_addr_t rx_ci_paddr;
+ dma_addr_t cqe_start_paddr;
} ____cacheline_aligned;
struct hinic3_nic_db {
@@ -119,7 +355,7 @@ static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
{
return WQ_MASK_IDX(&sq->wq,
- hinic3_hw_cpu16(*(u16 *)sq->cons_idx_addr));
+ hinic3_hw_cpu16(*(u16 *)sq->tx.cons_idx_addr));
}
/* *
@@ -132,7 +368,7 @@ static inline u16 hinic3_get_rq_hw_ci(const struct hinic3_io_queue *rq)
u16 hw_ci;
u32 rq_ci_wb;
- rq_ci_wb = hinic3_hw_cpu32(*(u32 *)rq->cons_idx_addr);
+ rq_ci_wb = hinic3_hw_cpu32(*(u32 *)rq->rx_cons_idx_addr);
hw_ci = ((struct hinic3_rq_ci_wb *) &rq_ci_wb)->dw0.bs.hw_ci;
return WQ_MASK_IDX(&rq->wq, hw_ci);
@@ -336,5 +572,11 @@ int hinic3_init_qps(void *hwdev, struct hinic3_dyna_qp_params *qp_params);
void hinic3_deinit_qps(void *hwdev, struct hinic3_dyna_qp_params *qp_params);
int hinic3_init_nicio_res(void *hwdev);
void hinic3_deinit_nicio_res(void *hwdev);
+u8 hinic3_get_nic_io_cqe_coal_state(void *hwdev);
int hinic3_get_rq_wqe_type(void *hwdev);
+void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+ u32 *wq_page_pfn_hi,
+ u32 *wq_page_pfn_lo,
+ u32 *wq_block_pfn_hi,
+ u32 *wq_block_pfn_lo);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c
index 9ea93a0..3572873 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_prof.c
@@ -7,7 +7,6 @@
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/types.h>
-#include <linux/errno.h>
#include "ossl_knl.h"
#include "hinic3_nic_dev.h"
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
index 67bb86d..b6f4e35 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_qp.h
@@ -137,6 +137,14 @@
#define HINIC3_GET_ESP_NEXT_HEAD(decry_info) \
RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD)
+#define RX_CQE_COALESCE_SHIFT 31
+#define RX_CQE_COALESCE_MASK 0x1U
+
+#define RX_HW_CI_SHIFT 0
+#define RX_HW_CI_MASK 0xFFFFU
+#define HINIC3_GET_RX_HW_CI(value) \
+ (((value) >> RX_HW_CI_SHIFT) & RX_HW_CI_MASK)
+
/* compact cqe field */
/* cqe dw0 */
#define RQ_COMPACT_CQE_STATUS_RXDONE_SHIFT 31
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
index dcd79ee..da75847 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
@@ -31,10 +31,9 @@ MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default=0)");
#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) do { \
if ((num_qps) > (nic_dev)->max_qps) \
nic_warn(&(nic_dev)->pdev->dev, \
- "Module Parameter %s value %u is out of range, " \
- "Maximum value for the device: %u, using %u\n", \
- #num_qps, num_qps, (nic_dev)->max_qps, \
- (nic_dev)->max_qps); \
+ "Module Parameter %s value %u is out of range, " \
+ "Maximum value for the device: %u\n", \
+ #num_qps, num_qps, (nic_dev)->max_qps); \
if ((num_qps) > (nic_dev)->max_qps) \
(out_qps) = (nic_dev)->max_qps; \
else if ((num_qps) > 0) \
@@ -289,7 +288,11 @@ static void decide_num_qps(struct hinic3_nic_dev *nic_dev)
if (!num_cpus)
num_cpus = max_num_cpus;
- nic_dev->q_params.num_qps = (u16)min_t(u16, tmp_num_qps, num_cpus);
+ if (num_qps == 0)
+ nic_dev->q_params.num_qps = (u16)min_t(u16,
+ tmp_num_qps, num_cpus);
+ else
+ nic_dev->q_params.num_qps = tmp_num_qps;
nic_dev->nic_vram->vram_num_qps = nic_dev->q_params.num_qps;
nicif_info(nic_dev, drv, nic_dev->netdev,
"init num qps 1:%u 2:%u\n",
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 936258c..ad070c0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include "ossl_knl.h"
#include "hinic3_crm.h"
@@ -31,7 +32,11 @@
#include "hinic3_srv_nic.h"
#include "hinic3_nic_dev.h"
#include "hinic3_rss.h"
+#include "hinic3_nic.h"
+#include "nic_mpu_cmd.h"
#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+#include "hinic3_hwdev.h"
/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */
#define HINIC3_RX_HDR_SIZE 256
@@ -159,7 +164,14 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
break;
}
+#ifdef HAVE_XDP_SUPPORT
+ dma_addr = (rxq->xdp_headroom_flag == 0) ?
+ rx_info->buf_dma_addr + rx_info->page_offset :
+ rx_info->buf_dma_addr +
+ rx_info->page_offset + XDP_PACKET_HEADROOM;
+#else
dma_addr = rx_info->buf_dma_addr + rx_info->page_offset;
+#endif
rq_wqe = rx_info->rq_wqe;
@@ -469,8 +481,13 @@ void hinic3_rxq_get_stats(struct hinic3_rxq *rxq,
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
stats->dropped = rxq_stats->dropped;
+ stats->rx_buf_empty = rxq_stats->rx_buf_empty;
+#ifdef HAVE_XDP_SUPPORT
stats->xdp_dropped = rxq_stats->xdp_dropped;
+ stats->xdp_redirected = rxq_stats->xdp_redirected;
stats->rx_buf_empty = rxq_stats->rx_buf_empty;
+ stats->xdp_large_pkt = rxq_stats->xdp_large_pkt;
+#endif
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
@@ -484,14 +501,17 @@ void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats)
rxq_stats->csum_errors = 0;
rxq_stats->other_errors = 0;
rxq_stats->dropped = 0;
- rxq_stats->xdp_dropped = 0;
rxq_stats->rx_buf_empty = 0;
rxq_stats->alloc_skb_err = 0;
rxq_stats->alloc_rx_buf_err = 0;
- rxq_stats->xdp_large_pkt = 0;
rxq_stats->restore_drop_sge = 0;
rxq_stats->rsvd2 = 0;
+#ifdef HAVE_XDP_SUPPORT
+ rxq_stats->xdp_dropped = 0;
+ rxq_stats->xdp_redirected = 0;
+ rxq_stats->xdp_large_pkt = 0;
+#endif
u64_stats_update_end(&rxq_stats->syncp);
}
@@ -760,6 +780,8 @@ enum hinic3_xdp_status {
// pkt action
HINIC3_XDP_PKT_PASS,
HINIC3_XDP_PKT_DROP,
+ HINIC3_XDP_PKT_REDIRECT,
+ HINIC3_XDP_PKT_TX,
};
static void update_drop_rx_info(struct hinic3_rxq *rxq, u16 weqbb_num)
@@ -786,71 +808,6 @@ discard_direct:
}
}
-int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 pkt_len, struct xdp_buff *xdp)
-{
- struct bpf_prog *xdp_prog = NULL;
- struct hinic3_rx_info *rx_info = NULL;
- struct net_device *netdev = rxq->netdev;
- int result = HINIC3_XDP_PKT_PASS;
- u16 weqbb_num = 1; /* xdp can only use one rx_buff */
- u8 *va = NULL;
- u32 act;
-
- rcu_read_lock();
- xdp_prog = READ_ONCE(rxq->xdp_prog);
- if (!xdp_prog) {
- result = HINIC3_XDP_PROG_EMPTY;
- goto unlock_rcu;
- }
-
- if (unlikely(pkt_len > rxq->buf_len)) {
- RXQ_STATS_INC(rxq, xdp_large_pkt);
- weqbb_num = HINIC3_GET_SGE_NUM(pkt_len, rxq);
- result = HINIC3_XDP_PKT_DROP;
- goto xdp_out;
- }
-
- rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask];
- va = (u8 *)page_address(rx_info->page) + rx_info->page_offset;
- prefetch(va);
- dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr,
- rx_info->page_offset,
- rxq->buf_len, DMA_FROM_DEVICE);
- xdp->data = va;
- xdp->data_hard_start = xdp->data;
- xdp->data_end = xdp->data + pkt_len;
-#ifdef HAVE_XDP_FRAME_SZ
- xdp->frame_sz = rxq->buf_len;
-#endif
-#ifdef HAVE_XDP_DATA_META
- xdp_set_data_meta_invalid(xdp);
-#endif
- prefetchw(xdp->data_hard_start);
- act = bpf_prog_run_xdp(xdp_prog, xdp);
- switch (act) {
- case XDP_PASS:
- result = HINIC3_XDP_PKT_PASS;
- break;
- case XDP_DROP:
- result = HINIC3_XDP_PKT_DROP;
- break;
- default:
- result = HINIC3_XDP_PKT_DROP;
- bpf_warn_invalid_xdp_action(netdev, xdp_prog, act);
- }
-
-xdp_out:
- if (result == HINIC3_XDP_PKT_DROP) {
- RXQ_STATS_INC(rxq, xdp_dropped);
- update_drop_rx_info(rxq, weqbb_num);
- }
-
-unlock_rcu:
- rcu_read_unlock();
-
- return result;
-}
-
static bool hinic3_add_rx_frag_with_xdp(struct hinic3_rxq *rxq, u32 pkt_len,
struct hinic3_rx_info *rx_info,
struct sk_buff *skb, struct xdp_buff *xdp)
@@ -897,6 +854,106 @@ umap_page:
return false;
}
+static int hinic3_run_xdp_prog(struct hinic3_rxq *rxq,
+ struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ u32 *pkt_len)
+{
+ u32 act;
+ int err;
+ int result = HINIC3_XDP_PKT_DROP;
+ struct net_device *netdev = rxq->netdev;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ *pkt_len = xdp->data_end - xdp->data;
+ result = HINIC3_XDP_PKT_PASS;
+ break;
+ case XDP_TX:
+ if (unlikely(!hinic3_xmit_xdp_buff(netdev, rxq->q_id, xdp)))
+ goto out_failure;
+ result = HINIC3_XDP_PKT_TX;
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(netdev, xdp, xdp_prog);
+ if (unlikely(err != 0))
+ goto out_failure;
+
+ result = HINIC3_XDP_PKT_REDIRECT;
+ break;
+ case XDP_ABORTED:
+ goto out_failure;
+ case XDP_DROP:
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rxq->netdev, xdp_prog, act);
+out_failure:
+ trace_xdp_exception(netdev, xdp_prog, act);
+ }
+
+ return result;
+}
+
+static int hinic3_run_xdp(struct hinic3_rxq *rxq, u32 *pkt_len,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *xdp_prog = NULL;
+ struct hinic3_rx_info *rx_info = NULL;
+ int result = HINIC3_XDP_PKT_PASS;
+ u16 weqbb_num = 1; /* xdp can only use one rx_buff */
+ u8 *va = NULL;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rxq->xdp_prog);
+ if (!xdp_prog) {
+ result = HINIC3_XDP_PROG_EMPTY;
+ goto unlock_rcu;
+ }
+ if (unlikely(*pkt_len > rxq->buf_len)) {
+ RXQ_STATS_INC(rxq, xdp_large_pkt);
+ weqbb_num = HINIC3_GET_SGE_NUM(*pkt_len, rxq);
+ result = HINIC3_XDP_PKT_DROP;
+ goto xdp_out;
+ }
+
+ rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask];
+ va = (u8 *)page_address(rx_info->page) + rx_info->page_offset;
+ prefetch(va);
+ dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr,
+ rx_info->page_offset,
+ rxq->buf_len, DMA_FROM_DEVICE);
+ xdp->data_hard_start = va;
+ xdp->data = va + XDP_PACKET_HEADROOM;
+ xdp->data_end = xdp->data + *pkt_len;
+ xdp->rxq = &rxq->xdp_rxq;
+#ifdef HAVE_XDP_FRAME_SZ
+ xdp->frame_sz = rxq->buf_len;
+#endif
+#ifdef HAVE_XDP_DATA_META
+ xdp_set_data_meta_invalid(xdp);
+#endif
+ prefetchw(xdp->data_hard_start);
+
+ result = hinic3_run_xdp_prog(rxq, xdp_prog, xdp, pkt_len);
+xdp_out:
+ switch (result) {
+ case HINIC3_XDP_PKT_DROP:
+ RXQ_STATS_INC(rxq, xdp_dropped);
+ break;
+ case HINIC3_XDP_PKT_REDIRECT:
+ RXQ_STATS_INC(rxq, xdp_redirected);
+ break;
+ default:
+ break;
+ }
+ if (result != HINIC3_XDP_PKT_PASS)
+ update_drop_rx_info(rxq, weqbb_num);
+unlock_rcu:
+ rcu_read_unlock();
+
+ return result;
+}
+
static struct sk_buff *hinic3_fetch_rx_buffer_xdp(struct hinic3_rxq *rxq,
u32 pkt_len,
struct xdp_buff *xdp)
@@ -929,19 +986,24 @@ static struct sk_buff *hinic3_fetch_rx_buffer_xdp(struct hinic3_rxq *rxq,
#endif
static int recv_one_pkt(struct hinic3_rxq *rxq,
- struct hinic3_cqe_info *cqe_info)
+ struct hinic3_cqe_info *cqe_info, u32 rx_pkt_len)
{
struct sk_buff *skb = NULL;
struct net_device *netdev = rxq->netdev;
struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 pkt_len = rx_pkt_len;
#ifdef HAVE_XDP_SUPPORT
u32 xdp_status;
struct xdp_buff xdp = { 0 };
- xdp_status = (u32)(hinic3_run_xdp(rxq, cqe_info->pkt_len, &xdp));
- if (xdp_status == HINIC3_XDP_PKT_DROP)
+ xdp_status = (u32)(hinic3_run_xdp(rxq, &pkt_len, &xdp));
+ // XDP_REDIRECT & XDP_TX: ring buffer flip
+ if (xdp_status == HINIC3_XDP_PKT_REDIRECT ||
+ xdp_status == HINIC3_XDP_PKT_TX
+ || xdp_status == HINIC3_XDP_PKT_DROP) {
return 0;
+ }
// build skb
if (xdp_status != HINIC3_XDP_PROG_EMPTY) {
@@ -995,11 +1057,10 @@ static int recv_one_pkt(struct hinic3_rxq *rxq,
#else
napi_gro_flush(&rxq->irq_cfg->napi);
#endif
- netif_receive_skb(skb);
- } else {
- napi_gro_receive(&rxq->irq_cfg->napi, skb);
}
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+
return 0;
}
@@ -1115,25 +1176,47 @@ static bool rx_separate_cqe_done(void *rx_queue, void **rx_cqe)
return true;
}
+#ifdef HAVE_XDP_SUPPORT
+static inline void hinic3_xdp_flush_if_needed(const struct hinic3_nic_dev
+ *nic_dev)
+{
+ if (unlikely(rcu_access_pointer(nic_dev->xdp_prog))) {
+ xdp_do_flush_map();
+ }
+}
+#endif
+
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
- u32 dropped = 0;
+ u32 sw_ci, dropped = 0;
struct hinic3_rq_cqe *rx_cqe = NULL;
struct hinic3_cqe_info cqe_info = { 0 };
u64 rx_bytes = 0;
int pkts = 0, nr_pkts = 0;
u16 num_wqe = 0;
+ u32 hw_ci_value, pkt_len, vlan_len;
+ u16 current_hw_ci = 0;
while (likely(pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
if (!nic_dev->tx_rx_ops.rx_cqe_done(rxq, (void **)&rx_cqe))
break;
-
+ if (nic_dev->cqe_coal_en == HINIC3_CQE_COAL_EN) {
+ hw_ci_value = hinic3_hw_cpu32(
+ rxq->rx_ci_index->current_hw_ci);
+ current_hw_ci = (HINIC3_GET_RX_HW_CI(hw_ci_value) >>
+ rxq->rq->wqe_type) & rxq->q_mask;
+ if (unlikely(sw_ci == current_hw_ci))
+ break;
+ }
/* make sure we read rx_done before packet length */
rmb();
nic_dev->tx_rx_ops.rx_get_cqe_info(rx_cqe, &cqe_info, nic_dev->cqe_mode);
- if (recv_one_pkt(rxq, &cqe_info))
+ vlan_len = hinic3_hw_cpu32(rx_cqe->vlan_len);
+ pkt_len = HINIC3_GET_RX_PKT_LEN(vlan_len);
+ if (recv_one_pkt(rxq, &cqe_info, pkt_len))
break;
rx_bytes += cqe_info.pkt_len;
@@ -1159,6 +1242,9 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
rxq->rxq_stats.bytes += rx_bytes;
rxq->rxq_stats.dropped += (u64)dropped;
u64_stats_update_end(&rxq->rxq_stats.syncp);
+#ifdef HAVE_XDP_SUPPORT
+ hinic3_xdp_flush_if_needed(nic_dev);
+#endif
return pkts;
}
@@ -1267,10 +1353,12 @@ void hinic3_free_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq,
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
{
struct hinic3_dyna_rxq_res *rqres = NULL;
+ struct hinic3_rxq *rxq = NULL;
u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
int idx;
for (idx = 0; idx < num_rq; idx++) {
+ rxq = &nic_dev->rxqs[idx];
rqres = &rxqs_res[idx];
hinic3_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info);
@@ -1284,20 +1372,64 @@ void hinic3_free_rxqs_res(struct hinic3_nic_dev *nic_dev, u16 num_rq,
rqres->cqe_start_paddr);
}
kfree(rqres->rx_info);
+#ifdef HAVE_XDP_SUPPORT
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+#endif
}
}
+/* Prepare one rxq for use: map per-entry CQE addresses, bind the hw RQ,
+ * then fill RQ WQEs and rx buffers. Returns 0 or a negative errno.
+ */
+static int hinic3_fill_rxqs_wqe_buffer(struct hinic3_dyna_rxq_res *rqres,
+ u32 rq_depth,
+ struct hinic3_rxq *rxq, struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_rq_cqe *cqe_va = NULL;
+ dma_addr_t cqe_pa;
+ u32 idx;
+ u32 pkts;
+ /* fill cqe */
+ /* NOTE(review): the code this replaces filled CQEs only when
+ * cqe_mode == HINIC3_RQ_CQE_SEPARATE; confirm the unconditional
+ * fill here is intended.
+ */
+ cqe_va = (struct hinic3_rq_cqe *)rqres->cqe_start_vaddr;
+ cqe_pa = rqres->cqe_start_paddr;
+ for (idx = 0; idx < rq_depth; idx++) {
+ rxq->rx_info[idx].cqe = cqe_va;
+ rxq->rx_info[idx].cqe_dma = cqe_pa;
+ cqe_va++;
+ cqe_pa += sizeof(*rxq->rx_info->cqe);
+ }
+
+ rxq->rq = hinic3_get_nic_queue(nic_dev->hwdev, rxq->q_id, HINIC3_RQ);
+ if (!rxq->rq) {
+ nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n");
+ return -EINVAL;
+ }
+
+ /* hw ci write-back area used by the CQE coalescing path */
+ rxq->rx_ci_index = (struct hinic3_rx_ci_index *)rxq->rq->rx_ci_vaddr;
+ pkts = hinic3_rx_fill_wqe(rxq);
+ if (pkts != rxq->q_depth) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to fill rx wqe\n");
+ return -EFAULT;
+ }
+
+ pkts = hinic3_rx_fill_buffers(rxq);
+ if (!pkts) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to fill Rx buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
int hinic3_configure_rxqs(struct hinic3_nic_dev *nic_dev, u16 num_rq,
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
{
struct hinic3_dyna_rxq_res *rqres = NULL;
struct irq_info *msix_entry = NULL;
struct hinic3_rxq *rxq = NULL;
- struct hinic3_rq_cqe *cqe_va = NULL;
- dma_addr_t cqe_pa;
u16 q_id;
- u32 idx;
- u32 pkts;
+ int err;
+
+ nic_dev->cqe_coal_en = hinic3_get_nic_io_cqe_coal_state(nic_dev->hwdev);
nic_dev->rxq_get_err_times = 0;
for (q_id = 0; q_id < num_rq; q_id++) {
@@ -1323,38 +1455,18 @@ int hinic3_configure_rxqs(struct hinic3_nic_dev *nic_dev, u16 num_rq,
rxq->restore_buf_num = 0;
rxq->rx_info = rqres->rx_info;
+#ifdef HAVE_XDP_SUPPORT
+ rxq->xdp_headroom_flag = (nic_dev->xdp_prog != NULL) ? 1 : 0;
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->netdev, q_id, q_id);
+ if (err != 0)
+ return err;
+#endif
- /* fill cqe */
- if (nic_dev->cqe_mode == HINIC3_RQ_CQE_SEPARATE) {
- cqe_va = (struct hinic3_rq_cqe *)rqres->cqe_start_vaddr;
- cqe_pa = rqres->cqe_start_paddr;
- for (idx = 0; idx < rq_depth; idx++) {
- rxq->rx_info[idx].cqe = cqe_va;
- rxq->rx_info[idx].cqe_dma = cqe_pa;
- cqe_va++;
- cqe_pa += sizeof(*rxq->rx_info->cqe);
- }
- }
-
- rxq->rq = hinic3_get_nic_queue(nic_dev->hwdev, rxq->q_id,
- HINIC3_RQ);
- if (!rxq->rq) {
- nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n");
- return -EINVAL;
- }
-
- pkts = hinic3_rx_fill_wqe(rxq);
- if (pkts != rxq->q_depth) {
- nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill rx wqe\n");
- return -EFAULT;
- }
+ err = hinic3_fill_rxqs_wqe_buffer(rqres, rq_depth,
+ rxq, nic_dev);
+ if (err != 0)
+ return err;
- pkts = hinic3_rx_fill_buffers(rxq);
- if (!pkts) {
- nicif_err(nic_dev, drv, nic_dev->netdev,
- "Failed to fill Rx buffer\n");
- return -ENOMEM;
- }
}
return 0;
@@ -1625,3 +1737,31 @@ void hinic3_rxq_check_work_handler(struct work_struct *work)
free_rxq_info:
kfree(rxq_info);
}
+
+/* Ask the management firmware to disable the VF LAG function for @func_id.
+ * VF-only; best-effort: failures are logged, nothing is returned.
+ */
+void hinic3_cmd_vf_lag(void *hwdev, u16 func_id, u16 channel)
+{
+ struct hinic3_vf_lag_cmd vf_lag_info = { 0 };
+ u16 out_size = sizeof(vf_lag_info);
+ struct hinic3_nic_io *nic_io = NULL;
+ int err;
+
+ if (!hwdev || (hinic3_func_type(hwdev) != TYPE_VF))
+ return;
+
+ nic_io = (struct hinic3_nic_io *)hinic3_get_service_adapter(hwdev,
+ SERVICE_T_NIC);
+ if (!nic_io)
+ return;
+
+ vf_lag_info.func_id = func_id;
+ vf_lag_info.opcode = FLOW_BIFUR_CMD_SET;
+ vf_lag_info.en_flag = 0; /* 0 == disable */
+
+ err = l2nic_msg_to_mgmt_sync_ch(hwdev, HINIC3_NIC_CMD_CFG_VF_LAG,
+ &vf_lag_info, sizeof(vf_lag_info),
+ &vf_lag_info, &out_size, channel);
+ if (err || !out_size || vf_lag_info.msg_head.status)
+ nic_err(nic_io->dev_hdl, "Failed to disable vf_lag function: 0x%x, err: %d, status: 0x%x, out size: 0x%x.\n",
+ func_id, err, vf_lag_info.msg_head.status, out_size);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 480f787..2403b55 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -7,7 +7,9 @@
#ifdef HAVE_PAGE_POOL_SUPPORT
#include <net/page_pool/helpers.h>
#endif
-
+#ifdef HAVE_XDP_SUPPORT
+#include <net/xdp.h>
+#endif
#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm_types.h>
@@ -39,6 +41,8 @@
#define HINIC3_RQ_CQE_SEPARATE 0
#define HINIC3_RQ_CQE_INTEGRATE 1
+#define HINIC3_CQE_COAL_EN 1
+
struct hinic3_rxq_stats {
u64 packets;
u64 bytes;
@@ -46,14 +50,17 @@ struct hinic3_rxq_stats {
u64 csum_errors;
u64 other_errors;
u64 dropped;
- u64 xdp_dropped;
u64 rx_buf_empty;
u64 alloc_skb_err;
u64 alloc_rx_buf_err;
- u64 xdp_large_pkt;
u64 restore_drop_sge;
u64 rsvd2;
+#ifdef HAVE_XDP_SUPPORT
+ u64 xdp_dropped;
+ u64 xdp_redirected;
+ u64 xdp_large_pkt;
+#endif
#ifdef HAVE_NDO_GET_STATS64
struct u64_stats_sync syncp;
#else
@@ -61,6 +68,12 @@ struct hinic3_rxq_stats {
#endif
};
+/* record hw ci written back via DMA by ucode in CQE Coalescing scenario */
+struct hinic3_rx_ci_index {
+ u32 current_hw_ci;
+ u32 rsvd[3];
+};
+
struct hinic3_rx_info {
dma_addr_t buf_dma_addr;
@@ -97,12 +110,18 @@ struct hinic3_rxq {
u32 irq_id;
u16 msix_entry_idx;
+#ifdef HAVE_XDP_SUPPORT
+ u16 xdp_headroom_flag;
+#else
u16 rsvd3;
+#endif
+ struct hinic3_rx_ci_index *rx_ci_index;
struct hinic3_rx_info *rx_info;
struct hinic3_io_queue *rq;
#ifdef HAVE_XDP_SUPPORT
struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
#endif
struct hinic3_irq *irq_cfg;
@@ -173,4 +192,6 @@ void hinic3_rx_get_compact_cqe_info(void *rx_cqe, void *cqe_info, u8 cqe_mode);
void hinic3_rxq_check_work_handler(struct work_struct *work);
+void hinic3_cmd_vf_lag(void *hwdev, u16 func_id, u16 channel);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 99264c7..e3fcf54 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -57,6 +57,11 @@ void hinic3_txq_get_stats(struct hinic3_txq *txq,
stats->busy = txq_stats->busy;
stats->wake = txq_stats->wake;
stats->dropped = txq_stats->dropped;
+#ifdef HAVE_XDP_SUPPORT
+ stats->xdp_dropped = txq_stats->xdp_dropped;
+ stats->xdp_xmits = txq_stats->xdp_xmits;
+ stats->map_xdpf_err = txq_stats->map_xdpf_err;
+#endif
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
@@ -78,6 +83,11 @@ void hinic3_txq_clean_stats(struct hinic3_txq_stats *txq_stats)
txq_stats->frag_size_err = 0;
txq_stats->rsvd1 = 0;
txq_stats->rsvd2 = 0;
+#ifdef HAVE_XDP_SUPPORT
+ txq_stats->xdp_dropped = 0;
+ txq_stats->xdp_xmits = 0;
+ txq_stats->map_xdpf_err = 0;
+#endif
u64_stats_update_end(&txq_stats->syncp);
}
@@ -97,7 +107,7 @@ static inline void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
buf_descs->len = hinic3_hw_be32(len);
}
-static int tx_map_skb(struct hinic3_nic_dev *nic_dev, struct sk_buff *skb,
+int tx_map_skb(struct hinic3_nic_dev *nic_dev, struct sk_buff *skb,
u16 valid_nr_frags, struct hinic3_txq *txq,
struct hinic3_tx_info *tx_info,
struct hinic3_sq_wqe_combo *wqe_combo)
@@ -473,7 +483,7 @@ u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_offload_info *offload_i
return offload;
}
-static void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb)
+void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb)
{
u32 ihs, hdr_len;
@@ -504,7 +514,7 @@ static void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb)
tx_info->num_pkts = 1;
}
-static inline int hinic3_maybe_stop_tx(struct hinic3_txq *txq, u16 wqebb_cnt)
+inline int hinic3_maybe_stop_tx(struct hinic3_txq *txq, u16 wqebb_cnt)
{
if (likely(hinic3_get_sq_free_wqebbs(txq->sq) >= wqebb_cnt))
return 0;
@@ -523,7 +533,7 @@ static inline int hinic3_maybe_stop_tx(struct hinic3_txq *txq, u16 wqebb_cnt)
return 0;
}
-static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
struct hinic3_sq_wqe_combo *wqe_combo,
u16 num_sge, u16 *curr_pi)
{
@@ -666,7 +676,7 @@ void hinic3_tx_set_compact_offload_wqe_task(void *wqe_combo, void *offload_info)
* hinic3_prepare_sq_ctrl - init sq wqe cs
* @nr_descs: total sge_num, include bd0 in cs
*/
-static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
struct hinic3_queue_info *queue_info, int nr_descs, u16 owner)
{
struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
@@ -1107,3 +1117,172 @@ int hinic3_flush_txqs(struct net_device *netdev)
return 0;
}
+#ifdef HAVE_XDP_SUPPORT
+/* DMA-map an XDP frame's data and fill the single buffer descriptor of
+ * the compact SQ WQE. Returns 0 on success, -EIO on mapping failure
+ * (counted in map_xdpf_err).
+ */
+int tx_map_xdpf(struct hinic3_nic_dev *nic_dev, struct xdp_frame *frame,
+ struct hinic3_txq *txq, struct hinic3_xdp_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+
+ dma_info->dma = dma_map_single(&pdev->dev, frame->data,
+ frame->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info->dma)) {
+ TXQ_STATS_INC(txq, map_xdpf_err);
+ return -EIO;
+ }
+ dma_info->len = frame->len;
+
+ wqe_desc->hi_addr = hinic3_hw_be32(upper_32_bits(dma_info->dma));
+ wqe_desc->lo_addr = hinic3_hw_be32(lower_32_bits(dma_info->dma));
+
+ /* raw length only; control bits are OR'd in later by
+ * hinic3_prepare_xdp_sq_ctrl()
+ */
+ wqe_desc->ctrl_len = dma_info->len;
+
+ return 0;
+}
+
+/* Finalize the compact WQE control word (format/type/owner bits) and
+ * convert it to hardware byte order. Must run after tx_map_xdpf() has
+ * stored the data length in ctrl_len.
+ */
+void hinic3_prepare_xdp_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ wqe_desc->ctrl_len |=
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ wqe_desc->ctrl_len = hinic3_hw_be32(wqe_desc->ctrl_len);
+ wqe_desc->queue_info = 0;
+}
+
+/* Post one XDP frame on @txq using a single compact WQE.
+ * Returns 0 on success, NETDEV_TX_BUSY when the ring is full, or a
+ * negative errno on allocation/mapping failure.
+ */
+int hinic3_xdp_xmit_frame(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_txq *txq, struct xdp_frame *xdpf)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {0};
+ struct hinic3_xdp_tx_info *xdp_tx_info = NULL;
+ struct hinic3_tx_info *tx_info = NULL;
+ u16 pi = 0, owner = 0;
+
+ if (unlikely(hinic3_maybe_stop_tx(txq, 1))) {
+ TXQ_STATS_INC(txq, busy);
+ return NETDEV_TX_BUSY;
+ }
+
+ wqe_combo.ctrl_bd0 = hinic3_get_sq_one_wqebb(txq->sq, &pi);
+ wqe_combo.task_type = SQ_WQE_TASKSECT_4BYTES;
+ wqe_combo.wqe_type = SQ_WQE_COMPACT_TYPE;
+ owner = hinic3_get_and_update_sq_owner(txq->sq, pi, 1);
+
+ /* NOTE(review): this allocation is never kfree()d on the success
+ * path and is not stored anywhere reachable by the completion
+ * handler — looks like a per-frame memory leak; consider using the
+ * preallocated txq->xdp_tx_info[pi] slot instead. Confirm.
+ */
+ xdp_tx_info = kzalloc(sizeof(*xdp_tx_info), GFP_ATOMIC);
+ if (!xdp_tx_info) {
+ hinic3_rollback_sq_wqebbs(txq->sq, 1, owner);
+ return -ENOMEM;
+ }
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->wqebb_cnt = 1;
+ xdp_tx_info->dma_info = tx_info->dma_info;
+ xdp_tx_info->xdpf = xdpf;
+
+ if (tx_map_xdpf(nic_dev, xdpf, txq, xdp_tx_info, &wqe_combo) != 0) {
+ kfree(xdp_tx_info);
+ hinic3_rollback_sq_wqebbs(txq->sq, 1, owner);
+ return -EIO;
+ }
+ hinic3_prepare_xdp_sq_ctrl(&wqe_combo, owner);
+ TXQ_STATS_INC(txq, xdp_xmits);
+ wmb(); /* ensure wqe info before updating ci */
+
+ return 0;
+}
+
+/* ndo_xdp_xmit handler: transmit up to @n XDP frames on the queue picked
+ * by the current CPU. Returns the number of frames accepted, or a
+ * negative errno; rejected frames are returned to their pool and counted
+ * as xdp_dropped.
+ */
+int hinic3_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(dev);
+ struct hinic3_txq *txq;
+ u16 q_id, drops = 0;
+ int i;
+
+ if (unlikely(!netif_carrier_ok(dev))) {
+ HINIC3_NIC_STATS_INC(nic_dev, tx_carrier_off_drop);
+ /* ndo_xdp_xmit must return a negative errno;
+ * -NETDEV_TX_BUSY negates a flag value, not an errno.
+ */
+ return -ENETDOWN;
+ }
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ q_id = raw_smp_processor_id() % nic_dev->q_params.num_qps;
+ txq = &nic_dev->txqs[q_id];
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (unlikely(hinic3_xdp_xmit_frame(nic_dev, txq, xdpf))) {
+ xdp_return_frame(xdpf);
+ TXQ_STATS_INC(txq, xdp_dropped);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ hinic3_write_db(txq->sq, txq->cos, SQ_CFLAG_DP,
+ hinic3_get_sq_local_pi(txq->sq));
+ }
+ return n - drops;
+}
+
+/* Convert an xdp_buff into an xdp_frame laid out in the buffer headroom.
+ * Returns NULL when headroom (minus metadata) cannot hold the frame
+ * struct or the required tailroom is missing.
+ * NOTE(review): this largely duplicates the core helper
+ * xdp_convert_buff_to_frame(); prefer the core helper if this kernel
+ * provides it.
+ */
+struct xdp_frame *xdp_convert_to_frame(struct xdp_buff *xdp,
+ struct hinic3_nic_dev *nic_dev)
+{
+ struct xdp_frame *xdp_frame;
+ int metasize, headroom;
+
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return xdp_convert_zc_to_xdp_frame(xdp);
+ /* the frame header is carved out of the buffer's own headroom */
+ xdp_frame = xdp->data_hard_start;
+ headroom = xdp->data - xdp->data_hard_start;
+ metasize = xdp->data - xdp->data_meta;
+ metasize = metasize > 0 ? metasize : 0;
+ if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
+ return NULL;
+ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
+ nicif_err(nic_dev, drv, nic_dev->netdev,
+ "Missing reserved tailroom\n");
+ return NULL;
+ }
+ xdp_frame->frame_sz = xdp->frame_sz;
+ xdp_frame->data = xdp->data;
+ xdp_frame->len = xdp->data_end - xdp->data;
+ xdp_frame->headroom = (u16)(headroom - sizeof(*xdp_frame));
+ xdp_frame->metasize = (u32)metasize;
+ xdp_frame->mem = xdp->rxq->mem;
+
+ return xdp_frame;
+}
+
+/* XDP_TX path: convert the buff to a frame, post it on queue @q_id and
+ * ring the doorbell. Returns false (and bumps xdp_dropped) on failure.
+ */
+bool hinic3_xmit_xdp_buff(struct net_device *netdev, u16 q_id,
+ struct xdp_buff *xdp)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_txq *txq;
+ struct xdp_frame *xdpf;
+
+ xdpf = xdp_convert_to_frame(xdp, nic_dev);
+ if (!xdpf) {
+ TXQ_STATS_INC(&nic_dev->txqs[q_id], xdp_dropped);
+ return false;
+ }
+ txq = &nic_dev->txqs[q_id];
+
+ if (unlikely(hinic3_xdp_xmit_frame(nic_dev, txq, xdpf) != 0)) {
+ xdp_return_frame(xdpf);
+ TXQ_STATS_INC(txq, xdp_dropped);
+ return false;
+ }
+ hinic3_write_db(txq->sq, txq->cos, SQ_CFLAG_DP,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return true;
+}
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 479466d..9b149da 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -36,6 +36,9 @@ struct hinic3_txq_stats {
u64 busy;
u64 wake;
u64 dropped;
+ u64 xdp_dropped;
+ u64 xdp_xmits;
+ u64 map_xdpf_err;
/* Subdivision statistics show in private tool */
u64 skb_pad_err;
@@ -70,6 +73,11 @@ union hinic3_ip {
unsigned char *hdr;
};
+struct hinic3_xdp_tx_info {
+ struct xdp_frame *xdpf;
+ struct hinic3_dma_info *dma_info;
+};
+
struct hinic3_tx_info {
struct sk_buff *skb;
@@ -98,6 +106,7 @@ struct hinic3_txq {
u32 q_depth;
u32 rsvd2;
+ struct hinic3_xdp_tx_info *xdp_tx_info;
struct hinic3_tx_info *tx_info;
struct hinic3_io_queue *sq;
@@ -142,6 +151,28 @@ int hinic3_flush_txqs(struct net_device *netdev);
void hinic3_set_txq_cos(struct hinic3_nic_dev *nic_dev, u16 start_qid,
u16 q_num, u8 cos);
+int hinic3_maybe_stop_tx(struct hinic3_txq *txq, u16 wqebb_cnt);
+
+u32 hinic3_tx_offload(struct sk_buff *skb,
+ struct hinic3_offload_info *offload_info,
+ struct hinic3_queue_info *queue_info,
+ struct hinic3_txq *txq);
+
+int tx_map_skb(struct hinic3_nic_dev *nic_dev, struct sk_buff *skb,
+ u16 valid_nr_frags, struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo);
+
+void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ struct hinic3_queue_info *queue_info,
+ int nr_descs, u16 owner);
+
+void get_pkt_stats(struct hinic3_tx_info *tx_info, struct sk_buff *skb);
+
+u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u16 num_sge, u16 *curr_pi);
+
void hinic3_tx_set_wqebb_cnt(void *wqe_combo, u32 offload, u16 num_sge);
void hinic3_tx_set_compact_offload_wqebb_cnt(void *wqe_combo, u32 offload, u16 num_sge);
@@ -162,4 +193,21 @@ static inline __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
+int tx_map_xdpf(struct hinic3_nic_dev *nic_dev, struct xdp_frame *frame,
+ struct hinic3_txq *txq, struct hinic3_xdp_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo);
+
+void hinic3_prepare_xdp_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ u16 owner);
+
+int hinic3_xdp_xmit_frame(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_txq *txq, struct xdp_frame *xdpf);
+
+int hinic3_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+bool hinic3_xmit_xdp_buff(struct net_device *netdev, u16 q_id,
+ struct xdp_buff *xdp);
+
+struct xdp_frame *xdp_convert_to_frame(struct xdp_buff *xdp,
+ struct hinic3_nic_dev *nic_dev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
index 0981d94..9ff0cb3 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.c
@@ -4,6 +4,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
#include <net/addrconf.h>
+
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -383,7 +384,7 @@ out:
static int get_dynamic_uld_dev_name(struct hinic3_pcidev *dev, enum hinic3_service_type type,
char *ifname)
{
- u32 out_size = IFNAMSIZ;
+ u32 out_size = type == SERVICE_T_ROCE ? IB_DEVICE_NAME_MAX : IFNAMSIZ;
if (!g_uld_info[type].ioctl)
return -EFAULT;
@@ -392,7 +393,30 @@ static int get_dynamic_uld_dev_name(struct hinic3_pcidev *dev, enum hinic3_servi
NULL, 0, ifname, &out_size);
}
-static bool is_pcidev_match_dev_name(const char *dev_name, struct hinic3_pcidev *dev,
+/* Return true when @dev_name matches the dynamic IB device name of any
+ * RoCE ULD instance in the global chip list.
+ * Assumes the caller already serializes access to g_hinic3_chip_list
+ * (e.g. holds the lld reference) — TODO confirm against callers.
+ */
+static bool judge_by_ib_dev_list(const char *dev_name)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic3_pcidev *dev = NULL;
+ char ib_dev_name[IB_DEVICE_NAME_MAX] = {0};
+
+ list_for_each_entry(chip_node, &g_hinic3_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ if (dev->uld_dev[SERVICE_T_ROCE] == NULL)
+ continue;
+
+ if (get_dynamic_uld_dev_name(dev, SERVICE_T_ROCE,
+ (char *)ib_dev_name) != 0)
+ continue;
+
+ if (strcmp(ib_dev_name, dev_name) == 0)
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_pcidev_match_dev_name(const char *dev_name,
+ struct hinic3_pcidev *dev,
enum hinic3_service_type type)
{
enum hinic3_service_type i;
@@ -404,8 +428,14 @@ static bool is_pcidev_match_dev_name(const char *dev_name, struct hinic3_pcidev
if (type == SERVICE_T_MAX) {
for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) {
- if (!strncmp(dev->uld_dev_name[i], dev_name, IFNAMSIZ))
+ if (i == SERVICE_T_ROCE &&
+ judge_by_ib_dev_list(dev_name))
return true;
+ else if ((i != SERVICE_T_ROCE) &&
+ (strncmp(dev->uld_dev_name[i],
+ dev_name, IFNAMSIZ) == 0))
+ return true;
+
}
} else {
if (!strncmp(dev->uld_dev_name[type], dev_name, IFNAMSIZ))
@@ -421,22 +451,30 @@ static bool is_pcidev_match_dev_name(const char *dev_name, struct hinic3_pcidev
return false;
}
-static struct hinic3_lld_dev *get_lld_dev_by_dev_name(const char *dev_name,
- enum hinic3_service_type type, bool hold)
+static struct hinic3_lld_dev *get_lld_from_ib_dev_list(const char *dev_name,
+ bool hold)
{
struct card_node *chip_node = NULL;
struct hinic3_pcidev *dev = NULL;
-
- lld_hold();
+ char ib_dev_name[IB_DEVICE_NAME_MAX] = {0};
list_for_each_entry(chip_node, &g_hinic3_chip_list, node) {
list_for_each_entry(dev, &chip_node->func_list, node) {
- if (is_pcidev_match_dev_name(dev_name, dev, type)) {
- if (hold)
- lld_dev_hold(&dev->lld_dev);
- lld_put();
- return &dev->lld_dev;
- }
+ if (dev->uld_dev[SERVICE_T_ROCE] == NULL)
+ continue;
+
+ if (get_dynamic_uld_dev_name(dev, SERVICE_T_ROCE,
+ (char *)ib_dev_name) != 0)
+ continue;
+
+ if (strcmp(ib_dev_name, dev_name) != 0)
+ continue;
+
+ if (hold)
+ lld_dev_hold(&dev->lld_dev);
+
+ lld_put();
+ return &dev->lld_dev;
}
}
@@ -445,7 +483,46 @@ static struct hinic3_lld_dev *get_lld_dev_by_dev_name(const char *dev_name,
return NULL;
}
-struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id)
+/* Find the lld_dev whose ULD device name matches @dev_name for @type.
+ * Locking is asymmetric: the caller takes lld_hold(); this function
+ * drops it via lld_put() on every return path. Optionally takes a
+ * reference on the found device when @hold is true.
+ */
+static struct hinic3_lld_dev *get_lld_by_uld_name(const char *dev_name,
+ enum hinic3_service_type type,
+ bool hold)
+{
+ struct card_node *chip_node = NULL;
+ struct hinic3_pcidev *dev = NULL;
+ bool flag;
+
+ list_for_each_entry(chip_node, &g_hinic3_chip_list, node) {
+ list_for_each_entry(dev, &chip_node->func_list, node) {
+ flag = is_pcidev_match_dev_name(dev_name, dev, type);
+ if (!flag)
+ continue;
+
+ if (hold)
+ lld_dev_hold(&dev->lld_dev);
+
+ lld_put();
+ return &dev->lld_dev;
+ }
+ }
+ lld_put();
+
+ return NULL;
+}
+
+/* Look up an lld_dev by device name. RoCE names are matched against the
+ * dynamic IB device list; all other services match the ULD device name.
+ * lld_hold() is taken here; both helpers drop it via lld_put() before
+ * returning.
+ */
+static struct hinic3_lld_dev *get_lld_dev_by_dev_name(const char *dev_name,
+ enum hinic3_service_type type,
+ bool hold)
+{
+ lld_hold();
+
+ if (type == SERVICE_T_ROCE)
+ return get_lld_from_ib_dev_list(dev_name, hold);
+
+ return get_lld_by_uld_name(dev_name, type, hold);
+}
+
+struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(
+ const char *chip_name, u8 port_id)
{
struct card_node *chip_node = NULL;
struct hinic3_pcidev *dev = NULL;
@@ -457,7 +534,8 @@ struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name
continue;
if (hinic3_physical_port_id(dev->hwdev) == port_id &&
- !strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) {
+ !strncmp(chip_node->chip_name, chip_name,
+ IFNAMSIZ)) {
lld_dev_hold(&dev->lld_dev);
lld_put();
@@ -703,9 +781,12 @@ void hinic3_get_os_hot_replace_info(void *oshr_info)
struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev)
{
struct hinic3_pcidev *pci_adapter = pci_get_drvdata(lld_dev->pdev);
+ if (!pci_adapter)
+ return NULL;
return pci_adapter->chip_node;
}
+EXPORT_SYMBOL(hinic3_get_chip_node_by_lld);
static struct card_node *hinic3_get_chip_node_by_hwdev(const void *hwdev)
{
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
index 9815082..6165521 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_dev_mgmt.h
@@ -37,6 +37,11 @@ enum {
HINIC3_IN_REMOVE = 4,
};
+enum {
+ HIROCE_STF_CHANGE = 0,
+ HIROCE_STF_NOT_CHANGE = 1,
+};
+
/* Structure pcidev private */
struct hinic3_pcidev {
struct pci_dev *pcidev;
@@ -82,7 +87,8 @@ struct hinic3_pcidev {
spinlock_t uld_lock; /* uld_state lock */
u16 probe_fault_level;
- u16 rsvd2;
+ u16 roce_stf_nochange : 1;
+ u16 rsvd2 : 15;
u64 rsvd4;
struct workqueue_struct *multi_host_mgmt_workq;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
index 41c439a..60d43e7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
@@ -718,7 +718,7 @@ static int cfg_init_eq(struct hinic3_hwdev *dev)
for (i = 0; i < num_ceq; ++i) {
eq[i].eqn = i;
- eq[i].free = CFG_FREE;
+ eq[i].freed = CFG_FREE;
eq[i].type = SERVICE_T_MAX;
}
@@ -751,7 +751,8 @@ int hinic3_vector_to_eqn(void *hwdev, enum hinic3_service_type type, int vector)
vector_num = (vector_num % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE;
eq = cfg_mgmt->eq_info.eq;
- if (eq[vector_num].type == SERVICE_T_ROCE && eq[vector_num].free == CFG_BUSY)
+ if (eq[vector_num].type == SERVICE_T_ROCE &&
+ eq[vector_num].freed == CFG_BUSY)
eqn = eq[vector_num].eqn;
return eqn;
@@ -844,7 +845,7 @@ static int cfg_enable_interrupt(struct hinic3_hwdev *dev)
/* u32 kernel uses to write allocated vector */
irq_info[i].info.irq_id = entry[i].vector;
irq_info[i].type = SERVICE_T_MAX;
- irq_info[i].free = CFG_FREE;
+ irq_info[i].freed = CFG_FREE;
}
kfree(entry);
@@ -898,14 +899,14 @@ int hinic3_alloc_irqs(void *hwdev, enum hinic3_service_type type, u16 num,
for (i = 0; i < num_new; i++) {
for (j = 0; j < max_num_irq; j++) {
- if (alloc_info[j].free == CFG_FREE) {
+ if (alloc_info[j].freed == CFG_FREE) {
if (irq_info->num_irq_remain == 0) {
sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n");
mutex_unlock(&irq_info->irq_mutex);
return -EINVAL;
}
alloc_info[j].type = type;
- alloc_info[j].free = CFG_BUSY;
+ alloc_info[j].freed = CFG_BUSY;
irq_info_array[i].msix_entry_idx =
alloc_info[j].info.msix_entry_idx;
@@ -945,8 +946,8 @@ void hinic3_free_irq(void *hwdev, enum hinic3_service_type type, u32 irq_id)
for (i = 0; i < max_num_irq; i++) {
if (irq_id == alloc_info[i].info.irq_id &&
type == alloc_info[i].type) {
- if (alloc_info[i].free == CFG_BUSY) {
- alloc_info[i].free = CFG_FREE;
+ if (alloc_info[i].freed == CFG_BUSY) {
+ alloc_info[i].freed = CFG_FREE;
irq_info->num_irq_remain++;
if (irq_info->num_irq_remain > max_num_irq) {
sdk_err(dev->dev_hdl, "Find target,but over range\n");
@@ -1007,9 +1008,9 @@ int hinic3_alloc_ceqs(void *hwdev, enum hinic3_service_type type, int num,
}
for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) {
- if (eq->eq[j].free == CFG_FREE) {
+ if (eq->eq[j].freed == CFG_FREE) {
eq->eq[j].type = type;
- eq->eq[j].free = CFG_BUSY;
+ eq->eq[j].freed = CFG_BUSY;
eq->num_ceq_remain--;
ceq_id_array[i] = eq->eq[j].eqn;
(*act_num)++;
@@ -1043,8 +1044,8 @@ void hinic3_free_ceq(void *hwdev, enum hinic3_service_type type, int ceq_id)
for (i = 0; i < num_ceq; i++) {
if (ceq_id == eq->eq[i].eqn &&
type == cfg_mgmt->eq_info.eq[i].type) {
- if (eq->eq[i].free == CFG_BUSY) {
- eq->eq[i].free = CFG_FREE;
+ if (eq->eq[i].freed == CFG_BUSY) {
+ eq->eq[i].freed = CFG_FREE;
eq->num_ceq_remain++;
if (eq->num_ceq_remain > num_ceq)
eq->num_ceq_remain %= num_ceq;
@@ -1532,6 +1533,44 @@ u8 hinic3_physical_port_id(void *hwdev)
}
EXPORT_SYMBOL(hinic3_physical_port_id);
+/* Record the bifurcated link status for @port_id so it can be read back
+ * later via hinic3_get_bifur_link_status(). Invalid arguments are
+ * logged and ignored.
+ */
+void hinic3_set_bifur_link_status(void *hwdev, u8 port_id, u8 status)
+{
+ struct hinic3_hwdev *dev = hwdev;
+
+ if (dev == NULL) {
+ pr_err("Hwdev pointer is NULL for set bifur link status\n");
+ return;
+ }
+
+ if (port_id >= BIFUR_MAX_LINK_STATUS_NUM) {
+ pr_err("port id:0x%x out of range for set bifur link status\n",
+ port_id);
+ return;
+ }
+
+ dev->bifur_link_status[port_id] = status;
+}
+EXPORT_SYMBOL(hinic3_set_bifur_link_status);
+
+/* Return the last recorded bifurcated link status for @port_id, or 0 on
+ * invalid arguments (logged).
+ */
+u8 hinic3_get_bifur_link_status(void *hwdev, u8 port_id)
+{
+ struct hinic3_hwdev *dev = hwdev;
+
+ if (dev == NULL) {
+ pr_err("Hwdev pointer is NULL for getting bifur link status\n");
+ return 0;
+ }
+
+ if (port_id >= BIFUR_MAX_LINK_STATUS_NUM) {
+ pr_err("port id:0x%x out of range for get bifur link status\n",
+ port_id);
+ return 0;
+ }
+
+ return dev->bifur_link_status[port_id];
+}
+EXPORT_SYMBOL(hinic3_get_bifur_link_status);
+
u16 hinic3_func_max_vf(void *hwdev)
{
struct hinic3_hwdev *dev = hwdev;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
index 2f2310a..b9996d0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h
@@ -258,7 +258,7 @@ struct svc_cap_info {
struct cfg_eq {
enum hinic3_service_type type;
int eqn;
- int free; /* 1 - alocated, 0- freed */
+ int freed; /* 1 - allocated, 0 - freed */
};
struct cfg_eq_info {
@@ -274,7 +274,7 @@ struct cfg_eq_info {
struct irq_alloc_info_st {
enum hinic3_service_type type;
- int free; /* 1 - alocated, 0- freed */
+ int freed; /* 1 - allocated, 0 - freed */
struct irq_info info;
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c
index 8659e0b..c80623d 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c
@@ -551,7 +551,8 @@ int hinic3_set_ppf_flr_type(void *hwdev, enum hinic3_ppf_flr_type flr_type)
&flr_type_set, sizeof(flr_type_set),
&flr_type_set, &out_size);
if (err || !out_size || flr_type_set.head.status) {
- sdk_err(dev->dev_hdl, "Failed to set ppf flr type, err: %d, status: 0x%x, out size: 0x%x\n",
+ sdk_err(dev->dev_hdl,
+ "Failed to set ppf flr type, err: %d, status: 0x%x, out size: 0x%x\n",
err, flr_type_set.head.status, out_size);
return -EIO;
}
@@ -1555,6 +1556,71 @@ free_buf:
}
EXPORT_SYMBOL(hinic3_get_hw_pf_infos);
+/* Query the management firmware for the PF id that owns @func_id.
+ * On success *pf_id is filled in; returns 0 or a negative errno.
+ */
+int hinic3_get_pf_by_func(void *hwdev, u16 func_id, u8 *pf_id)
+{
+ struct comm_cmd_get_pf_by_func *pf_by_func = NULL;
+ u16 out_size = sizeof(*pf_by_func);
+ int err = 0;
+
+ if (!hwdev || !pf_id)
+ return -EINVAL;
+
+ pf_by_func = kzalloc(sizeof(*pf_by_func), GFP_KERNEL);
+ if (!pf_by_func)
+ return -ENOMEM;
+ pf_by_func->func_id = func_id;
+
+ err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_PF_BY_FUNC,
+ pf_by_func, sizeof(*pf_by_func),
+ pf_by_func, &out_size);
+ if (pf_by_func->head.status != 0 || err != 0 || out_size == 0) {
+ sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl,
+ "Failed to get pf by func, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pf_by_func->head.status, out_size);
+ err = -EIO;
+ goto free_buf;
+ }
+
+ *pf_id = pf_by_func->pf_id;
+
+free_buf:
+ kfree(pf_by_func);
+ return err;
+}
+EXPORT_SYMBOL(hinic3_get_pf_by_func);
+
+/* Query the management firmware for this device's PF bus number.
+ * On success *bus_num is filled in; returns 0 or a negative errno.
+ */
+int hinic3_get_pf_bus_by_dev(void *hwdev, u8 *bus_num)
+{
+ struct cmd_get_pf_bus_info_s *pf_bus_by_dev = NULL;
+ u16 out_size = sizeof(*pf_bus_by_dev);
+ int err = 0;
+
+ if (hwdev == NULL || bus_num == NULL)
+ return -EINVAL;
+
+ pf_bus_by_dev = kzalloc(sizeof(*pf_bus_by_dev), GFP_KERNEL);
+ if (pf_bus_by_dev == NULL)
+ return -ENOMEM;
+
+ err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_PF_BUS_BY_DEV,
+ pf_bus_by_dev, sizeof(*pf_bus_by_dev),
+ pf_bus_by_dev, &out_size);
+ if (pf_bus_by_dev->head.status != 0 || err != 0 || out_size == 0) {
+ /* was a copy-paste of hinic3_get_pf_by_func's message */
+ sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl,
+ "Failed to get pf bus by dev, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pf_bus_by_dev->head.status, out_size);
+ err = -EIO;
+ goto free_buf;
+ }
+
+ *bus_num = pf_bus_by_dev->bus_num;
+
+free_buf:
+ kfree(pf_bus_by_dev);
+ return err;
+}
+EXPORT_SYMBOL(hinic3_get_pf_bus_by_dev);
+
int hinic3_get_global_attr(void *hwdev, struct comm_global_attr *attr)
{
struct comm_cmd_get_glb_attr get_attr;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c
index c317f4a..117d4df 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c
@@ -2022,6 +2022,17 @@ u8 hinic3_max_pf_num(void *hwdev)
}
EXPORT_SYMBOL(hinic3_max_pf_num);
+/* Return the PPF hwdev associated with @hwdev, or NULL when @hwdev is
+ * NULL.
+ */
+void *hinic3_ppf_hwdev(void *hwdev)
+{
+ struct hinic3_hwdev *dev = hwdev;
+
+ if (!dev)
+ return NULL;
+
+ return dev->ppf_hwdev;
+}
+EXPORT_SYMBOL(hinic3_ppf_hwdev);
+
void hinic3_fault_event_report(void *hwdev, u16 src, u16 level)
{
if (!hwdev)
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h
index 0ca639f..e365839 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h
@@ -121,6 +121,8 @@ struct mqm_eqm_vram_name_s {
char vram_name[VRAM_NAME_MAX_LEN];
};
+#define BIFUR_MAX_LINK_STATUS_NUM 4
+
struct hinic3_hwdev {
void *adapter_hdl; /* pointer to hinic3_pcidev or NDIS_Adapter */
void *pcidev_hdl; /* pointer to pcidev or Handler */
@@ -196,7 +198,8 @@ struct hinic3_hwdev {
enum hinic3_func_mode func_mode;
enum hinic3_hot_plug_mode hot_plug_mode;
enum hinic3_os_hot_replace_mode hot_replace_mode;
- u32 rsvd3;
+
+ u8 bifur_link_status[BIFUR_MAX_LINK_STATUS_NUM];
DECLARE_BITMAP(func_probe_in_host, MAX_FUNCTION_NUM);
DECLARE_BITMAP(netdev_setup_state, MAX_FUNCTION_NUM);
@@ -234,5 +237,6 @@ struct hinic3_hwdev {
#define COMM_SUPPORT_ONLY_ENHANCE_CMDQ(hwdev) COMM_FEATURE_QW0(hwdev, ONLY_ENHANCE_CMDQ)
void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode);
+void *hinic3_get_service_adapter(void *hwdev, enum hinic3_service_type type);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
index b7f9db5..f4452d0 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_lld.c
@@ -1716,6 +1716,25 @@ static void hinic3_probe_success_process(struct hinic3_pcidev *pci_adapter)
mutex_unlock(&pci_adapter->pdev_mutex);
}
+static void hinic3_probe_update_chip_node_info(
+ struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pcidev;
+ struct card_node *chip_node = pci_adapter->chip_node;
+ struct hinic3_board_info board_info = {0};
+
+ if (hinic3_get_pf_bus_by_dev(pci_adapter->hwdev,
+ &(chip_node->hw_bus_num)) != 0)
+ sdk_err(&pdev->dev, "Failed to get pf bus by dev\n");
+
+ if (hinic3_get_board_info(pci_adapter->hwdev, &board_info,
+ HINIC3_CHANNEL_COMM) == 0)
+ chip_node->board_type = board_info.board_type;
+ else
+ sdk_err(&pdev->dev, "Failed to get board info\n");
+
+}
+
static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
{
struct pci_dev *pdev = pci_adapter->pcidev;
@@ -1759,7 +1778,7 @@ static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
goto set_bdf_err;
}
}
-
+ hinic3_probe_update_chip_node_info(pci_adapter);
hinic3_probe_success_process(pci_adapter);
return 0;
@@ -1953,9 +1972,7 @@ static void hinic3_probe_vf_add_dwork(struct pci_dev *pdev)
if (!hinic3_is_host_vmsec_enable(pdev))
return;
-#if defined(CONFIG_SP_VID_DID)
- if (pdev->vendor == PCI_VENDOR_ID_SPNIC && pdev->device == HINIC3_DEV_SDI_5_1_ID_VF) {
-#elif defined(CONFIG_NF_VID_DID)
+#if defined(CONFIG_NF_VID_DID)
if (pdev->vendor == PCI_VENDOR_ID_NF && pdev->device == NFNIC_DEV_ID_VF) {
#else
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI && pdev->device == HINIC3_DEV_SDI_5_0_ID_VF) {
@@ -2312,14 +2329,7 @@ free_pf_info:
EXPORT_SYMBOL(hinic3_set_vf_service_state);
static const struct pci_device_id hinic3_pci_table[] = {
-#if defined(CONFIG_SP_VID_DID)
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_ID_STANDARD), 0},
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_ID_SDI_5_1_PF), 0},
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_ID_SDI_5_0_PF), 0},
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_ID_SPN120), 0},
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_ID_VF), 0},
- {PCI_VDEVICE(SPNIC, HINIC3_DEV_SDI_5_1_ID_VF), 0},
-#elif defined(CONFIG_NF_VID_DID)
+#ifdef CONFIG_NF_VID_DID
{PCI_VDEVICE(NF, NFNIC_DEV_ID_STANDARD), 0},
{PCI_VDEVICE(NF, NFNIC_DEV_ID_VF), 0},
#else
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c
index 4718458..0e09e2f 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c
@@ -554,7 +554,7 @@ int hinic3_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd,
if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) {
sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl,
- "PF don't support api chain\n");
+ "PF doesn't support api chain\n");
return -EPERM;
}
@@ -573,11 +573,11 @@ int hinic3_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd,
if (hinic3_func_type(hwdev) == TYPE_VF) {
err = -EFAULT;
sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl,
- "VF don't support async cmd\n");
+ "VF doesn't support async cmd\n");
} else if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) {
err = -EPERM;
sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl,
- "PF don't support api chain\n");
+ "PF doesn't support api chain\n");
} else {
err = hinic3_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
index 5a5ea53..d1caa03 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c
@@ -45,6 +45,11 @@ struct hw_drv_module_handle {
hw_driv_module driv_func;
};
+struct nictool_private_data {
+ u32 cmd;
+ struct hinic3_lld_dev *lld_dev;
+};
+
static int get_single_card_info(struct hinic3_lld_dev *lld_dev, const void *buf_in,
u32 in_size, void *buf_out, u32 *out_size)
{
@@ -180,6 +185,11 @@ static int get_pf_dev_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u
struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev);
int id, err;
+ if (card_info == NULL) {
+ pr_err("Invalid card info\n");
+ return -EINVAL;
+ }
+
if (!buf_out || *out_size != sizeof(struct pf_dev_info) * PF_DEV_INFO_NUM) {
pr_err("Invalid parameter: out_buf_size %u, expect %lu\n",
*out_size, sizeof(*dev_info) * PF_DEV_INFO_NUM);
@@ -240,6 +250,11 @@ static int free_knl_mem(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32
struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev);
int id, err;
+ if (card_info == NULL) {
+ pr_err("Invalid card info\n");
+ return -EINVAL;
+ }
+
err = sscanf(card_info->chip_name, HINIC3_CHIP_NAME "%d", &id);
if (err < 0) {
pr_err("Failed to get card id\n");
@@ -294,6 +309,11 @@ static int get_card_func_info(struct hinic3_lld_dev *lld_dev, const void *buf_in
struct card_node *card_info = hinic3_get_chip_node_by_lld(lld_dev);
int err, id = 0;
+ if (card_info == NULL) {
+ pr_err("Invalid card info\n");
+ return -EINVAL;
+ }
+
err = card_info_param_valid(card_info->chip_name, buf_out, *out_size, &id);
if (err)
return err;
@@ -326,6 +346,11 @@ static int get_pf_cap_info(struct hinic3_lld_dev *lld_dev, const void *buf_in, u
struct svc_cap_info *svc_cap_info_in = (struct svc_cap_info *)buf_in;
struct svc_cap_info *svc_cap_info_out = (struct svc_cap_info *)buf_out;
+ if (card_info == NULL) {
+ pr_err("Invalid card info\n");
+ return -EINVAL;
+ }
+
if (*out_size != sizeof(struct svc_cap_info) || in_size != sizeof(struct svc_cap_info) ||
!buf_in || !buf_out) {
pr_err("Invalid parameter: out_buf_size %u, in_size: %u, expect %lu\n",
@@ -370,7 +395,7 @@ static int get_hw_drv_version(struct hinic3_lld_dev *lld_dev, const void *buf_in
}
snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", HINIC3_DRV_VERSION,
- "2025-05-08_00:00:08");
+ "2025-11-17_00:00:00");
return 0;
}
@@ -419,28 +444,6 @@ static int get_mbox_cnt(struct hinic3_lld_dev *lld_dev, const void *buf_in,
}
#endif
-struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
- {FUNC_TYPE, get_func_type},
- {GET_FUNC_IDX, get_func_id},
- {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats},
- {CLEAR_HW_STATS, clear_hw_driver_stats},
- {GET_SELF_TEST_RES, get_self_test_result},
- {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats},
- {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info},
- {IS_DRV_IN_VM, is_driver_in_vm},
- {GET_CHIP_ID, get_all_chip_id_cmd},
- {GET_PF_DEV_INFO, get_pf_dev_info},
- {CMD_FREE_MEM, free_knl_mem},
- {GET_CHIP_INFO, get_card_func_info},
- {GET_FUNC_CAP, get_pf_cap_info},
- {GET_DRV_VERSION, get_hw_drv_version},
- {GET_PF_ID, get_pf_id},
-#ifndef __HIFC__
- {GET_OS_HOT_REPLACE_INFO, get_os_hot_replace_info},
- {GET_MBOX_CNT, (hw_driv_module)get_mbox_cnt},
-#endif
-};
-
static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size,
void **buf_in, u32 out_size, void **buf_out)
{
@@ -476,6 +479,27 @@ static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg,
static int send_to_hw_driver(struct hinic3_lld_dev *lld_dev, struct msg_module *nt_msg,
const void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
+ struct hw_drv_module_handle hw_driv_module_cmd_handle[] = {
+ {FUNC_TYPE, get_func_type},
+ {GET_FUNC_IDX, get_func_id},
+ {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats},
+ {CLEAR_HW_STATS, clear_hw_driver_stats},
+ {GET_SELF_TEST_RES, get_self_test_result},
+ {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats},
+ {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info},
+ {IS_DRV_IN_VM, is_driver_in_vm},
+ {GET_CHIP_ID, get_all_chip_id_cmd},
+ {GET_PF_DEV_INFO, get_pf_dev_info},
+ {CMD_FREE_MEM, free_knl_mem},
+ {GET_CHIP_INFO, get_card_func_info},
+ {GET_FUNC_CAP, get_pf_cap_info},
+ {GET_DRV_VERSION, get_hw_drv_version},
+ {GET_PF_ID, get_pf_id},
+ #ifndef __HIFC__
+ {GET_OS_HOT_REPLACE_INFO, get_os_hot_replace_info},
+ {GET_MBOX_CNT, (hw_driv_module)get_mbox_cnt},
+ #endif
+ };
int index, num_cmds = (int)(sizeof(hw_driv_module_cmd_handle) /
sizeof(hw_driv_module_cmd_handle[0]));
enum driver_cmd_type cmd_type =
@@ -579,6 +603,7 @@ static int cmd_parameter_valid(struct msg_module *nt_msg, unsigned long arg,
}
nt_msg->device_name[IFNAMSIZ - 1] = '\0';
+ nt_msg->ib_device_name[IB_DEVICE_NAME_MAX - 1] = '\0';
return 0;
}
@@ -594,17 +619,25 @@ static struct hinic3_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg)
} else if (nt_msg->module == SEND_TO_CUSTOM_DRIVER &&
nt_msg->msg_formate == CMD_CUSTOM_BOND_GET_CHIP_NAME) {
lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX);
- } else if (nt_msg->module == SEND_TO_VBS_DRIVER || nt_msg->module == SEND_TO_BIFUR_DRIVER) {
+ } else if (nt_msg->module == SEND_TO_VBS_DRIVER ||
+ nt_msg->module == SEND_TO_BIFUR_DRIVER ||
+ nt_msg->msg_formate == BOND_DEFAULT_OFFLOAD) {
lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name);
} else if (nt_msg->module >= SEND_TO_SRV_DRV_BASE && nt_msg->module < SEND_TO_DRIVER_MAX &&
nt_msg->msg_formate != GET_DRV_VERSION) {
lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name,
nt_msg->module - SEND_TO_SRV_DRV_BASE);
+ if (!lld_dev && nt_msg->module == SEND_TO_ROCE_DRIVER)
+ lld_dev = hinic3_get_lld_dev_by_dev_name(
+ nt_msg->ib_device_name, SERVICE_T_ROCE);
} else {
lld_dev = hinic3_get_lld_dev_by_chip_name(nt_msg->device_name);
if (!lld_dev)
lld_dev = hinic3_get_lld_dev_by_dev_name(nt_msg->device_name,
SERVICE_T_MAX);
+ if (!lld_dev)
+ lld_dev = hinic3_get_lld_dev_by_dev_name(
+ nt_msg->ib_device_name, SERVICE_T_ROCE);
}
return lld_dev;
@@ -639,6 +672,13 @@ static long hinicadm_k_unlocked_ioctl(struct file *pfile, unsigned long arg)
return 0;
}
+ if (pfile->private_data != NULL) {
+ struct nictool_private_data *private_data =
+ (struct nictool_private_data *)pfile->private_data;
+ private_data->cmd = nt_msg.msg_formate;
+ private_data->lld_dev = lld_dev;
+ }
+
ret = alloc_tmp_buf(hinic3_get_sdk_hwdev_by_lld(lld_dev), &nt_msg,
in_size, &buf_in, out_size_expect, &buf_out);
if (ret) {
@@ -755,11 +795,44 @@ static long dbgtool_k_unlocked_ioctl(struct file *pfile,
static int nictool_k_release(struct inode *pnode, struct file *pfile)
{
+ if (pfile->private_data != NULL) {
+ struct nictool_private_data *private_data =
+ (struct nictool_private_data *)pfile->private_data;
+ if (private_data->cmd == SET_MAC_SPEED_STATUS) {
+ struct msg_module nt_msg;
+ enum mac_speed_status buf_in = STOP_STATUS;
+ int ret = 0;
+
+ nt_msg.module = SEND_TO_NIC_DRIVER;
+ nt_msg.msg_formate = SET_MAC_SPEED_STATUS;
+ ret = nictool_exec_cmd(private_data->lld_dev, &nt_msg,
+ (void *)&buf_in,
+ sizeof(enum mac_speed_status),
+ NULL, NULL);
+ if (ret != 0) {
+ pr_err("Nictool k release failed, module: %u, ret: %d.\n",
+ nt_msg.module, ret);
+ return ret;
+ }
+ }
+ kfree(pfile->private_data);
+ pfile->private_data = NULL;
+ }
+
return 0;
}
static int nictool_k_open(struct inode *pnode, struct file *pfile)
{
+ struct nictool_private_data *private_data =
+ (struct nictool_private_data *)
+ kzalloc(sizeof(struct nictool_private_data), GFP_KERNEL);
+ if (private_data == NULL) {
+ pr_err("Failed to allocate nictool_private_data\n");
+ return -ENOMEM;
+ }
+ pfile->private_data = (void *)private_data;
+
return 0;
}
@@ -801,7 +874,7 @@ static int hinic3_mem_mmap(struct file *filp, struct vm_area_struct *vma)
}
/* old version of tool set vma->vm_pgoff to 0 */
- phy_addr = offset ? offset : g_card_phy_addr[card_id];
+ phy_addr = (offset != 0) ? offset : g_card_phy_addr[card_id];
/* check phy_addr valid */
if (phy_addr != g_card_phy_addr[card_id]) {
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h
index c943dfc..f83f3fe 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.h
@@ -15,6 +15,12 @@
#define MAX_CARD_NUM (64)
+enum mac_speed_status {
+ STOP_STATUS,
+ RUN_STATUS,
+ READY_STATUS,
+};
+
int nictool_k_init(void *hwdev, void *chip_node);
void nictool_k_uninit(void *hwdev, void *chip_node);
diff --git a/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h
index 01ab739..8e3438b 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h
@@ -14,6 +14,17 @@
(((_id) >= BOND_FIRST_ID) && ((_id) <= BOND_MAX_ID))
#define BOND_ID_IS_INVALID(_id) (!(BOND_ID_IS_VALID(_id)))
+#define MAX_FUNC_NUM (1024)
+#define U32_BITS_NUM 32
+#define FUNC_OFFLOAD_BITMAP_LEN (MAX_FUNC_NUM / U32_BITS_NUM)
+
+#define ARRAY_BITMAP_SET(bm, bit) \
+ ((bm)[(bit) / U32_BITS_NUM] |= (1LU << ((bit) % U32_BITS_NUM)))
+#define ARRAY_BITMAP_CLR(bm, bit) \
+ ((bm)[(bit) / U32_BITS_NUM] &= ~(1LU << ((bit) % U32_BITS_NUM)))
+#define ARRAY_BITMAP_JUDGE(bm, bit) \
+ ((bm)[(bit) / U32_BITS_NUM] & (1LU << ((bit) % U32_BITS_NUM)))
+
enum bond_group_id {
BOND_FIRST_ID = 1,
BOND_MAX_ID = 4,
@@ -70,4 +81,9 @@ struct tag_bond_get {
struct tag_bond_port_attr attr[BOND_PORT_MAX_NUM];
};
+#define TX_BIFUR_EN(bifur_en, bond_mode) \
+ (((bifur_en) != 0) && \
+ (((bond_mode) == OVS_BOND_MODE_BALANCE) || \
+ ((bond_mode)) == OVS_BOND_MODE_LACP))
+
#endif /** BOND_COMMON_DEFS_H */
diff --git a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
index f9737ea..1f89662 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
@@ -22,6 +22,7 @@ enum servic_bit_define {
SERVICE_BIT_MIGRATE = 12,
SERVICE_BIT_VROCE = 13,
SERVICE_BIT_BIFUR = 14,
+ SERVICE_BIT_TXBOND = 15,
SERVICE_BIT_MAX
};
@@ -40,6 +41,7 @@ enum servic_bit_define {
#define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE)
#define CFG_SERVICE_MASK_VROCE (0x1 << SERVICE_BIT_VROCE)
#define CFG_SERVICE_MASK_BIFUR (0x1 << SERVICE_BIT_BIFUR)
+#define CFG_SERVICE_MASK_TXBOND (0x1 << SERVICE_BIT_TXBOND)
/* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. */
enum scenes_id_define {
diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h
index e36ba1d..e077fcd 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h
@@ -147,6 +147,13 @@ struct hinic3_lld_dev *hinic3_get_ppf_lld_dev(struct hinic3_lld_dev *lld_dev);
**/
struct hinic3_lld_dev *hinic3_get_ppf_lld_dev_unsafe(struct hinic3_lld_dev *lld_dev);
+/**
+ * @brief hinic3_get_chip_node_by_lld -
+ * get chip node device by current function's lld device
+ * @param lld_dev: current function's lld device
+ **/
+struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev);
+
/**
* @brief uld_dev_hold - get reference to uld_dev
* @param lld_dev: lld device
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h
index 4cd6f94..1e68d3c 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h
@@ -60,6 +60,10 @@ enum mag_cmd {
MAG_CMD_GET_PCS_ERR_CNT = 154, /* pcs err count @see struct mag_cmd_event_port_info */
MAG_CMD_GET_MAG_CNT = 155, /* fec code count @see struct mag_cmd_get_mag_cnt */
MAG_CMD_DUMP_ANTRAIN_INFO = 156, /* dump anlt info @see mag_cmd_dump_antrain_info */
+ /* < rsfec code count @see struct mag_cmd_get_rsfec_cnt */
+ MAG_CMD_GET_RSFEC_CNT = 157,
+ /* < get speed info @see struct mag_cmd_get_port_speed_info */
+ MAG_CMD_GET_PORT_SPEED = 158,
/* patch reserve cmd */
MAG_CMD_PATCH_RSVD_0 = 200,
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h
index 3841bb5..053334e 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h
@@ -53,6 +53,12 @@ enum hinic3_mgmt_cmd {
COMM_MGMT_CMD_GET_SDI_INFO, /**< get sdi info @see comm_cmd_sdi_info */
COMM_MGMT_CMD_ROOT_CTX_LOAD, /* get root context info @see comm_cmd_root_ctx_load_req_s */
COMM_MGMT_CMD_GET_HW_BOND, /**< get bond info @see comm_cmd_hw_bond_infos */
+ /**< save mpu and npu version @see mpu_and_npu_version_s */
+ COMM_MGMT_CMD_MPU_AND_NPU_VER,
+ /**< get pf id by func id, which includes vf_id and pf_id */
+ COMM_MGMT_CMD_GET_PF_BY_FUNC,
+ /**< pf bus info @see struct cmd_get_pf_bus_info_s */
+ COMM_MGMT_CMD_GET_PF_BUS_BY_DEV,
COMM_MGMT_CMD_UPDATE_FW = 80, /* update firmware @see cmd_update_fw @see comm_info_head */
COMM_MGMT_CMD_ACTIVE_FW, /**< cold active firmware @see cmd_active_firmware */
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
index 698730f..c2eb255 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
@@ -362,7 +362,7 @@ struct hinic3_board_info {
u8 board_id; /**< board id */
u32 rsvd;
u32 service_en_bitmap; /**< service en bitmap */
- u8 scenes_id; /**< scenes id */
+ u8 scenes_id; /**< scene id */
u8 cfg_template_id; /**< cfg template index */
u8 hardware_id; /**< hardware id */
u8 spu_en; /**< spu enable flag */
@@ -420,6 +420,14 @@ struct comm_cmd_hw_pf_infos {
struct hinic3_hw_pf_infos infos; /**< all pf info @see struct hinic3_hw_pf_infos */
};
+struct comm_cmd_get_pf_by_func {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u8 pf_id;
+ u8 rsvd1;
+};
+
struct comm_cmd_bdf_info {
struct mgmt_msg_head head;
@@ -809,6 +817,12 @@ struct cmd_get_bdf_info_s {
u32 vf_num; /**< vf num */
};
+struct cmd_get_pf_bus_info_s {
+ struct mgmt_msg_head head;
+ u8 bus_num;
+ u8 rsv[3];
+};
+
#define CPI_TCAM_DBG_CMD_SET_TASK_ENABLE_VALID 0x1
#define CPI_TCAM_DBG_CMD_SET_TIME_INTERVAL_VALID 0x2
#define CPI_TCAM_DBG_CMD_TYPE_SET 0
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h
index 5b2bdc8..0e40417 100644
--- a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h
+++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h
@@ -25,6 +25,12 @@ enum nic_rss_hash_type {
NIC_RSS_HASH_TYPE_MAX /* MUST BE THE LAST ONE */
};
+enum hinic3_nic_capture_packet_mode {
+ ROCE_CAPTURE_PKT_MODE = 0,
+ NIC_CAPTURE_PKT_MODE,
+ CAPTURE_PKT_MAX
+};
+
#define NIC_RSS_INDIR_SIZE 256
#define NIC_RSS_KEY_SIZE 40
diff --git a/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h
index c8533e5..2807bf9 100644
--- a/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/mag_mpu_cmd_defs.h
@@ -5,6 +5,7 @@
#define MAG_MPU_CMD_DEFS_H
#include "mpu_cmd_base_defs.h"
+#include "bond_common_defs.h"
/* serdes cmd struct define */
#define CMD_ARRAY_BUF_SIZE 64
@@ -480,6 +481,33 @@ enum mag_wire_type {
MAG_CMD_WIRE_TYPE_BACKPLANE = 0x42
};
+#define MAX_NUM_OF_PATH_ULOG 128
+struct mag_cmd_monitor_mac_speed {
+ struct mgmt_msg_head head;
+
+ u32 time;
+ u32 cpu_id;
+ u8 direction;
+ u8 number;
+ u8 status;
+ u8 log_file[MAX_NUM_OF_PATH_ULOG];
+ u8 rsvd;
+};
+
+#define ETH_ALEN 6
+struct mag_cmd_bond_default_offload {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u16 vf_num;
+ u16 bond_id;
+ u8 enable;
+ u8 slaves;
+ u8 mac[ETH_ALEN];
+ u8 is_offload;
+ u8 sync_flag;
+};
+
struct mag_cmd_get_xsfp_info {
struct mgmt_msg_head head;
@@ -683,7 +711,7 @@ struct mag_cmd_event_port_info {
};
struct mag_cmd_rsfec_stats {
- u32 rx_err_lane_phy;
+ u64 rx_err_lane_phy;
};
struct mag_cmd_port_stats {
@@ -868,6 +896,31 @@ struct mag_port_stats {
u64 rx_unfilter_pkts_port;
};
+struct mag_port_speed {
+ u64 time_stamp;
+ u64 mac_total_octs_num;
+};
+
+struct mag_speed_info {
+ u8 direction;
+ u8 length;
+ u8 rsvd0[2];
+};
+
+struct mag_cmd_port_speed_info {
+ struct mgmt_msg_head head;
+
+ u8 port_id;
+ struct mag_speed_info info;
+ u8 rsvd0[3];
+};
+
+struct mag_cmd_get_port_speed {
+ struct mgmt_msg_head head;
+
+ struct mag_port_speed *speed;
+};
+
struct mag_cmd_port_stats_info {
struct mgmt_msg_head head;
@@ -901,6 +954,16 @@ struct mag_cmd_get_mag_cnt {
u32 mag_csr[128];
};
+struct mag_cmd_get_rsfec_cnt {
+ struct mgmt_msg_head head;
+
+ u8 port_id;
+ u8 len;
+ u8 rsvd0[2];
+
+ u64 rx_err_lane;
+};
+
struct mag_cmd_dump_antrain_info {
struct mgmt_msg_head head;
diff --git a/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h
index b0114a0..e54f9f7 100644
--- a/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h
+++ b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd.h
@@ -35,8 +35,6 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_CACHE_OUT_QP_RES,
HINIC3_NIC_CMD_SET_FUNC_ER_FWD_ID,
- HINIC3_NIC_CMD_SET_RQ_CI_CTX,
-
/* MAC & VLAN CFG & VXLAN CFG */
HINIC3_NIC_CMD_GET_MAC = 20,
HINIC3_NIC_CMD_SET_MAC,
@@ -53,13 +51,20 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_RX_RATE_CFG,
HINIC3_NIC_CMD_WR_ORDERING_CFG,
-
+ /* Bond mac address sync to function @see > cmd_bond_mac_sync */
+ HINIC3_NIC_CMD_MAC_SYNC,
+ HINIC3_NIC_CMD_SET_RQ_CI_CTX,
+ HINIC3_NIC_CMD_SET_RQ_ENABLE,
/* SR-IOV */
HINIC3_NIC_CMD_CFG_VF_VLAN = 40,
HINIC3_NIC_CMD_SET_SPOOPCHK_STATE,
/* RATE LIMIT */
HINIC3_NIC_CMD_SET_MAX_MIN_RATE,
+ /** CQE COALESCE CFG */
+ HINIC3_NIC_CMD_CFG_CQE_COALESCE_OFFLOAD,
+ HINIC3_NIC_CMD_CFG_CQE_COALESCE_OFFLOAD_TIMER,
+
/* RSS CFG */
HINIC3_NIC_CMD_RSS_CFG = 60,
HINIC3_NIC_CMD_RSS_TEMP_MGR, /* TODO: delete after implement nego cmd */
@@ -108,6 +113,7 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_QOS_MAP_CFG,
HINIC3_NIC_CMD_FORCE_PKT_DROP,
HINIC3_NIC_CMD_CFG_TX_PROMISC_SKIP = 114,
+ HINIC3_NIC_CMD_GET_CIR_DROP,
HINIC3_NIC_CMD_SET_PORT_FLOW_BIFUR_ENABLE = 117,
HINIC3_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118,
HINIC3_NIC_CMD_INQUIRT_PAUSE_CFG = 119,
@@ -130,6 +136,7 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_SET_UCAPTURE_OPT = 160, /* TODO: move to roce */
HINIC3_NIC_CMD_SET_VHD_CFG,
+ HINIC3_NIC_CMD_GET_UCAPTURE_INFO, /**< Get capture packet enable info */
/* OUT OF BAND */
HINIC3_NIC_CMD_GET_OUTBAND_CFG = 170, /* Get outband vlan cfg info */
@@ -171,6 +178,14 @@ enum hinic3_nic_cmd {
HINIC3_NIC_CMD_GET_RQ_INFO = 241,
+ /** LRO CFG */
+ /* < Set/Get LRO cfg @see > mpu_nic_cmd_lro_cfg */
+ HINIC3_NIC_CMD_LRO_CFG,
+
+ /* VF_LAG */
+ HINIC3_NIC_CMD_CFG_VF_LAG,
+ HINIC3_NIC_CMD_VF_LAG_SYNC_BOND_STATE,
+
HINIC3_NIC_CMD_MAX = 256,
};
diff --git a/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h
index 5c28573..4c837de 100644
--- a/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h
+++ b/drivers/net/ethernet/huawei/hinic3/nic_mpu_cmd_defs.h
@@ -53,6 +53,12 @@ enum nic_feature_cap {
};
#define NIC_F_ALL_MASK 0x7FBFFFF /* 使能所有属性 */
+#define FLOW_BIFUR_CMD_SET 0
+#define FLOW_BIFUR_CMD_GET 1
+#define VF_LAG_VF_NUM_PER_GROUP 32
+#define VF_LAG_VF_NUM_GROUP_NUM 128
+#define MAX_VF_ID 4096
+#define VF_LAG_BOND_MIN_SLAVE_NUM 2
struct hinic3_mgmt_msg_head {
u8 status;
@@ -60,6 +66,25 @@ struct hinic3_mgmt_msg_head {
u8 rsvd0[6];
};
+struct mpu_vf_lag_bitmap {
+ u32 vf_bit_map[VF_LAG_VF_NUM_GROUP_NUM];
+};
+
+struct mpu_vf_lag_bitmap *get_g_vf_lag_bitmap(void);
+
+struct hinic3_vf_lag_cmd {
+ struct hinic3_mgmt_msg_head msg_head;
+
+ u16 func_id;
+ u8 opcode; /* 0 -> set, 1 -> get */
+ u8 en_flag; /* 0 -> disable, 1 -> enable */
+ u8 bond_active_num;
+ u8 bond_active_bitmap;
+ u8 mac_sync_flag;
+ u8 rsvd;
+ struct mpu_vf_lag_bitmap vf_lag_bitmap;
+};
+
#define NIC_MAX_FEATURE_QWORD 4
struct hinic3_cmd_feature_nego {
struct hinic3_mgmt_msg_head msg_head;
@@ -151,6 +176,10 @@ struct hinic3_port_stats_info {
u16 rsvd1;
};
+struct hinic3_cir_drop {
+ u64 rx_discard_phy;
+};
+
struct hinic3_vport_stats {
u64 tx_unicast_pkts_vport;
u64 tx_unicast_bytes_vport;
@@ -215,6 +244,14 @@ struct hinic3_port_stats {
struct hinic3_phy_fpga_port_stats stats;
};
+#define HINIC3_CMD_MAX_DP_DATA_NUM 50
+struct hinic3_cmd_get_dp_info_resp {
+ struct hinic3_mgmt_msg_head head;
+ u16 length;
+ u16 rsv;
+ u64 value[HINIC3_CMD_MAX_DP_DATA_NUM];
+};
+
struct hinic3_cmd_vport_stats {
struct hinic3_mgmt_msg_head msg_head;
@@ -321,6 +358,8 @@ struct hinic3_rq_cqe_ctx {
};
#define DFX_SM_TBL_BUF_MAX (768)
+#define MAC_SHADOW_TBL_8_4_SIZE 12
+#define VF_LAG_TABLE_ARG_NUM 64
struct nic_cmd_dfx_sm_table {
struct hinic3_mgmt_msg_head msg_head;
@@ -340,7 +379,7 @@ struct hinic3_cmd_vlan_offload {
/* ucode capture cfg info */
struct nic_cmd_capture_info {
struct hinic3_mgmt_msg_head msg_head;
- u32 op_type;
+ u32 op_type; /* 0 -- roce, 1 -- nic */
u32 func_port;
u32 is_en_trx;
u32 offset_cos;
@@ -376,6 +415,28 @@ struct hinic3_cmd_local_lro_state {
u8 state; /* 0: disable, 1: enable */
};
+/* lro_cfg data_type */
+#define LRO_OP_SET 1
+#define LRO_OP_GET 0
+
+enum {
+ NIC_SOFT_LRO_DISABLE = 0,
+ NIC_HW_LRO_MAX_LEN,
+ NIC_HW_LRO_MAX_NUM,
+ NIC_HW_LRO_TIMEOUT,
+ NIC_LRO_CFG_MAX
+};
+
+struct hinic3_cmd_lro_cfg {
+ struct hinic3_mgmt_msg_head msg_head;
+
+ u16 func_id;
+ u8 data;
+ u8 data_type;
+ u8 opcode; /* 0: get state, 1: set state */
+ u8 rsvd1[3];
+};
+
struct hinic3_cmd_gtp_inner_parse_status {
struct hinic3_mgmt_msg_head msg_head;
@@ -384,6 +445,29 @@ struct hinic3_cmd_gtp_inner_parse_status {
u8 status; /* 0: disable, 1: enable */
};
+#define HINIC3_CMD_TYPE_STATE 0
+#define HINIC3_CMD_TYPE_NUM 1
+
+struct hinic3_cmd_cqe_coalesce_offload {
+ struct hinic3_mgmt_msg_head msg_head;
+
+ u16 func_id;
+ u8 opcode; /* 0: get state, 1: set state */
+ u8 optype; /* 0: state, 1: max_num */
+ u8 state; /* 0: disable, 1: enable */
+ u8 max_num;
+ u8 rsvd[2];
+};
+
+struct hinic3_cmd_cqe_coalesce_timer {
+ struct hinic3_mgmt_msg_head msg_head;
+
+ u8 opcode; /* 1: set timer value, 0: get timer value */
+ u8 rsvd1;
+ u16 rsvd2;
+ u32 timer;
+};
+
struct hinic3_cmd_vf_vlan_config {
struct hinic3_mgmt_msg_head msg_head;
--
2.43.0
2
1
Zicheng Qu (11):
xsched: unify log prefix format and remove duplicated prefix macros
xsched: enforce valid xsched scheduler config dependencies
xsched: add missing spin_unlock() in xcu_move_task() error path
xsched: avoid sleeping while holding spinlock in xcu_move_task()
xsched: remove parent->lock and rely on cgroup_mutex for shares update
xsched: unify root detection logic for cgroups
xsched: replace hard-coded numeric values
xsched: modify the logic for inc and dec the count value
xsched: add null check for sched in xsched_xse_set_class
xsched: rename vstream->id to vstream->sq_id
xsched: move stream_lock into xsched_vsm_add_tail() to avoid sleeping
in atomic context
arch/arm64/configs/openeuler_defconfig | 3 +-
arch/x86/configs/openeuler_defconfig | 3 +-
drivers/xcu/xcu_group.c | 2 +
include/linux/vstream.h | 2 +-
include/linux/xsched.h | 76 ++++++++---------------
kernel/xsched/Kconfig | 5 +-
kernel/xsched/cgroup.c | 79 ++++++++++--------------
kernel/xsched/core.c | 83 ++++++++++++++------------
kernel/xsched/vstream.c | 29 +++------
9 files changed, 119 insertions(+), 163 deletions(-)
--
2.34.1
2
12
This patch set fixes CVE-2024-53179
Paulo Alcantara (1):
smb: client: fix use-after-free of signing key
Shyam Prasad N (1):
cifs: missed ref-counting smb session in find
fs/cifs/smb2proto.h | 2 --
fs/cifs/smb2transport.c | 59 ++++++++++++++++++++++++++++++-----------
2 files changed, 44 insertions(+), 17 deletions(-)
--
2.39.2
2
3
[PATCH OLK-6.6] netfilter: nft_objref: validate objref and objrefmap expressions
by Dong Chenchen 25 Nov '25
by Dong Chenchen 25 Nov '25
25 Nov '25
From: Fernando Fernandez Mancera <fmancera(a)suse.de>
stable inclusion
from stable-v6.6.113
commit 0028e0134c64d9ed21728341a74fcfc59cd0f944
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID6BWD
CVE: CVE-2025-40206
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit f359b809d54c6e3dd1d039b97e0b68390b0e53e4 ]
Referencing a synproxy stateful object from OUTPUT hook causes kernel
crash due to infinite recursive calls:
BUG: TASK stack guard page was hit at 000000008bda5b8c (stack is 000000003ab1c4a5..00000000494d8b12)
[...]
Call Trace:
__find_rr_leaf+0x99/0x230
fib6_table_lookup+0x13b/0x2d0
ip6_pol_route+0xa4/0x400
fib6_rule_lookup+0x156/0x240
ip6_route_output_flags+0xc6/0x150
__nf_ip6_route+0x23/0x50
synproxy_send_tcp_ipv6+0x106/0x200
synproxy_send_client_synack_ipv6+0x1aa/0x1f0
nft_synproxy_do_eval+0x263/0x310
nft_do_chain+0x5a8/0x5f0 [nf_tables
nft_do_chain_inet+0x98/0x110
nf_hook_slow+0x43/0xc0
__ip6_local_out+0xf0/0x170
ip6_local_out+0x17/0x70
synproxy_send_tcp_ipv6+0x1a2/0x200
synproxy_send_client_synack_ipv6+0x1aa/0x1f0
[...]
Implement objref and objrefmap expression validate functions.
Currently, only the NFT_OBJECT_SYNPROXY object type requires validation.
This will also handle a jump to a chain using a synproxy object from the
OUTPUT hook.
Now when trying to reference a synproxy object in the OUTPUT hook, nft
will produce the following error:
synproxy_crash.nft: Error: Could not process rule: Operation not supported
synproxy name mysynproxy
^^^^^^^^^^^^^^^^^^^^^^^^
Fixes: ee394f96ad75 ("netfilter: nft_synproxy: add synproxy stateful object support")
Reported-by: Georg Pfuetzenreuter <georg.pfuetzenreuter(a)suse.com>
Closes: https://bugzilla.suse.com/1250237
Signed-off-by: Fernando Fernandez Mancera <fmancera(a)suse.de>
Reviewed-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Signed-off-by: Florian Westphal <fw(a)strlen.de>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
net/netfilter/nft_objref.c
[commit eaf9b2c875ec is not backported]
Signed-off-by: Dong Chenchen <dongchenchen2(a)huawei.com>
---
net/netfilter/nft_objref.c | 41 ++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 509011b1ef59..2e024f4dd603 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -22,6 +22,36 @@ void nft_objref_eval(const struct nft_expr *expr,
obj->ops->eval(obj, regs, pkt);
}
+static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
+{
+ unsigned int hooks;
+
+ switch (type) {
+ case NFT_OBJECT_SYNPROXY:
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
+ hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nft_objref_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ struct nft_object *obj = nft_objref_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
+}
+
static int nft_objref_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -93,6 +123,7 @@ static const struct nft_expr_ops nft_objref_ops = {
.activate = nft_objref_activate,
.deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
+ .validate = nft_objref_validate,
.reduce = NFT_REDUCE_READONLY,
};
@@ -198,6 +229,15 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
nf_tables_destroy_set(ctx, priv->set);
}
+static int nft_objref_map_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ const struct nft_objref_map *priv = nft_expr_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, priv->set->objtype);
+}
+
static const struct nft_expr_ops nft_objref_map_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
@@ -207,6 +247,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
+ .validate = nft_objref_map_validate,
.reduce = NFT_REDUCE_READONLY,
};
--
2.25.1
2
1
25 Nov '25
From: Dmitry Safonov <dima(a)arista.com>
stable inclusion
from stable-v6.6.114
commit 48294a67863c9cfa367abb66bbf0ef6548ae124f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID6BBR
CVE: CVE-2025-40173
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=48294a67863c9cfa367abb66bbf0ef6548ae124f
--------------------------------
[ Upstream commit 21f4d45eba0b2dcae5dbc9e5e0ad08735c993f16 ]
Similarly to ipv4 tunnel, ipv6 version updates dev->needed_headroom, too.
While ipv4 tunnel headroom adjustment growth was limited in
commit 5ae1e9922bbd ("net: ip_tunnel: prevent perpetual headroom growth"),
ipv6 tunnel yet increases the headroom without any ceiling.
Reflect ipv4 tunnel headroom adjustment limit on ipv6 version.
Credits to Francesco Ruggeri, who was originally debugging this issue
and wrote local Arista-specific patch and a reproducer.
Fixes: 8eb30be0352d ("ipv6: Create ip6_tnl_xmit")
Cc: Florian Westphal <fw(a)strlen.de>
Cc: Francesco Ruggeri <fruggeri05(a)gmail.com>
Signed-off-by: Dmitry Safonov <dima(a)arista.com>
Link: https://patch.msgid.link/20251009-ip6_tunnel-headroom-v2-1-8e4dbd8f7e35@arista.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Dong Chenchen <dongchenchen2(a)huawei.com>
---
include/net/ip_tunnels.h | 15 +++++++++++++++
net/ipv4/ip_tunnel.c | 14 --------------
net/ipv6/ip6_tunnel.c | 3 +--
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 006a61ddd36f..3d36794cb189 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -489,6 +489,21 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
int headroom, bool reply);
+static inline void ip_tunnel_adj_headroom(struct net_device *dev,
+ unsigned int headroom)
+{
+ /* we must cap headroom to some upperlimit, else pskb_expand_head
+ * will overflow header offsets in skb_headers_offset_update().
+ */
+ const unsigned int max_allowed = 512;
+
+ if (headroom > max_allowed)
+ headroom = max_allowed;
+
+ if (headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b5d64cd3ab0a..090403c8cc6c 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -567,20 +567,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
-{
- /* we must cap headroom to some upperlimit, else pskb_expand_head
- * will overflow header offsets in skb_headers_offset_update().
- */
- static const unsigned int max_allowed = 512;
-
- if (headroom > max_allowed)
- headroom = max_allowed;
-
- if (headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, headroom);
-}
-
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 97905d4174ec..c70ff45649ad 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1255,8 +1255,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
*/
max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ dst->header_len + t->hlen;
- if (max_headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, max_headroom);
+ ip_tunnel_adj_headroom(dev, max_headroom);
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
--
2.25.1
2
1
[PATCH OLK-6.6] tls: wait for pending async decryptions if tls_strp_msg_hold fails
by Dong Chenchen 25 Nov '25
by Dong Chenchen 25 Nov '25
25 Nov '25
From: Sabrina Dubroca <sd(a)queasysnail.net>
stable inclusion
from stable-v6.6.114
commit c61d4368197d65c4809d9271f3b85325a600586a
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID6BBY
CVE: CVE-2025-40176
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=c61d4368197d65c4809d9271f3b85325a600586a
--------------------------------
[ Upstream commit b8a6ff84abbcbbc445463de58704686011edc8e1 ]
Async decryption calls tls_strp_msg_hold to create a clone of the
input skb to hold references to the memory it uses. If we fail to
allocate that clone, proceeding with async decryption can lead to
various issues (UAF on the skb, writing into userspace memory after
the recv() call has returned).
In this case, wait for all pending decryption requests.
Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser")
Reported-by: Jann Horn <jannh(a)google.com>
Signed-off-by: Sabrina Dubroca <sd(a)queasysnail.net>
Link: https://patch.msgid.link/b9fe61dcc07dab15da9b35cf4c7d86382a98caf2.176043204…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Dong Chenchen <dongchenchen2(a)huawei.com>
---
net/tls/tls_sw.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bf445a518883..d7c1b7c1b1db 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1612,8 +1612,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
if (unlikely(darg->async)) {
err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
- if (err)
- __skb_queue_tail(&ctx->async_hold, darg->skb);
+ if (err) {
+ err = tls_decrypt_async_wait(ctx);
+ darg->async = false;
+ }
return err;
}
--
2.25.1
2
1
From: Phillip Lougher <phillip(a)squashfs.org.uk>
mainline inclusion
from mainline-v6.17-rc4
commit 74058c0a9fc8b2b4d5f4a0ef7ee2cfa66a9e49cf
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID3WGW
CVE: CVE-2025-40049
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=74058c0a9fc8b2b4d5f4a0ef7ee2cfa66a9e49cf
--------------------------------
Syzkaller reports a "KMSAN: uninit-value in squashfs_get_parent" bug.
This is caused by open_by_handle_at() being called with a file handle
containing an invalid parent inode number. In particular the inode number
is that of a symbolic link, rather than a directory.
Squashfs_get_parent() gets called with that symbolic link inode, and
accesses the parent member field.
unsigned int parent_ino = squashfs_i(inode)->parent;
Because non-directory inodes in Squashfs do not have a parent value, this
is uninitialised, and this causes an uninitialised value access.
The fix is to initialise parent with the invalid inode 0, which will cause
an EINVAL error to be returned.
Regular inodes used to share the parent field with the block_list_start
field. This is removed in this commit to enable the parent field to
contain the invalid inode number 0.
Link: https://lkml.kernel.org/r/20250918233308.293861-1-phillip@squashfs.org.uk
Fixes: 122601408d20 ("Squashfs: export operations")
Signed-off-by: Phillip Lougher <phillip(a)squashfs.org.uk>
Reported-by: syzbot+157bdef5cf596ad0da2c(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/68cc2431.050a0220.139b6.0001.GAE@google.com/
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
---
fs/squashfs/inode.c | 7 +++++++
fs/squashfs/squashfs_fs_i.h | 2 +-
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index d5918eba27e3..53104f25de51 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -165,6 +165,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -212,6 +213,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
squashfs_i(inode)->block_list_start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
inode->i_data.a_ops = &squashfs_aops;
TRACE("File inode %x:%x, start_block %llx, block_list_start "
@@ -292,6 +294,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_mode |= S_IFLNK;
squashfs_i(inode)->start = block;
squashfs_i(inode)->offset = offset;
+ squashfs_i(inode)->parent = 0;
if (type == SQUASHFS_LSYMLINK_TYPE) {
__le32 xattr;
@@ -329,6 +332,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+ squashfs_i(inode)->parent = 0;
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -353,6 +357,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
rdev = le32_to_cpu(sqsh_ino->rdev);
init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+ squashfs_i(inode)->parent = 0;
TRACE("Device inode %x:%x, rdev %x\n",
SQUASHFS_INODE_BLK(ino), offset, rdev);
@@ -373,6 +378,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_mode |= S_IFSOCK;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
+ squashfs_i(inode)->parent = 0;
break;
}
case SQUASHFS_LFIFO_TYPE:
@@ -392,6 +398,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
inode->i_op = &squashfs_inode_ops;
set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
init_special_inode(inode, inode->i_mode, 0);
+ squashfs_i(inode)->parent = 0;
break;
}
default:
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index 2c82d6f2a456..8e497ac07b9a 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -16,6 +16,7 @@ struct squashfs_inode_info {
u64 xattr;
unsigned int xattr_size;
int xattr_count;
+ int parent;
union {
struct {
u64 fragment_block;
@@ -27,7 +28,6 @@ struct squashfs_inode_info {
u64 dir_idx_start;
int dir_idx_offset;
int dir_idx_cnt;
- int parent;
};
};
struct inode vfs_inode;
--
2.39.2
2
1
[PATCH OLK-6.6] media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
by Long Li 25 Nov '25
by Long Li 25 Nov '25
25 Nov '25
From: Thadeu Lima de Souza Cascardo <cascardo(a)igalia.com>
mainline inclusion
from mainline-v6.17-rc1
commit 0e2ee70291e64a30fe36960c85294726d34a103e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID2QLY
CVE: CVE-2025-40016
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0e2ee70291e64a30fe36960c85294726d34a103e
--------------------------------
Per UVC 1.1+ specification 3.7.2, units and terminals must have a non-zero
unique ID.
```
Each Unit and Terminal within the video function is assigned a unique
identification number, the Unit ID (UID) or Terminal ID (TID), contained in
the bUnitID or bTerminalID field of the descriptor. The value 0x00 is
reserved for undefined ID,
```
If we add a new entity with id 0 or a duplicated ID, it will be marked
as UVC_INVALID_ENTITY_ID.
In a previous attempt commit 3dd075fe8ebb ("media: uvcvideo: Require
entities to have a non-zero unique ID"), we ignored all the invalid units,
this broke a lot of non-compatible cameras. Hopefully we are more lucky
this time.
This also prevents some syzkaller reproducers from triggering warnings due
to a chain of entities referring to themselves. In one particular case, an
Output Unit is connected to an Input Unit, both with the same ID of 1. But
when looking up for the source ID of the Output Unit, that same entity is
found instead of the input entity, which leads to such warnings.
In another case, a backward chain was considered finished as the source ID
was 0. Later on, that entity was found, but its pads were not valid.
Here is a sample stack trace for one of those cases.
[ 20.650953] usb 1-1: new high-speed USB device number 2 using dummy_hcd
[ 20.830206] usb 1-1: Using ep0 maxpacket: 8
[ 20.833501] usb 1-1: config 0 descriptor??
[ 21.038518] usb 1-1: string descriptor 0 read error: -71
[ 21.038893] usb 1-1: Found UVC 0.00 device <unnamed> (2833:0201)
[ 21.039299] uvcvideo 1-1:0.0: Entity type for entity Output 1 was not initialized!
[ 21.041583] uvcvideo 1-1:0.0: Entity type for entity Input 1 was not initialized!
[ 21.042218] ------------[ cut here ]------------
[ 21.042536] WARNING: CPU: 0 PID: 9 at drivers/media/mc/mc-entity.c:1147 media_create_pad_link+0x2c4/0x2e0
[ 21.043195] Modules linked in:
[ 21.043535] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Not tainted 6.11.0-rc7-00030-g3480e43aeccf #444
[ 21.044101] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
[ 21.044639] Workqueue: usb_hub_wq hub_event
[ 21.045100] RIP: 0010:media_create_pad_link+0x2c4/0x2e0
[ 21.045508] Code: fe e8 20 01 00 00 b8 f4 ff ff ff 48 83 c4 30 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc 0f 0b eb e9 0f 0b eb 0a 0f 0b eb 06 <0f> 0b eb 02 0f 0b b8 ea ff ff ff eb d4 66 2e 0f 1f 84 00 00 00 00
[ 21.046801] RSP: 0018:ffffc9000004b318 EFLAGS: 00010246
[ 21.047227] RAX: ffff888004e5d458 RBX: 0000000000000000 RCX: ffffffff818fccf1
[ 21.047719] RDX: 000000000000007b RSI: 0000000000000000 RDI: ffff888004313290
[ 21.048241] RBP: ffff888004313290 R08: 0001ffffffffffff R09: 0000000000000000
[ 21.048701] R10: 0000000000000013 R11: 0001888004313290 R12: 0000000000000003
[ 21.049138] R13: ffff888004313080 R14: ffff888004313080 R15: 0000000000000000
[ 21.049648] FS: 0000000000000000(0000) GS:ffff88803ec00000(0000) knlGS:0000000000000000
[ 21.050271] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 21.050688] CR2: 0000592cc27635b0 CR3: 000000000431c000 CR4: 0000000000750ef0
[ 21.051136] PKRU: 55555554
[ 21.051331] Call Trace:
[ 21.051480] <TASK>
[ 21.051611] ? __warn+0xc4/0x210
[ 21.051861] ? media_create_pad_link+0x2c4/0x2e0
[ 21.052252] ? report_bug+0x11b/0x1a0
[ 21.052540] ? trace_hardirqs_on+0x31/0x40
[ 21.052901] ? handle_bug+0x3d/0x70
[ 21.053197] ? exc_invalid_op+0x1a/0x50
[ 21.053511] ? asm_exc_invalid_op+0x1a/0x20
[ 21.053924] ? media_create_pad_link+0x91/0x2e0
[ 21.054364] ? media_create_pad_link+0x2c4/0x2e0
[ 21.054834] ? media_create_pad_link+0x91/0x2e0
[ 21.055131] ? _raw_spin_unlock+0x1e/0x40
[ 21.055441] ? __v4l2_device_register_subdev+0x202/0x210
[ 21.055837] uvc_mc_register_entities+0x358/0x400
[ 21.056144] uvc_register_chains+0x1fd/0x290
[ 21.056413] uvc_probe+0x380e/0x3dc0
[ 21.056676] ? __lock_acquire+0x5aa/0x26e0
[ 21.056946] ? find_held_lock+0x33/0xa0
[ 21.057196] ? kernfs_activate+0x70/0x80
[ 21.057533] ? usb_match_dynamic_id+0x1b/0x70
[ 21.057811] ? find_held_lock+0x33/0xa0
[ 21.058047] ? usb_match_dynamic_id+0x55/0x70
[ 21.058330] ? lock_release+0x124/0x260
[ 21.058657] ? usb_match_one_id_intf+0xa2/0x100
[ 21.058997] usb_probe_interface+0x1ba/0x330
[ 21.059399] really_probe+0x1ba/0x4c0
[ 21.059662] __driver_probe_device+0xb2/0x180
[ 21.059944] driver_probe_device+0x5a/0x100
[ 21.060170] __device_attach_driver+0xe9/0x160
[ 21.060427] ? __pfx___device_attach_driver+0x10/0x10
[ 21.060872] bus_for_each_drv+0xa9/0x100
[ 21.061312] __device_attach+0xed/0x190
[ 21.061812] device_initial_probe+0xe/0x20
[ 21.062229] bus_probe_device+0x4d/0xd0
[ 21.062590] device_add+0x308/0x590
[ 21.062912] usb_set_configuration+0x7b6/0xaf0
[ 21.063403] usb_generic_driver_probe+0x36/0x80
[ 21.063714] usb_probe_device+0x7b/0x130
[ 21.063936] really_probe+0x1ba/0x4c0
[ 21.064111] __driver_probe_device+0xb2/0x180
[ 21.064577] driver_probe_device+0x5a/0x100
[ 21.065019] __device_attach_driver+0xe9/0x160
[ 21.065403] ? __pfx___device_attach_driver+0x10/0x10
[ 21.065820] bus_for_each_drv+0xa9/0x100
[ 21.066094] __device_attach+0xed/0x190
[ 21.066535] device_initial_probe+0xe/0x20
[ 21.066992] bus_probe_device+0x4d/0xd0
[ 21.067250] device_add+0x308/0x590
[ 21.067501] usb_new_device+0x347/0x610
[ 21.067817] hub_event+0x156b/0x1e30
[ 21.068060] ? process_scheduled_works+0x48b/0xaf0
[ 21.068337] process_scheduled_works+0x5a3/0xaf0
[ 21.068668] worker_thread+0x3cf/0x560
[ 21.068932] ? kthread+0x109/0x1b0
[ 21.069133] kthread+0x197/0x1b0
[ 21.069343] ? __pfx_worker_thread+0x10/0x10
[ 21.069598] ? __pfx_kthread+0x10/0x10
[ 21.069908] ret_from_fork+0x32/0x40
[ 21.070169] ? __pfx_kthread+0x10/0x10
[ 21.070424] ret_from_fork_asm+0x1a/0x30
[ 21.070737] </TASK>
Reported-by: syzbot+0584f746fde3d52b4675(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=0584f746fde3d52b4675
Reported-by: syzbot+dd320d114deb3f5bb79b(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=dd320d114deb3f5bb79b
Reported-by: Youngjun Lee <yjjuny.lee(a)samsung.com>
Fixes: a3fbc2e6bb05 ("media: mc-entity.c: use WARN_ON, validate link pads")
Cc: stable(a)vger.kernel.org
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo(a)igalia.com>
Co-developed-by: Ricardo Ribalda <ribalda(a)chromium.org>
Signed-off-by: Ricardo Ribalda <ribalda(a)chromium.org>
Reviewed-by: Laurent Pinchart <laurent.pinchart(a)ideasonboard.com>
Reviewed-by: Hans de Goede <hansg(a)kernel.org>
Signed-off-by: Hans de Goede <hansg(a)kernel.org>
Signed-off-by: Laurent Pinchart <laurent.pinchart(a)ideasonboard.com>
Signed-off-by: Hans Verkuil <hverkuil+cisco(a)kernel.org>
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
---
drivers/media/usb/uvc/uvc_driver.c | 73 +++++++++++++++++++-----------
drivers/media/usb/uvc/uvcvideo.h | 2 +
2 files changed, 48 insertions(+), 27 deletions(-)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index f98dff65264d..832590a7767c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -135,6 +135,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
{
struct uvc_entity *entity;
+ if (id == UVC_INVALID_ENTITY_ID)
+ return NULL;
+
list_for_each_entry(entity, &dev->entities, list) {
if (entity->id == id)
return entity;
@@ -778,14 +781,27 @@ static const u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
- unsigned int num_pads, unsigned int extra_size)
+static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ u16 id, unsigned int num_pads,
+ unsigned int extra_size)
{
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
unsigned int i;
+ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+ if (id == 0) {
+ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
+ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+ if (uvc_entity_by_id(dev, id)) {
+ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
extra_size = roundup(extra_size, sizeof(*entity->pads));
if (num_pads)
num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
@@ -795,7 +811,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
entity->id = id;
entity->type = type;
@@ -907,10 +923,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
break;
}
- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
- p + 1, 2*n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+ buffer[3], p + 1, 2 * n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1019,10 +1035,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
- 1, n + p);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+ buffer[3], 1, n + p);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
@@ -1078,10 +1094,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return 0;
}
- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
- 1, 0);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+ buffer[3], 1, 0);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
memcpy(term->baSourceID, &buffer[7], 1);
@@ -1100,9 +1116,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, 0);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[5], p);
@@ -1122,9 +1139,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[4], 1);
unit->processing.wMaxMultiplier =
@@ -1151,9 +1168,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1293,9 +1311,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
return dev_err_probe(&dev->intf->dev, irq,
"No IRQ for privacy GPIO\n");
- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (!unit)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
+ UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index e99bfaa62266..cbb5ce963cd0 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -41,6 +41,8 @@
#define UVC_EXT_GPIO_UNIT 0x7ffe
#define UVC_EXT_GPIO_UNIT_ID 0x100
+#define UVC_INVALID_ENTITY_ID 0xffff
+
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
--
2.39.2
2
1