[PATCH OLK-6.6] driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBWWHE

---------------------------------------

Currently, the NFS client can use only one server IP address at a single
mount point. As a result, the hardware capability of multiple storage
nodes and NICs cannot be fully utilized. In multiple financial sites, the
performance cannot meet service requirements. In addition, when a single
link is faulty, services are suspended and the reliability requirements
cannot be met.

27 Apr 2025, 9:28 a.m.
---
fs/lockd/clntlock.c | 1 +
fs/lockd/host.c | 11 +-
fs/nfs/Kconfig | 11 +
fs/nfs/Makefile | 5 +-
fs/nfs/client.c | 28 +-
fs/nfs/dir.c | 11 +-
fs/nfs/enfs/Makefile | 79 +
fs/nfs/enfs/enfs/enfs_init.c | 78 +
fs/nfs/enfs/enfs/enfs_multipath_client.c | 440 +
fs/nfs/enfs/enfs/enfs_multipath_parse.c | 698 +
fs/nfs/enfs/enfsrpc/enfs_rpc_init.c | 9 +
fs/nfs/enfs/enfsrpc/enfs_rpc_init.h | 13 +
fs/nfs/enfs/enfsrpc/enfs_rpc_proc.c | 40 +
fs/nfs/enfs/enfsrpc/enfs_rpc_proc.h | 20 +
.../enfs/enfsrpc/lookupcache/enfs_lookup_cache.c | 517 +
fs/nfs/enfs/include/dns_internal.h | 61 +
fs/nfs/enfs/include/enfs.h | 139 +
fs/nfs/enfs/include/enfs_config.h | 62 +
fs/nfs/enfs/include/enfs_errcode.h | 16 +
fs/nfs/enfs/include/enfs_log.h | 25 +
fs/nfs/enfs/include/enfs_lookup_cache.h | 74 +
fs/nfs/enfs/include/enfs_multipath.h | 25 +
fs/nfs/enfs/include/enfs_multipath_client.h | 29 +
fs/nfs/enfs/include/enfs_multipath_parse.h | 21 +
fs/nfs/enfs/include/enfs_proc.h | 16 +
fs/nfs/enfs/include/enfs_remount.h | 15 +
fs/nfs/enfs/include/enfs_roundrobin.h | 14 +
fs/nfs/enfs/include/enfs_tp_common.h | 129 +
fs/nfs/enfs/include/exten_call.h | 256 +
fs/nfs/enfs/include/init.h | 16 +
fs/nfs/enfs/include/pm_state.h | 27 +
fs/nfs/enfs/include/shard.h | 25 +
.../include/unify_multipath/dpc_rpc_client_api.h | 196 +
.../enfs/include/unify_multipath/multipath_api.h | 214 +
.../enfs/include/unify_multipath/multipath_types.h | 34 +
fs/nfs/enfs/init.c | 76 +
fs/nfs/enfs/mgmt/config/enfs_config.c | 722 +
fs/nfs/enfs/mgmt/mgmt_init.c | 22 +
fs/nfs/enfs/mgmt/mgmt_init.h | 17 +
fs/nfs/enfs/multipath/failover/failover_com.h | 29 +
fs/nfs/enfs/multipath/failover/failover_path.c | 311 +
fs/nfs/enfs/multipath/failover/failover_path.h | 18 +
fs/nfs/enfs/multipath/failover/failover_time.c | 119 +
fs/nfs/enfs/multipath/failover/failover_time.h | 15 +
.../enfs/multipath/load_balance/enfs_roundrobin.c | 357 +
fs/nfs/enfs/multipath/load_balance/shard_route.c | 1989 +++
fs/nfs/enfs/multipath/path_mgmt/dns_process.c | 955 ++
fs/nfs/enfs/multipath/path_mgmt/enfs_multipath.c | 1080 ++
fs/nfs/enfs/multipath/path_mgmt/enfs_path.c | 45 +
fs/nfs/enfs/multipath/path_mgmt/enfs_path.h | 11 +
fs/nfs/enfs/multipath/path_mgmt/enfs_proc.c | 663 +
fs/nfs/enfs/multipath/path_mgmt/enfs_remount.c | 236 +
fs/nfs/enfs/multipath/path_mgmt/exten_call.c | 1018 ++
fs/nfs/enfs/multipath/path_mgmt/pm_ping.c | 449 +
fs/nfs/enfs/multipath/path_mgmt/pm_ping.h | 27 +
fs/nfs/enfs/multipath/path_mgmt/pm_state.c | 204 +
fs/nfs/enfs/unify_multipath/Makefile | 13 +
fs/nfs/enfs/unify_multipath/Rules.mak | 52 +
.../unify_multipath/dpc_adapter/Makefile.kernel | 31 +
.../enfs/unify_multipath/dpc_adapter/dpc_adapter.c | 6 +
.../dpc_adapter/dpc_adapter_module.c | 109 +
fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc.h | 14 +
.../unify_multipath/dpc_adapter/dpc_rpc_client.c | 411 +
.../unify_multipath/dpc_adapter/dpc_rpc_client.h | 32 +
.../dpc_adapter/dpc_rpc_client_null_call.c | 94 +
.../dpc_adapter/dpc_rpc_client_null_call.h | 21 +
.../dpc_adapter/dpc_rpc_client_read.c | 147 +
.../dpc_adapter/dpc_rpc_client_read.h | 23 +
.../dpc_adapter/dpc_rpc_client_write.c | 119 +
.../dpc_adapter/dpc_rpc_client_write.h | 21 +
.../unify_multipath/dpc_adapter/dpc_rpc_conn.c | 158 +
.../unify_multipath/dpc_adapter/dpc_rpc_conn.h | 14 +
.../dpc_adapter/dpc_rpc_io_common.c | 176 +
.../dpc_adapter/dpc_rpc_io_common.h | 269 +
.../dpc_adapter/dpc_rpc_mulp_proc.c | 269 +
.../dpc_adapter/dpc_rpc_mulp_proc.h | 21 +
.../dpc_adapter/dpc_rpc_mulp_proc_shard_view.c | 320 +
.../dpc_adapter/dpc_rpc_mulp_proc_shard_view.h | 21 +
.../unify_multipath/dpc_adapter/dpc_rpc_proc.c | 334 +
.../unify_multipath/dpc_adapter/dpc_rpc_proc.h | 16 +
.../unify_multipath/dpc_adapter/dpc_rpc_util.c | 46 +
.../unify_multipath/dpc_adapter/dpc_rpc_util.h | 11 +
.../enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.c | 81 +
.../enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.h | 15 +
.../unify_multipath/include/dpc_kernel_version.h | 86 +
fs/nfs/enfs/unify_multipath/include/mulp_log.h | 105 +
fs/nfs/enfs/unify_multipath/include/mulp_porting.h | 148 +
fs/nfs/enfs/unify_multipath/infra/mulp_proc.c | 29 +
fs/nfs/enfs/unify_multipath/infra/mulp_tp.c | 644 +
fs/nfs/enfs/unify_multipath/infra/mulp_tp.h | 44 +
fs/nfs/enfs/unify_multipath/infra/mulp_tp_common.h | 126 +
.../infra/multipath_infra_adapter.c | 49 +
fs/nfs/enfs/unify_multipath/multipath/Makefile | 33 +
.../multipath/adapter/diagnose/mulp_diagnose.c | 5 +
.../unify_multipath/multipath/adapter/mulp_init.c | 148 +
.../adapter/multipath_adapter/mulp_adapter.c | 217 +
.../multipath/include/mulp_dataset.h | 104 +
.../multipath/include/mulp_load_balance.h | 33 +
.../multipath/include/mulp_multipath_adapter.h | 22 +
.../multipath/include/mulp_path_detect.h | 13 +
.../multipath/include/mulp_path_mgmt.h | 132 +
.../multipath/include/mulp_shard_view.h | 28 +
.../unify_multipath/multipath/include/multipath.h | 447 +
.../multipath/model/config/mulp_config.c | 5 +
.../multipath/model/dataset/mulp_dataset.c | 248 +
.../multipath/model/shard_view/mulp_shardview.c | 311 +
fs/nfs/enfs/unify_multipath/multipath/multipath.c | 6 +
.../unify_multipath/multipath/multipath_module.c | 25 +
.../multipath/path_mgmt/mpath_create.c | 43 +
.../service/load_balance/mulp_load_balance.c | 43 +
.../multipath/service/path_mgmt/mulp_path_detect.c | 201 +
.../multipath/service/path_mgmt/mulp_path_mgmt.c | 1797 +++
.../service/path_mgmt/mulp_path_mgmt_inner.h | 117 +
.../nfs_adapter/nfs_multipath_adapter.c | 5 +
fs/nfs/enfs/unify_multipath/sunrpc/Makefile | 66 +
.../kernel/5.10.0-182.0.0.95.oe2203sp3/Makefile | 34 +
.../5.10.0-182.0.0.95.oe2203sp3/xprtrdma/Makefile | 8 +
.../xprtrdma/backchannel.c | 284 +
.../xprtrdma/frwr_ops.c | 634 +
.../5.10.0-182.0.0.95.oe2203sp3/xprtrdma/module.c | 52 +
.../xprtrdma/rpc_rdma.c | 1513 +++
.../xprtrdma/svc_rdma.c | 252 +
.../xprtrdma/svc_rdma_backchannel.c | 281 +
.../xprtrdma/svc_rdma_recvfrom.c | 881 ++
.../xprtrdma/svc_rdma_rw.c | 885 ++
.../xprtrdma/svc_rdma_sendto.c | 1006 ++
.../xprtrdma/svc_rdma_transport.c | 614 +
.../xprtrdma/transport.c | 813 ++
.../5.10.0-182.0.0.95.oe2203sp3/xprtrdma/verbs.c | 1539 +++
.../xprtrdma/xprt_rdma.h | 595 +
.../kernel/5.10.0-228.0.0.127.oe2203sp4/Makefile | 34 +
.../5.10.0-228.0.0.127.oe2203sp4/xprtrdma/Makefile | 8 +
.../xprtrdma/backchannel.c | 284 +
.../xprtrdma/frwr_ops.c | 656 +
.../5.10.0-228.0.0.127.oe2203sp4/xprtrdma/module.c | 52 +
.../xprtrdma/rpc_rdma.c | 1513 +++
.../xprtrdma/svc_rdma.c | 252 +
.../xprtrdma/svc_rdma_backchannel.c | 281 +
.../xprtrdma/svc_rdma_recvfrom.c | 881 ++
.../xprtrdma/svc_rdma_rw.c | 885 ++
.../xprtrdma/svc_rdma_sendto.c | 1006 ++
.../xprtrdma/svc_rdma_transport.c | 614 +
.../xprtrdma/transport.c | 819 ++
.../5.10.0-228.0.0.127.oe2203sp4/xprtrdma/verbs.c | 1552 +++
.../xprtrdma/xprt_rdma.h | 598 +
.../.vscode/.cache/clangd/wecode-cpp.db | Bin 0 -> 135168 bytes
.../.vscode/libing-local-build-logs/audit.json | 15 +
.../local-build-2025-03.log | 79 +
.../.vscode/libing-local-build-rsyncd/rsyncd.conf | 12 +
.../.vscode/libing-local-build-rsyncd/rsyncd.log | 1 +
.../.vscode/libing-local-build-rsyncd/rsyncd.pid | 1 +
.../libing-local-build-rsyncd/rsyncd.secrets | 1 +
.../libing-local-build-rsyncd/rsyncdPortFile | 1 +
.../.vscode/settings.json | 6 +
.../.vscode/staticCheckTasks.json | 24 +
.../.vscode/tags-34.wecode-db | Bin 0 -> 1011712 bytes
.../.vscode/tags-34.wecode-lock | Bin 0 -> 4096 bytes
.../kernel/5.10.0-250.0.0.154.oe2203sp4/Makefile | 34 +
.../5.10.0-250.0.0.154.oe2203sp4/xprtrdma/Makefile | 8 +
.../xprtrdma/backchannel.c | 284 +
.../xprtrdma/frwr_ops.c | 656 +
.../5.10.0-250.0.0.154.oe2203sp4/xprtrdma/module.c | 52 +
.../xprtrdma/rpc_rdma.c | 1513 +++
.../xprtrdma/svc_rdma.c | 252 +
.../xprtrdma/svc_rdma_backchannel.c | 281 +
.../xprtrdma/svc_rdma_recvfrom.c | 881 ++
.../xprtrdma/svc_rdma_rw.c | 885 ++
.../xprtrdma/svc_rdma_sendto.c | 1006 ++
.../xprtrdma/svc_rdma_transport.c | 614 +
.../xprtrdma/transport.c | 819 ++
.../5.10.0-250.0.0.154.oe2203sp4/xprtrdma/verbs.c | 1552 +++
.../xprtrdma/xprt_rdma.h | 598 +
.../kernel/5.10.0-60.18.0.50.oe2203/Makefile | 34 +
.../5.10.0-60.18.0.50.oe2203/xprtrdma/Makefile | 8 +
.../xprtrdma/backchannel.c | 284 +
.../5.10.0-60.18.0.50.oe2203/xprtrdma/frwr_ops.c | 634 +
.../5.10.0-60.18.0.50.oe2203/xprtrdma/module.c | 52 +
.../5.10.0-60.18.0.50.oe2203/xprtrdma/rpc_rdma.c | 1508 +++
.../5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma.c | 252 +
.../xprtrdma/svc_rdma_backchannel.c | 281 +
.../xprtrdma/svc_rdma_recvfrom.c | 881 ++
.../xprtrdma/svc_rdma_rw.c | 885 ++
.../xprtrdma/svc_rdma_sendto.c | 1006 ++
.../xprtrdma/svc_rdma_transport.c | 614 +
.../5.10.0-60.18.0.50.oe2203/xprtrdma/transport.c | 811 ++
.../5.10.0-60.18.0.50.oe2203/xprtrdma/verbs.c | 1536 +++
.../5.10.0-60.18.0.50.oe2203/xprtrdma/xprt_rdma.h | 595 +
.../openeuler/.vscode/.cache/clangd/wecode-cpp.db | Bin 0 -> 135168 bytes
.../.vscode/libing-local-build-logs/audit.json | 15 +
.../local-build-2025-04.log | 143 +
.../.vscode/libing-local-build-rsyncd/rsyncd.conf | 12 +
.../.vscode/libing-local-build-rsyncd/rsyncd.log | 2 +
.../.vscode/libing-local-build-rsyncd/rsyncd.pid | 1 +
.../libing-local-build-rsyncd/rsyncd.secrets | 1 +
.../libing-local-build-rsyncd/rsyncdPortFile | 1 +
.../sunrpc/mlnx/openeuler/.vscode/settings.json | 6 +
.../mlnx/openeuler/.vscode/staticCheckTasks.json | 24 +
.../mlnx/openeuler/.vscode/tags-33.wecode-db | Bin 0 -> 8617984 bytes
.../mlnx/openeuler/.vscode/tags-33.wecode-lock | Bin 0 -> 4096 bytes
.../unify_multipath/sunrpc/mlnx/openeuler/Makefile | 36 +
.../sunrpc/mlnx/openeuler/Module.aarch64.symvers | 610 +
.../sunrpc/mlnx/openeuler/Module.x86_64.symvers | 631 +
.../sunrpc/mlnx/openeuler/compat/config.h | 3118 +++++
.../mlnx/openeuler/include/asm-generic/bug.h | 12 +
.../mlnx/openeuler/include/linux/auxiliary_bus.h | 267 +
.../sunrpc/mlnx/openeuler/include/linux/bit.h | 14 +
.../sunrpc/mlnx/openeuler/include/linux/bitfield.h | 165 +
.../sunrpc/mlnx/openeuler/include/linux/bitmap.h | 41 +
.../sunrpc/mlnx/openeuler/include/linux/bitops.h | 17 +
.../sunrpc/mlnx/openeuler/include/linux/bits.h | 24 +
.../mlnx/openeuler/include/linux/blk-mq-pci.h | 43 +
.../mlnx/openeuler/include/linux/blk-mq-rdma.h | 20 +
.../sunrpc/mlnx/openeuler/include/linux/blk-mq.h | 172 +
.../mlnx/openeuler/include/linux/blk_types.h | 12 +
.../sunrpc/mlnx/openeuler/include/linux/blkdev.h | 95 +
.../sunrpc/mlnx/openeuler/include/linux/bpf.h | 56 +
.../mlnx/openeuler/include/linux/bpf_trace.h | 11 +
.../mlnx/openeuler/include/linux/build_bug.h | 17 +
.../sunrpc/mlnx/openeuler/include/linux/cdev.h | 48 +
.../mlnx/openeuler/include/linux/cgroup_rdma.h | 10 +
.../mlnx/openeuler/include/linux/compat-2.6.h | 81 +
.../mlnx/openeuler/include/linux/compat-3.10.h | 8 +
.../mlnx/openeuler/include/linux/compat-3.12.h | 16 +
.../mlnx/openeuler/include/linux/compat-3.15.h | 13 +
.../mlnx/openeuler/include/linux/compat-4.0.h | 26 +
.../mlnx/openeuler/include/linux/compat-4.1.h | 23 +
.../mlnx/openeuler/include/linux/compat-4.10.h | 102 +
.../mlnx/openeuler/include/linux/compat_fix.h | 55 +
.../mlnx/openeuler/include/linux/compiler-clang.h | 26 +
.../mlnx/openeuler/include/linux/compiler-gcc.h | 26 +
.../mlnx/openeuler/include/linux/compiler-intel.h | 17 +
.../sunrpc/mlnx/openeuler/include/linux/compiler.h | 54 +
.../openeuler/include/linux/compiler_attributes.h | 28 +
.../sunrpc/mlnx/openeuler/include/linux/dcbnl.h | 54 +
.../sunrpc/mlnx/openeuler/include/linux/device.h | 114 +
.../sunrpc/mlnx/openeuler/include/linux/dim.h | 351 +
.../sunrpc/mlnx/openeuler/include/linux/ethtool.h | 272 +
.../sunrpc/mlnx/openeuler/include/linux/export.h | 16 +
.../sunrpc/mlnx/openeuler/include/linux/filter.h | 61 +
.../sunrpc/mlnx/openeuler/include/linux/firmware.h | 13 +
.../sunrpc/mlnx/openeuler/include/linux/fs.h | 22 +
.../sunrpc/mlnx/openeuler/include/linux/gfp.h | 26 +
.../mlnx/openeuler/include/linux/hashtable.h | 124 +
.../sunrpc/mlnx/openeuler/include/linux/idr.h | 20 +
.../sunrpc/mlnx/openeuler/include/linux/if_ether.h | 15 +
.../sunrpc/mlnx/openeuler/include/linux/if_link.h | 26 +
.../sunrpc/mlnx/openeuler/include/linux/if_vlan.h | 41 +
.../include/linux/indirect_call_wrapper.h | 71 +
.../sunrpc/mlnx/openeuler/include/linux/inet.h | 122 +
.../sunrpc/mlnx/openeuler/include/linux/inet_lro.h | 10 +
.../mlnx/openeuler/include/linux/inetdevice.h | 54 +
.../mlnx/openeuler/include/linux/interval_tree.h | 16 +
.../sunrpc/mlnx/openeuler/include/linux/irq_poll.h | 10 +
.../sunrpc/mlnx/openeuler/include/linux/kconfig.h | 12 +
.../mlnx/openeuler/include/linux/kern_levels.h | 22 +
.../sunrpc/mlnx/openeuler/include/linux/kernel.h | 31 +
.../sunrpc/mlnx/openeuler/include/linux/kmod.h | 33 +
.../sunrpc/mlnx/openeuler/include/linux/kref.h | 16 +
.../sunrpc/mlnx/openeuler/include/linux/list.h | 32 +
.../sunrpc/mlnx/openeuler/include/linux/llist.h | 13 +
.../sunrpc/mlnx/openeuler/include/linux/lockdep.h | 15 +
.../sunrpc/mlnx/openeuler/include/linux/log2.h | 37 +
.../mlnx/openeuler/include/linux/mlx5/accel.h | 170 +
.../sunrpc/mlnx/openeuler/include/linux/mlx5/cq.h | 207 +
.../mlnx/openeuler/include/linux/mlx5/device.h | 1554 +++
.../mlnx/openeuler/include/linux/mlx5/doorbell.h | 60 +
.../mlnx/openeuler/include/linux/mlx5/driver.h | 1598 +++
.../sunrpc/mlnx/openeuler/include/linux/mlx5/eq.h | 63 +
.../mlnx/openeuler/include/linux/mlx5/eswitch.h | 215 +
.../sunrpc/mlnx/openeuler/include/linux/mlx5/fs.h | 322 +
.../mlnx/openeuler/include/linux/mlx5/fs_helpers.h | 142 +
.../mlnx/openeuler/include/linux/mlx5/macsec.h | 9 +
.../mlnx/openeuler/include/linux/mlx5/mlx5_ifc.h | 12470 +++++++++++++++++
.../openeuler/include/linux/mlx5/mlx5_ifc_fpga.h | 616 +
.../openeuler/include/linux/mlx5/mlx5_ifc_vdpa.h | 168 +
.../mlnx/openeuler/include/linux/mlx5/mpfs.h | 18 +
.../mlnx/openeuler/include/linux/mlx5/nvmf.h | 112 +
.../mlnx/openeuler/include/linux/mlx5/port.h | 255 +
.../sunrpc/mlnx/openeuler/include/linux/mlx5/qp.h | 582 +
.../mlnx/openeuler/include/linux/mlx5/rsc_dump.h | 51 +
.../mlnx/openeuler/include/linux/mlx5/transobj.h | 89 +
.../mlnx/openeuler/include/linux/mlx5/vport.h | 138 +
.../sunrpc/mlnx/openeuler/include/linux/mm.h | 108 +
.../mlnx/openeuler/include/linux/mmu_notifier.h | 18 +
.../mlnx/openeuler/include/linux/mod_devicetable.h | 916 ++
.../sunrpc/mlnx/openeuler/include/linux/module.h | 19 +
.../sunrpc/mlnx/openeuler/include/linux/net.h | 25 +
.../mlnx/openeuler/include/linux/netdev_features.h | 19 +
.../mlnx/openeuler/include/linux/netdevice.h | 338 +
.../sunrpc/mlnx/openeuler/include/linux/nodemask.h | 17 +
.../sunrpc/mlnx/openeuler/include/linux/nospec.h | 70 +
.../mlnx/openeuler/include/linux/nvme-fc-driver.h | 1063 ++
.../sunrpc/mlnx/openeuler/include/linux/nvme-fc.h | 438 +
.../sunrpc/mlnx/openeuler/include/linux/nvme-pci.h | 16 +
.../mlnx/openeuler/include/linux/nvme-peer.h | 64 +
.../mlnx/openeuler/include/linux/nvme-rdma.h | 95 +
.../sunrpc/mlnx/openeuler/include/linux/nvme.h | 1683 +++
.../sunrpc/mlnx/openeuler/include/linux/overflow.h | 307 +
.../sunrpc/mlnx/openeuler/include/linux/page_ref.h | 35 +
.../mlnx/openeuler/include/linux/pci-p2pdma.h | 104 +
.../sunrpc/mlnx/openeuler/include/linux/pci.h | 121 +
.../sunrpc/mlnx/openeuler/include/linux/pci_regs.h | 53 +
.../sunrpc/mlnx/openeuler/include/linux/pm_qos.h | 85 +
.../sunrpc/mlnx/openeuler/include/linux/poll.h | 9 +
.../mlnx/openeuler/include/linux/radix-tree.h | 23 +
.../sunrpc/mlnx/openeuler/include/linux/rbtree.h | 32 +
.../sunrpc/mlnx/openeuler/include/linux/rculist.h | 35 +
.../sunrpc/mlnx/openeuler/include/linux/rcupdate.h | 55 +
.../sunrpc/mlnx/openeuler/include/linux/refcount.h | 48 +
.../mlnx/openeuler/include/linux/rhashtable.h | 2150 +++
.../mlnx/openeuler/include/linux/scatterlist.h | 152 +
.../sunrpc/mlnx/openeuler/include/linux/sched.h | 23 +
.../sunrpc/mlnx/openeuler/include/linux/sched/mm.h | 36 +
.../mlnx/openeuler/include/linux/sched/signal.h | 10 +
.../mlnx/openeuler/include/linux/sched/task.h | 10 +
.../sunrpc/mlnx/openeuler/include/linux/sdt.h | 16 +
.../sunrpc/mlnx/openeuler/include/linux/seq_file.h | 23 +
.../sunrpc/mlnx/openeuler/include/linux/skbuff.h | 33 +
.../sunrpc/mlnx/openeuler/include/linux/slab.h | 47 +
.../sunrpc/mlnx/openeuler/include/linux/stddef.h | 27 +
.../sunrpc/mlnx/openeuler/include/linux/string.h | 54 +
.../mlnx/openeuler/include/linux/sunrpc/auth.h | 13 +
.../mlnx/openeuler/include/linux/sunrpc/rpc_rdma.h | 191 +
.../openeuler/include/linux/sunrpc/rpc_rdma_cid.h | 24 +
.../mlnx/openeuler/include/linux/sunrpc/svc_rdma.h | 302 +
.../openeuler/include/linux/sunrpc/svc_rdma_pcl.h | 128 +
.../mlnx/openeuler/include/linux/sunrpc/xprtrdma.h | 73 +
.../sunrpc/mlnx/openeuler/include/linux/sysfs.h | 30 +
.../sunrpc/mlnx/openeuler/include/linux/t10-pi.h | 231 +
.../mlnx/openeuler/include/linux/timekeeping.h | 15 +
.../sunrpc/mlnx/openeuler/include/linux/types.h | 27 +
.../sunrpc/mlnx/openeuler/include/linux/uaccess.h | 12 +
.../sunrpc/mlnx/openeuler/include/linux/units.h | 94 +
.../sunrpc/mlnx/openeuler/include/linux/uuid.h | 73 +
.../sunrpc/mlnx/openeuler/include/linux/xarray.h | 1836 +++
.../sunrpc/mlnx/openeuler/include/linux/xz.h | 284 +
.../sunrpc/mlnx/openeuler/include/net/addrconf.h | 57 +
.../sunrpc/mlnx/openeuler/include/net/bareudp.h | 19 +
.../sunrpc/mlnx/openeuler/include/net/bonding.h | 184 +
.../sunrpc/mlnx/openeuler/include/net/devlink.h | 210 +
.../sunrpc/mlnx/openeuler/include/net/dst.h | 23 +
.../mlnx/openeuler/include/net/dst_metadata.h | 102 +
.../mlnx/openeuler/include/net/flow_dissector.h | 537 +
.../sunrpc/mlnx/openeuler/include/net/flow_keys.h | 26 +
.../mlnx/openeuler/include/net/flow_offload.h | 391 +
.../sunrpc/mlnx/openeuler/include/net/geneve.h | 21 +
.../sunrpc/mlnx/openeuler/include/net/gre.h | 22 +
.../sunrpc/mlnx/openeuler/include/net/ip_fib.h | 24 +
.../sunrpc/mlnx/openeuler/include/net/ip_tunnels.h | 158 +
.../sunrpc/mlnx/openeuler/include/net/ipv6.h | 24 +
.../sunrpc/mlnx/openeuler/include/net/ipv6_stubs.h | 10 +
.../sunrpc/mlnx/openeuler/include/net/macsec.h | 18 +
.../sunrpc/mlnx/openeuler/include/net/mlxdevm.h | 449 +
.../include/net/netfilter/nf_flow_table.h | 14 +
.../include/net/netfilter/nf_flow_table_4_18.h | 235 +
.../sunrpc/mlnx/openeuler/include/net/netlink.h | 39 +
.../sunrpc/mlnx/openeuler/include/net/pkt_cls.h | 230 +
.../sunrpc/mlnx/openeuler/include/net/psample.h | 28 +
.../sunrpc/mlnx/openeuler/include/net/sock.h | 45 +
.../sunrpc/mlnx/openeuler/include/net/switchdev.h | 63 +
.../mlnx/openeuler/include/net/tc_act/tc_csum.h | 34 +
.../mlnx/openeuler/include/net/tc_act/tc_ct.h | 17 +
.../mlnx/openeuler/include/net/tc_act/tc_ct_4_18.h | 94 +
.../mlnx/openeuler/include/net/tc_act/tc_gact.h | 120 +
.../mlnx/openeuler/include/net/tc_act/tc_mirred.h | 107 +
.../mlnx/openeuler/include/net/tc_act/tc_mpls.h | 10 +
.../mlnx/openeuler/include/net/tc_act/tc_pedit.h | 96 +
.../openeuler/include/net/tc_act/tc_tunnel_key.h | 212 +
.../mlnx/openeuler/include/net/tc_act/tc_vlan.h | 65 +
.../sunrpc/mlnx/openeuler/include/net/tls.h | 27 +
.../sunrpc/mlnx/openeuler/include/net/vxlan.h | 33 +
.../sunrpc/mlnx/openeuler/include/net/xdp.h | 17 +
.../sunrpc/mlnx/openeuler/include/net/xfrm.h | 12 +
.../sunrpc/mlnx/openeuler/include/rdma/ib.h | 82 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_addr.h | 295 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_cache.h | 118 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_cm.h | 574 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_hdrs.h | 307 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_mad.h | 819 ++
.../mlnx/openeuler/include/rdma/ib_marshall.h | 28 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_pack.h | 284 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_pma.h | 130 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_sa.h | 609 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_smi.h | 158 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_sysfs.h | 37 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_umem.h | 270 +
.../mlnx/openeuler/include/rdma/ib_umem_odp.h | 224 +
.../sunrpc/mlnx/openeuler/include/rdma/ib_verbs.h | 5055 +++++++
.../mlnx/openeuler/include/rdma/ib_verbs_nvmf.h | 63 +
.../openeuler/include/rdma/ib_verbs_nvmf_def.h | 53 +
.../sunrpc/mlnx/openeuler/include/rdma/iba.h | 146 +
.../mlnx/openeuler/include/rdma/ibta_vol1_c12.h | 219 +
.../sunrpc/mlnx/openeuler/include/rdma/iw_cm.h | 226 +
.../mlnx/openeuler/include/rdma/iw_portmap.h | 65 +
.../sunrpc/mlnx/openeuler/include/rdma/lag.h | 27 +
.../sunrpc/mlnx/openeuler/include/rdma/mr_pool.h | 17 +
.../sunrpc/mlnx/openeuler/include/rdma/opa_addr.h | 91 +
.../mlnx/openeuler/include/rdma/opa_port_info.h | 385 +
.../sunrpc/mlnx/openeuler/include/rdma/opa_smi.h | 124 +
.../sunrpc/mlnx/openeuler/include/rdma/opa_vnic.h | 97 +
.../sunrpc/mlnx/openeuler/include/rdma/peer_mem.h | 175 +
.../sunrpc/mlnx/openeuler/include/rdma/rdma_cm.h | 394 +
.../mlnx/openeuler/include/rdma/rdma_cm_ib.h | 27 +
.../mlnx/openeuler/include/rdma/rdma_counter.h | 73 +
.../mlnx/openeuler/include/rdma/rdma_netlink.h | 131 +
.../sunrpc/mlnx/openeuler/include/rdma/rdma_vt.h | 532 +
.../sunrpc/mlnx/openeuler/include/rdma/rdmavt_cq.h | 67 +
.../sunrpc/mlnx/openeuler/include/rdma/rdmavt_mr.h | 155 +
.../sunrpc/mlnx/openeuler/include/rdma/rdmavt_qp.h | 1003 ++
.../sunrpc/mlnx/openeuler/include/rdma/restrack.h | 186 +
.../sunrpc/mlnx/openeuler/include/rdma/rw.h | 73 +
.../sunrpc/mlnx/openeuler/include/rdma/signature.h | 124 +
.../mlnx/openeuler/include/rdma/tid_rdma_defs.h | 108 +
.../mlnx/openeuler/include/rdma/uverbs_ioctl.h | 1026 ++
.../openeuler/include/rdma/uverbs_named_ioctl.h | 97 +
.../mlnx/openeuler/include/rdma/uverbs_std_types.h | 178 +
.../mlnx/openeuler/include/rdma/uverbs_types.h | 184 +
.../sunrpc/mlnx/openeuler/include/scsi/iser.h | 78 +
.../sunrpc/mlnx/openeuler/include/scsi/scsi.h | 12 +
.../mlnx/openeuler/include/scsi/scsi_device.h | 20 +
.../openeuler/include/scsi/scsi_transport_srp.h | 145 +
.../sunrpc/mlnx/openeuler/include/scsi/srp.h | 310 +
.../mlnx/openeuler/include/trace/events/ib_mad.h | 406 +
.../mlnx/openeuler/include/trace/events/ib_umad.h | 128 +
.../mlnx/openeuler/include/trace/events/rdma.h | 168 +
.../openeuler/include/trace/events/rdma_core.h | 394 +
.../mlnx/openeuler/include/trace/events/rpcrdma.h | 2247 ++++
.../openeuler/include/trace/events/sunrpc_base.h | 18 +
.../mlnx/openeuler/include/uapi/linux/devlink.h | 99 +
.../mlnx/openeuler/include/uapi/linux/eventpoll.h | 18 +
.../mlnx/openeuler/include/uapi/linux/net_tstamp.h | 12 +
.../mlnx/openeuler/include/uapi/linux/nvme_ioctl.h | 104 +
.../mlnx/openeuler/include/uapi/linux/pkt_cls.h | 294 +
.../openeuler/include/uapi/linux/tc_act/tc_ct.h | 18 +
.../include/uapi/linux/tc_act/tc_ct_4_18.h | 43 +
.../openeuler/include/uapi/linux/tc_act/tc_pedit.h | 79 +
.../include/uapi/linux/tc_act/tc_tunnel_key.h | 75 +
.../include/uapi/mlxdevm/mlxdevm_netlink.h | 206 +
.../mlnx/openeuler/include/uapi/rdma/bnxt_re-abi.h | 126 +
.../mlnx/openeuler/include/uapi/rdma/cxgb4-abi.h | 115 +
.../mlnx/openeuler/include/uapi/rdma/efa-abi.h | 133 +
.../openeuler/include/uapi/rdma/hfi/hfi1_ioctl.h | 174 +
.../openeuler/include/uapi/rdma/hfi/hfi1_user.h | 268 +
.../mlnx/openeuler/include/uapi/rdma/hns-abi.h | 99 +
.../include/uapi/rdma/ib_user_ioctl_cmds.h | 385 +
.../include/uapi/rdma/ib_user_ioctl_verbs.h | 270 +
.../mlnx/openeuler/include/uapi/rdma/ib_user_mad.h | 239 +
.../mlnx/openeuler/include/uapi/rdma/ib_user_sa.h | 77 +
.../openeuler/include/uapi/rdma/ib_user_verbs.h | 1301 ++
.../mlnx/openeuler/include/uapi/rdma/irdma-abi.h | 111 +
.../mlnx/openeuler/include/uapi/rdma/mlx4-abi.h | 191 +
.../mlnx/openeuler/include/uapi/rdma/mlx5-abi.h | 523 +
.../include/uapi/rdma/mlx5_user_ioctl_cmds.h | 361 +
.../include/uapi/rdma/mlx5_user_ioctl_verbs.h | 127 +
.../mlnx/openeuler/include/uapi/rdma/mthca-abi.h | 112 +
.../mlnx/openeuler/include/uapi/rdma/ocrdma-abi.h | 152 +
.../mlnx/openeuler/include/uapi/rdma/qedr-abi.h | 174 +
.../openeuler/include/uapi/rdma/rdma_netlink.h | 595 +
.../openeuler/include/uapi/rdma/rdma_user_cm.h | 341 +
.../openeuler/include/uapi/rdma/rdma_user_ioctl.h | 91 +
.../include/uapi/rdma/rdma_user_ioctl_cmds.h | 87 +
.../openeuler/include/uapi/rdma/rdma_user_rxe.h | 223 +
.../mlnx/openeuler/include/uapi/rdma/rvt-abi.h | 66 +
.../mlnx/openeuler/include/uapi/rdma/siw-abi.h | 186 +
.../openeuler/include/uapi/rdma/vmw_pvrdma-abi.h | 310 +
.../sunrpc/mlnx/openeuler/xprtrdma/Makefile | 9 +
.../mlnx/openeuler/xprtrdma/Module.supported | 3 +
.../sunrpc/mlnx/openeuler/xprtrdma/backchannel.c | 423 +
.../sunrpc/mlnx/openeuler/xprtrdma/frwr_ops.c | 775 ++
.../sunrpc/mlnx/openeuler/xprtrdma/module.c | 56 +
.../sunrpc/mlnx/openeuler/xprtrdma/nvfs.h | 103 +
.../sunrpc/mlnx/openeuler/xprtrdma/nvfs_rpc_rdma.c | 43 +
.../sunrpc/mlnx/openeuler/xprtrdma/nvfs_rpc_rdma.h | 59 +
.../sunrpc/mlnx/openeuler/xprtrdma/rpc_rdma.c | 1683 +++
.../sunrpc/mlnx/openeuler/xprtrdma/rpcrdma_dummy.c | 59 +
.../sunrpc/mlnx/openeuler/xprtrdma/svc_rdma.c | 315 +
.../mlnx/openeuler/xprtrdma/svc_rdma_backchannel.c | 358 +
.../sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_pcl.c | 318 +
.../mlnx/openeuler/xprtrdma/svc_rdma_recvfrom.c | 1476 ++
.../sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_rw.c | 1599 +++
.../mlnx/openeuler/xprtrdma/svc_rdma_sendto.c | 1525 +++
.../mlnx/openeuler/xprtrdma/svc_rdma_transport.c | 715 +
.../sunrpc/mlnx/openeuler/xprtrdma/svcrdma_dummy.c | 76 +
.../sunrpc/mlnx/openeuler/xprtrdma/transport.c | 956 ++
.../sunrpc/mlnx/openeuler/xprtrdma/verbs.c | 1581 +++
.../sunrpc/mlnx/openeuler/xprtrdma/xprt_rdma.h | 693 +
.../mlnx/openeuler/xprtrdma/xprtrdma_dummy.c | 76 +
.../sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/Makefile | 37 +
.../Module.5.15.0-25-generic-64k.symvers | 618 +
.../Module.5.15.0-25-generic.symvers | 618 +
.../mlnx/ubuntu/mlnx-24.07-0.6.1.0/compat/config.h | 2442 ++++
.../drivers/infiniband/debug/memtrack.h | 110 +
.../drivers/infiniband/debug/mtrack.h | 1213 ++
.../mlnx-24.07-0.6.1.0/include/asm-generic/bug.h | 12 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/asm/io.h | 62 +
.../include/linux/auxiliary_bus.h | 267 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/bit.h | 14 +
.../mlnx-24.07-0.6.1.0/include/linux/bitfield.h | 30 +
.../mlnx-24.07-0.6.1.0/include/linux/bitmap.h | 9 +
.../mlnx-24.07-0.6.1.0/include/linux/bitops.h | 26 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/bits.h | 24 +
.../mlnx-24.07-0.6.1.0/include/linux/blk-mq-pci.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/blk-mq.h | 78 +
.../mlnx-24.07-0.6.1.0/include/linux/blk_types.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/blkdev.h | 27 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/bpf.h | 37 +
.../mlnx-24.07-0.6.1.0/include/linux/bpf_trace.h | 9 +
.../mlnx-24.07-0.6.1.0/include/linux/build_bug.h | 15 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/cdev.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/cgroup_rdma.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/cleanup.h | 182 +
.../mlnx-24.07-0.6.1.0/include/linux/compat-2.6.h | 39 +
.../include/linux/compiler-clang.h | 12 +
.../include/linux/compiler-gcc.h | 40 +
.../include/linux/compiler-intel.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/compiler.h | 54 +
.../include/linux/compiler_attributes.h | 46 +
.../mlnx-24.07-0.6.1.0/include/linux/cpu_rmap.h | 16 +
.../mlnx-24.07-0.6.1.0/include/linux/dcbnl.h | 12 +
.../mlnx-24.07-0.6.1.0/include/linux/device.h | 114 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/dim.h | 352 +
.../mlnx-24.07-0.6.1.0/include/linux/ethtool.h | 272 +
.../mlnx-24.07-0.6.1.0/include/linux/export.h | 16 +
.../mlnx-24.07-0.6.1.0/include/linux/filter.h | 72 +
.../mlnx-24.07-0.6.1.0/include/linux/firmware.h | 9 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/fs.h | 22 +
.../mlnx-24.07-0.6.1.0/include/linux/fwctl.h | 112 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/gfp.h | 15 +
.../mlnx-24.07-0.6.1.0/include/linux/hashtable.h | 124 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/idr.h | 10 +
.../mlnx-24.07-0.6.1.0/include/linux/if_ether.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/if_link.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/if_vlan.h | 41 +
.../include/linux/indirect_call_wrapper.h | 71 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/inet.h | 13 +
.../mlnx-24.07-0.6.1.0/include/linux/inetdevice.h | 54 +
.../mlnx-24.07-0.6.1.0/include/linux/interrupt.h | 21 +
.../include/linux/interval_tree.h | 16 +
.../mlnx-24.07-0.6.1.0/include/linux/irq_poll.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/kconfig.h | 12 +
.../mlnx-24.07-0.6.1.0/include/linux/kern_levels.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/kernel.h | 43 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/kmod.h | 33 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/kref.h | 8 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/list.h | 44 +
.../mlnx-24.07-0.6.1.0/include/linux/llist.h | 13 +
.../mlnx-24.07-0.6.1.0/include/linux/lockdep.h | 15 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/log2.h | 37 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/cq.h | 207 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/device.h | 1539 +++
.../include/linux/mlx5/doorbell.h | 60 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/driver.h | 1678 +++
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/eq.h | 63 +
.../include/linux/mlx5/eswitch.h | 224 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/fs.h | 350 +
.../include/linux/mlx5/fs_helpers.h | 94 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/macsec.h | 32 +
.../include/linux/mlx5/mlx5_ifc.h | 13363 +++++++++++++++++++
.../include/linux/mlx5/mlx5_ifc_fpga.h | 381 +
.../include/linux/mlx5/mlx5_ifc_vdpa.h | 224 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/mpfs.h | 18 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/nvmf.h | 112 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/port.h | 269 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/qp.h | 600 +
.../include/linux/mlx5/rsc_dump.h | 51 +
.../include/linux/mlx5/transobj.h | 89 +
.../mlnx-24.07-0.6.1.0/include/linux/mlx5/vport.h | 144 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/mm.h | 18 +
.../include/linux/mmu_notifier.h | 18 +
.../include/linux/mod_devicetable.h | 982 ++
.../mlnx-24.07-0.6.1.0/include/linux/module.h | 19 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/net.h | 25 +
.../include/linux/netdev_features.h | 19 +
.../mlnx-24.07-0.6.1.0/include/linux/netdevice.h | 165 +
.../mlnx-24.07-0.6.1.0/include/linux/nodemask.h | 17 +
.../mlnx-24.07-0.6.1.0/include/linux/nospec.h | 74 +
.../include/linux/nvme-fc-driver.h | 1077 ++
.../mlnx-24.07-0.6.1.0/include/linux/nvme-fc.h | 438 +
.../mlnx-24.07-0.6.1.0/include/linux/nvme-pci.h | 16 +
.../mlnx-24.07-0.6.1.0/include/linux/nvme-peer.h | 64 +
.../mlnx-24.07-0.6.1.0/include/linux/nvme-rdma.h | 99 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme.h | 2040 +++
.../mlnx-24.07-0.6.1.0/include/linux/overflow.h | 157 +
.../mlnx-24.07-0.6.1.0/include/linux/page_ref.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/panic.h | 14 +
.../mlnx-24.07-0.6.1.0/include/linux/pci-p2pdma.h | 93 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/pci.h | 72 +
.../mlnx-24.07-0.6.1.0/include/linux/pci_regs.h | 53 +
.../mlnx-24.07-0.6.1.0/include/linux/pm_qos.h | 83 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/poll.h | 9 +
.../mlnx-24.07-0.6.1.0/include/linux/radix-tree.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/rbtree.h | 23 +
.../mlnx-24.07-0.6.1.0/include/linux/rculist.h | 35 +
.../mlnx-24.07-0.6.1.0/include/linux/rcupdate.h | 52 +
.../mlnx-24.07-0.6.1.0/include/linux/refcount.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/scatterlist.h | 14 +
.../mlnx-24.07-0.6.1.0/include/linux/sched.h | 8 +
.../include/linux/sched/signal.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/sched/task.h | 8 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/sdt.h | 16 +
.../mlnx-24.07-0.6.1.0/include/linux/seq_file.h | 29 +
.../mlnx-24.07-0.6.1.0/include/linux/skbuff.h | 28 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/slab.h | 28 +
.../mlnx-24.07-0.6.1.0/include/linux/stddef.h | 104 +
.../mlnx-24.07-0.6.1.0/include/linux/string.h | 31 +
.../mlnx-24.07-0.6.1.0/include/linux/sunrpc/auth.h | 13 +
.../include/linux/sunrpc/rpc_rdma.h | 191 +
.../include/linux/sunrpc/rpc_rdma_cid.h | 24 +
.../include/linux/sunrpc/svc_rdma.h | 385 +
.../include/linux/sunrpc/svc_rdma_pcl.h | 128 +
.../include/linux/sunrpc/xprtrdma.h | 73 +
.../mlnx-24.07-0.6.1.0/include/linux/sysfs.h | 30 +
.../mlnx-24.07-0.6.1.0/include/linux/t10-pi.h | 115 +
.../mlnx-24.07-0.6.1.0/include/linux/timekeeping.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/types.h | 17 +
.../mlnx-24.07-0.6.1.0/include/linux/uaccess.h | 12 +
.../mlnx-24.07-0.6.1.0/include/linux/units.h | 94 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/uuid.h | 8 +
.../mlnx-24.07-0.6.1.0/include/linux/xarray.h | 1836 +++
.../ubuntu/mlnx-24.07-0.6.1.0/include/linux/xz.h | 284 +
.../mlnx-24.07-0.6.1.0/include/net/addrconf.h | 8 +
.../mlnx-24.07-0.6.1.0/include/net/bareudp.h | 19 +
.../mlnx-24.07-0.6.1.0/include/net/bonding.h | 175 +
.../mlnx-24.07-0.6.1.0/include/net/devlink.h | 71 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/dst.h | 8 +
.../mlnx-24.07-0.6.1.0/include/net/dst_metadata.h | 61 +
.../include/net/flow_dissector.h | 29 +
.../mlnx-24.07-0.6.1.0/include/net/flow_keys.h | 20 +
.../mlnx-24.07-0.6.1.0/include/net/flow_offload.h | 398 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/geneve.h | 21 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/gre.h | 22 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/ip_fib.h | 24 +
.../mlnx-24.07-0.6.1.0/include/net/ip_tunnels.h | 8 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/ipv6.h | 16 +
.../mlnx-24.07-0.6.1.0/include/net/ipv6_stubs.h | 10 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/macsec.h | 33 +
.../mlnx-24.07-0.6.1.0/include/net/mlxdevm.h | 473 +
.../include/net/netfilter/nf_flow_table.h | 14 +
.../include/net/netfilter/nf_flow_table_4_18.h | 235 +
.../mlnx-24.07-0.6.1.0/include/net/netlink.h | 10 +
.../mlnx-24.07-0.6.1.0/include/net/pkt_cls.h | 161 +
.../mlnx-24.07-0.6.1.0/include/net/psample.h | 8 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/sock.h | 37 +
.../mlnx-24.07-0.6.1.0/include/net/switchdev.h | 16 +
.../include/net/tc_act/tc_csum.h | 9 +
.../mlnx-24.07-0.6.1.0/include/net/tc_act/tc_ct.h | 17 +
.../include/net/tc_act/tc_ct_4_18.h | 90 +
.../include/net/tc_act/tc_gact.h | 22 +
.../include/net/tc_act/tc_mirred.h | 70 +
.../include/net/tc_act/tc_mpls.h | 10 +
.../include/net/tc_act/tc_tunnel_key.h | 7 +
.../include/net/tc_act/tc_vlan.h | 14 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/tls.h | 30 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/vxlan.h | 19 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/xdp.h | 19 +
.../mlnx-24.07-0.6.1.0/include/net/xdp_sock_drv.h | 22 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/net/xfrm.h | 36 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib.h | 81 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_addr.h | 250 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_cache.h | 118 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_cm.h | 568 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_hdrs.h | 307 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_mad.h | 821 ++
.../mlnx-24.07-0.6.1.0/include/rdma/ib_marshall.h | 28 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_pack.h | 289 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_pma.h | 130 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_sa.h | 609 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_smi.h | 158 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_sysfs.h | 37 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_umem.h | 280 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_umem_odp.h | 214 +
.../mlnx-24.07-0.6.1.0/include/rdma/ib_verbs.h | 4952 +++++++
.../include/rdma/ib_verbs_nvmf.h | 63 +
.../include/rdma/ib_verbs_nvmf_def.h | 53 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/iba.h | 146 +
.../include/rdma/ibta_vol1_c12.h | 219 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/iw_cm.h | 205 +
.../mlnx-24.07-0.6.1.0/include/rdma/iw_portmap.h | 65 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/lag.h | 27 +
.../mlnx-24.07-0.6.1.0/include/rdma/mr_pool.h | 17 +
.../mlnx-24.07-0.6.1.0/include/rdma/opa_addr.h | 91 +
.../include/rdma/opa_port_info.h | 385 +
.../mlnx-24.07-0.6.1.0/include/rdma/opa_smi.h | 124 +
.../mlnx-24.07-0.6.1.0/include/rdma/opa_vnic.h | 96 +
.../mlnx-24.07-0.6.1.0/include/rdma/peer_mem.h | 175 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdma_cm.h | 394 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdma_cm_ib.h | 27 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdma_counter.h | 69 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdma_netlink.h | 127 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdma_vt.h | 532 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdmavt_cq.h | 67 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdmavt_mr.h | 155 +
.../mlnx-24.07-0.6.1.0/include/rdma/rdmavt_qp.h | 1003 ++
.../mlnx-24.07-0.6.1.0/include/rdma/restrack.h | 176 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rw.h | 73 +
.../mlnx-24.07-0.6.1.0/include/rdma/signature.h | 124 +
.../include/rdma/tid_rdma_defs.h | 108 +
.../mlnx-24.07-0.6.1.0/include/rdma/uverbs_ioctl.h | 1043 ++
.../include/rdma/uverbs_named_ioctl.h | 97 +
.../include/rdma/uverbs_std_types.h | 178 +
.../mlnx-24.07-0.6.1.0/include/rdma/uverbs_types.h | 184 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/scsi/iser.h | 78 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/scsi/scsi.h | 12 +
.../mlnx-24.07-0.6.1.0/include/scsi/scsi_device.h | 8 +
.../include/scsi/scsi_transport_srp.h | 151 +
.../ubuntu/mlnx-24.07-0.6.1.0/include/scsi/srp.h | 310 +
.../include/trace/events/ib_mad.h | 393 +
.../include/trace/events/ib_umad.h | 128 +
.../include/trace/events/rdma_core.h | 394 +
.../include/trace/events/rpcrdma.h | 2333 ++++
.../mlnx-24.07-0.6.1.0/include/trace/misc/rdma.h | 168 +
.../mlnx-24.07-0.6.1.0/include/trace/misc/sunrpc.h | 18 +
.../mlnx-24.07-0.6.1.0/include/uapi/fwctl/fwctl.h | 137 +
.../mlnx-24.07-0.6.1.0/include/uapi/fwctl/mlx5.h | 36 +
.../include/uapi/linux/devlink.h | 53 +
.../include/uapi/linux/eventpoll.h | 18 +
.../include/uapi/linux/net_tstamp.h | 8 +
.../include/uapi/linux/nvme_ioctl.h | 54 +
.../include/uapi/linux/pkt_cls.h | 64 +
.../include/uapi/linux/tc_act/tc_ct.h | 18 +
.../include/uapi/linux/tc_act/tc_ct_4_18.h | 43 +
.../include/uapi/linux/tc_act/tc_tunnel_key.h | 73 +
.../include/uapi/mlxdevm/mlxdevm_netlink.h | 207 +
.../include/uapi/rdma/bnxt_re-abi.h | 205 +
.../include/uapi/rdma/cxgb4-abi.h | 115 +
.../mlnx-24.07-0.6.1.0/include/uapi/rdma/efa-abi.h | 156 +
.../include/uapi/rdma/erdma-abi.h | 49 +
.../include/uapi/rdma/hfi/hfi1_ioctl.h | 174 +
.../include/uapi/rdma/hfi/hfi1_user.h | 268 +
.../mlnx-24.07-0.6.1.0/include/uapi/rdma/hns-abi.h | 149 +
.../include/uapi/rdma/ib_user_ioctl_cmds.h | 385 +
.../include/uapi/rdma/ib_user_ioctl_verbs.h | 275 +
.../include/uapi/rdma/ib_user_mad.h | 239 +
.../include/uapi/rdma/ib_user_sa.h | 77 +
.../include/uapi/rdma/ib_user_verbs.h | 1364 ++
.../include/uapi/rdma/irdma-abi.h | 120 +
.../include/uapi/rdma/mana-abi.h | 66 +
.../include/uapi/rdma/mlx4-abi.h | 191 +
.../include/uapi/rdma/mlx5-abi.h | 526 +
.../include/uapi/rdma/mlx5_user_ioctl_cmds.h | 350 +
.../include/uapi/rdma/mlx5_user_ioctl_verbs.h | 114 +
.../include/uapi/rdma/mthca-abi.h | 112 +
.../include/uapi/rdma/ocrdma-abi.h | 152 +
.../include/uapi/rdma/qedr-abi.h | 174 +
.../include/uapi/rdma/rdma_netlink.h | 606 +
.../include/uapi/rdma/rdma_user_cm.h | 341 +
.../include/uapi/rdma/rdma_user_ioctl.h | 91 +
.../include/uapi/rdma/rdma_user_ioctl_cmds.h | 87 +
.../include/uapi/rdma/rdma_user_rxe.h | 231 +
.../mlnx-24.07-0.6.1.0/include/uapi/rdma/rvt-abi.h | 66 +
.../mlnx-24.07-0.6.1.0/include/uapi/rdma/siw-abi.h | 186 +
.../include/uapi/rdma/vmw_pvrdma-abi.h | 310 +
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/Makefile | 9 +
.../mlnx-24.07-0.6.1.0/xprtrdma/backchannel.c | 334 +
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/frwr_ops.c | 771 ++
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/module.c | 56 +
.../mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/nvfs.h | 103 +
.../mlnx-24.07-0.6.1.0/xprtrdma/nvfs_rpc_rdma.c | 43 +
.../mlnx-24.07-0.6.1.0/xprtrdma/nvfs_rpc_rdma.h | 59 +
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/rpc_rdma.c | 1611 +++
.../mlnx-24.07-0.6.1.0/xprtrdma/rpcrdma_dummy.c | 59 +
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma.c | 338 +
.../xprtrdma/svc_rdma_backchannel.c | 337 +
.../mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_pcl.c | 318 +
.../xprtrdma/svc_rdma_recvfrom.c | 1480 ++
.../mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_rw.c | 1753 +++
.../mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_sendto.c | 1717 +++
.../xprtrdma/svc_rdma_transport.c | 716 +
.../mlnx-24.07-0.6.1.0/xprtrdma/svcrdma_dummy.c | 76 +
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/transport.c | 929 ++
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/verbs.c | 1544 +++
.../ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/xprt_rdma.h | 627 +
.../mlnx-24.07-0.6.1.0/xprtrdma/xprtrdma_dummy.c | 76 +
.../sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/Makefile | 37 +
.../Module.5.15.0-25-generic-64k.symvers | 606 +
.../Module.5.15.0-25-generic.symvers | 618 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/compat/config.h | 3118 +++++
.../drivers/infiniband/debug/memtrack.h | 110 +
.../drivers/infiniband/debug/mtrack.h | 1032 ++
.../mlnx-5.8-1.1.2.1/include/asm-generic/bug.h | 12 +
.../mlnx-5.8-1.1.2.1/include/linux/auxiliary_bus.h | 267 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/bit.h | 14 +
.../mlnx-5.8-1.1.2.1/include/linux/bitfield.h | 165 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/bitmap.h | 41 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/bitops.h | 17 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/bits.h | 24 +
.../mlnx-5.8-1.1.2.1/include/linux/blk-mq-pci.h | 43 +
.../mlnx-5.8-1.1.2.1/include/linux/blk-mq-rdma.h | 20 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/blk-mq.h | 172 +
.../mlnx-5.8-1.1.2.1/include/linux/blk_types.h | 12 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/blkdev.h | 95 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/bpf.h | 56 +
.../mlnx-5.8-1.1.2.1/include/linux/bpf_trace.h | 11 +
.../mlnx-5.8-1.1.2.1/include/linux/build_bug.h | 17 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/cdev.h | 48 +
.../mlnx-5.8-1.1.2.1/include/linux/cgroup_rdma.h | 10 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-2.6.h | 81 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-3.10.h | 8 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-3.12.h | 16 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-3.15.h | 13 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-4.0.h | 26 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-4.1.h | 23 +
.../mlnx-5.8-1.1.2.1/include/linux/compat-4.10.h | 102 +
.../mlnx-5.8-1.1.2.1/include/linux/compat_fix.h | 55 +
.../include/linux/compiler-clang.h | 26 +
.../mlnx-5.8-1.1.2.1/include/linux/compiler-gcc.h | 26 +
.../include/linux/compiler-intel.h | 17 +
.../mlnx-5.8-1.1.2.1/include/linux/compiler.h | 54 +
.../include/linux/compiler_attributes.h | 28 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/dcbnl.h | 54 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/device.h | 114 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/dim.h | 351 +
.../mlnx-5.8-1.1.2.1/include/linux/ethtool.h | 272 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/export.h | 16 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/filter.h | 61 +
.../mlnx-5.8-1.1.2.1/include/linux/firmware.h | 13 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/fs.h | 22 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/gfp.h | 26 +
.../mlnx-5.8-1.1.2.1/include/linux/hashtable.h | 124 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/idr.h | 20 +
.../mlnx-5.8-1.1.2.1/include/linux/if_ether.h | 15 +
.../mlnx-5.8-1.1.2.1/include/linux/if_link.h | 26 +
.../mlnx-5.8-1.1.2.1/include/linux/if_vlan.h | 41 +
.../include/linux/indirect_call_wrapper.h | 71 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/inet.h | 122 +
.../mlnx-5.8-1.1.2.1/include/linux/inet_lro.h | 10 +
.../mlnx-5.8-1.1.2.1/include/linux/inetdevice.h | 54 +
.../mlnx-5.8-1.1.2.1/include/linux/interval_tree.h | 16 +
.../mlnx-5.8-1.1.2.1/include/linux/irq_poll.h | 10 +
.../mlnx-5.8-1.1.2.1/include/linux/kconfig.h | 12 +
.../mlnx-5.8-1.1.2.1/include/linux/kern_levels.h | 22 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/kernel.h | 31 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/kmod.h | 33 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/kref.h | 16 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/list.h | 32 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/llist.h | 13 +
.../mlnx-5.8-1.1.2.1/include/linux/lockdep.h | 15 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/log2.h | 37 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/accel.h | 170 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/cq.h | 207 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/device.h | 1554 +++
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/doorbell.h | 60 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/driver.h | 1598 +++
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/eq.h | 63 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/eswitch.h | 215 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/fs.h | 322 +
.../include/linux/mlx5/fs_helpers.h | 142 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/macsec.h | 9 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/mlx5_ifc.h | 12470 +++++++++++++++++
.../include/linux/mlx5/mlx5_ifc_fpga.h | 616 +
.../include/linux/mlx5/mlx5_ifc_vdpa.h | 168 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/mpfs.h | 18 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/nvmf.h | 112 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/port.h | 255 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/qp.h | 582 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/rsc_dump.h | 51 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/transobj.h | 89 +
.../mlnx-5.8-1.1.2.1/include/linux/mlx5/vport.h | 138 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/mm.h | 108 +
.../mlnx-5.8-1.1.2.1/include/linux/mmu_notifier.h | 18 +
.../include/linux/mod_devicetable.h | 916 ++
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/module.h | 19 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/net.h | 25 +
.../include/linux/netdev_features.h | 19 +
.../mlnx-5.8-1.1.2.1/include/linux/netdevice.h | 338 +
.../mlnx-5.8-1.1.2.1/include/linux/nodemask.h | 17 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/nospec.h | 70 +
.../include/linux/nvme-fc-driver.h | 1063 ++
.../mlnx-5.8-1.1.2.1/include/linux/nvme-fc.h | 438 +
.../mlnx-5.8-1.1.2.1/include/linux/nvme-pci.h | 16 +
.../mlnx-5.8-1.1.2.1/include/linux/nvme-peer.h | 64 +
.../mlnx-5.8-1.1.2.1/include/linux/nvme-rdma.h | 95 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme.h | 1683 +++
.../mlnx-5.8-1.1.2.1/include/linux/overflow.h | 307 +
.../mlnx-5.8-1.1.2.1/include/linux/page_ref.h | 35 +
.../mlnx-5.8-1.1.2.1/include/linux/pci-p2pdma.h | 104 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/pci.h | 121 +
.../mlnx-5.8-1.1.2.1/include/linux/pci_regs.h | 53 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/pm_qos.h | 85 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/poll.h | 9 +
.../mlnx-5.8-1.1.2.1/include/linux/radix-tree.h | 23 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/rbtree.h | 32 +
.../mlnx-5.8-1.1.2.1/include/linux/rculist.h | 35 +
.../mlnx-5.8-1.1.2.1/include/linux/rcupdate.h | 55 +
.../mlnx-5.8-1.1.2.1/include/linux/refcount.h | 48 +
.../mlnx-5.8-1.1.2.1/include/linux/rhashtable.h | 2150 +++
.../mlnx-5.8-1.1.2.1/include/linux/scatterlist.h | 152 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/sched.h | 23 +
.../mlnx-5.8-1.1.2.1/include/linux/sched/mm.h | 36 +
.../mlnx-5.8-1.1.2.1/include/linux/sched/signal.h | 10 +
.../mlnx-5.8-1.1.2.1/include/linux/sched/task.h | 10 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/sdt.h | 16 +
.../mlnx-5.8-1.1.2.1/include/linux/seq_file.h | 23 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/skbuff.h | 33 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/slab.h | 47 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/stddef.h | 27 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/string.h | 54 +
.../mlnx-5.8-1.1.2.1/include/linux/sunrpc/auth.h | 13 +
.../include/linux/sunrpc/rpc_rdma.h | 191 +
.../include/linux/sunrpc/rpc_rdma_cid.h | 24 +
.../include/linux/sunrpc/svc_rdma.h | 302 +
.../include/linux/sunrpc/svc_rdma_pcl.h | 128 +
.../include/linux/sunrpc/xprtrdma.h | 73 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/sysfs.h | 30 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/t10-pi.h | 231 +
.../mlnx-5.8-1.1.2.1/include/linux/timekeeping.h | 15 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/types.h | 27 +
.../mlnx-5.8-1.1.2.1/include/linux/uaccess.h | 12 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/units.h | 94 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/uuid.h | 73 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/xarray.h | 1836 +++
.../ubuntu/mlnx-5.8-1.1.2.1/include/linux/xz.h | 284 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/addrconf.h | 57 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/bareudp.h | 19 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/bonding.h | 184 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/devlink.h | 210 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/dst.h | 23 +
.../mlnx-5.8-1.1.2.1/include/net/dst_metadata.h | 102 +
.../mlnx-5.8-1.1.2.1/include/net/flow_dissector.h | 537 +
.../mlnx-5.8-1.1.2.1/include/net/flow_keys.h | 26 +
.../mlnx-5.8-1.1.2.1/include/net/flow_offload.h | 391 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/geneve.h | 21 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/gre.h | 22 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/ip_fib.h | 24 +
.../mlnx-5.8-1.1.2.1/include/net/ip_tunnels.h | 158 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/ipv6.h | 24 +
.../mlnx-5.8-1.1.2.1/include/net/ipv6_stubs.h | 10 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/macsec.h | 18 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/mlxdevm.h | 449 +
.../include/net/netfilter/nf_flow_table.h | 14 +
.../include/net/netfilter/nf_flow_table_4_18.h | 235 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/netlink.h | 39 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/pkt_cls.h | 230 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/psample.h | 28 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/sock.h | 45 +
.../mlnx-5.8-1.1.2.1/include/net/switchdev.h | 63 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_csum.h | 34 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_ct.h | 17 +
.../include/net/tc_act/tc_ct_4_18.h | 94 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_gact.h | 120 +
.../include/net/tc_act/tc_mirred.h | 107 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_mpls.h | 10 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_pedit.h | 96 +
.../include/net/tc_act/tc_tunnel_key.h | 212 +
.../mlnx-5.8-1.1.2.1/include/net/tc_act/tc_vlan.h | 65 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tls.h | 27 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/vxlan.h | 33 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/xdp.h | 17 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/net/xfrm.h | 12 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib.h | 82 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_addr.h | 295 +
.../mlnx-5.8-1.1.2.1/include/rdma/ib_cache.h | 118 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_cm.h | 574 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_hdrs.h | 307 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_mad.h | 819 ++
.../mlnx-5.8-1.1.2.1/include/rdma/ib_marshall.h | 28 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_pack.h | 284 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_pma.h | 130 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_sa.h | 609 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_smi.h | 158 +
.../mlnx-5.8-1.1.2.1/include/rdma/ib_sysfs.h | 37 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_umem.h | 270 +
.../mlnx-5.8-1.1.2.1/include/rdma/ib_umem_odp.h | 224 +
.../mlnx-5.8-1.1.2.1/include/rdma/ib_verbs.h | 5055 +++++++
.../mlnx-5.8-1.1.2.1/include/rdma/ib_verbs_nvmf.h | 63 +
.../include/rdma/ib_verbs_nvmf_def.h | 53 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/iba.h | 146 +
.../mlnx-5.8-1.1.2.1/include/rdma/ibta_vol1_c12.h | 219 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/iw_cm.h | 226 +
.../mlnx-5.8-1.1.2.1/include/rdma/iw_portmap.h | 65 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/lag.h | 27 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/mr_pool.h | 17 +
.../mlnx-5.8-1.1.2.1/include/rdma/opa_addr.h | 91 +
.../mlnx-5.8-1.1.2.1/include/rdma/opa_port_info.h | 385 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/opa_smi.h | 124 +
.../mlnx-5.8-1.1.2.1/include/rdma/opa_vnic.h | 97 +
.../mlnx-5.8-1.1.2.1/include/rdma/peer_mem.h | 175 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_cm.h | 394 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdma_cm_ib.h | 27 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdma_counter.h | 73 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdma_netlink.h | 131 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_vt.h | 532 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdmavt_cq.h | 67 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdmavt_mr.h | 155 +
.../mlnx-5.8-1.1.2.1/include/rdma/rdmavt_qp.h | 1003 ++
.../mlnx-5.8-1.1.2.1/include/rdma/restrack.h | 186 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rw.h | 73 +
.../mlnx-5.8-1.1.2.1/include/rdma/signature.h | 124 +
.../mlnx-5.8-1.1.2.1/include/rdma/tid_rdma_defs.h | 108 +
.../mlnx-5.8-1.1.2.1/include/rdma/uverbs_ioctl.h | 1026 ++
.../include/rdma/uverbs_named_ioctl.h | 97 +
.../include/rdma/uverbs_std_types.h | 178 +
.../mlnx-5.8-1.1.2.1/include/rdma/uverbs_types.h | 184 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/scsi/iser.h | 78 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/scsi/scsi.h | 12 +
.../mlnx-5.8-1.1.2.1/include/scsi/scsi_device.h | 20 +
.../include/scsi/scsi_transport_srp.h | 145 +
.../ubuntu/mlnx-5.8-1.1.2.1/include/scsi/srp.h | 310 +
.../mlnx-5.8-1.1.2.1/include/trace/events/ib_mad.h | 406 +
.../include/trace/events/ib_umad.h | 128 +
.../mlnx-5.8-1.1.2.1/include/trace/events/rdma.h | 168 +
.../include/trace/events/rdma_core.h | 394 +
.../include/trace/events/rpcrdma.h | 2247 ++++
.../include/trace/events/sunrpc_base.h | 18 +
.../mlnx-5.8-1.1.2.1/include/uapi/linux/devlink.h | 99 +
.../include/uapi/linux/eventpoll.h | 18 +
.../include/uapi/linux/net_tstamp.h | 12 +
.../include/uapi/linux/nvme_ioctl.h | 104 +
.../mlnx-5.8-1.1.2.1/include/uapi/linux/pkt_cls.h | 294 +
.../include/uapi/linux/tc_act/tc_ct.h | 18 +
.../include/uapi/linux/tc_act/tc_ct_4_18.h | 43 +
.../include/uapi/linux/tc_act/tc_pedit.h | 79 +
.../include/uapi/linux/tc_act/tc_tunnel_key.h | 75 +
.../include/uapi/mlxdevm/mlxdevm_netlink.h | 206 +
.../include/uapi/rdma/bnxt_re-abi.h | 126 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/cxgb4-abi.h | 115 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/efa-abi.h | 133 +
.../include/uapi/rdma/hfi/hfi1_ioctl.h | 174 +
.../include/uapi/rdma/hfi/hfi1_user.h | 268 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/hns-abi.h | 99 +
.../include/uapi/rdma/ib_user_ioctl_cmds.h | 385 +
.../include/uapi/rdma/ib_user_ioctl_verbs.h | 270 +
.../include/uapi/rdma/ib_user_mad.h | 239 +
.../include/uapi/rdma/ib_user_sa.h | 77 +
.../include/uapi/rdma/ib_user_verbs.h | 1301 ++
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/irdma-abi.h | 111 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx4-abi.h | 191 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx5-abi.h | 523 +
.../include/uapi/rdma/mlx5_user_ioctl_cmds.h | 361 +
.../include/uapi/rdma/mlx5_user_ioctl_verbs.h | 127 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/mthca-abi.h | 112 +
.../include/uapi/rdma/ocrdma-abi.h | 152 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/qedr-abi.h | 174 +
.../include/uapi/rdma/rdma_netlink.h | 595 +
.../include/uapi/rdma/rdma_user_cm.h | 341 +
.../include/uapi/rdma/rdma_user_ioctl.h | 91 +
.../include/uapi/rdma/rdma_user_ioctl_cmds.h | 87 +
.../include/uapi/rdma/rdma_user_rxe.h | 223 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/rvt-abi.h | 66 +
.../mlnx-5.8-1.1.2.1/include/uapi/rdma/siw-abi.h | 186 +
.../include/uapi/rdma/vmw_pvrdma-abi.h | 310 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/Makefile | 9 +
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/backchannel.c | 423 +
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/frwr_ops.c | 775 ++
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/module.c | 56 +
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/nvfs.h | 103 +
.../mlnx-5.8-1.1.2.1/xprtrdma/nvfs_rpc_rdma.c | 43 +
.../mlnx-5.8-1.1.2.1/xprtrdma/nvfs_rpc_rdma.h | 59 +
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/rpc_rdma.c | 1683 +++
.../mlnx-5.8-1.1.2.1/xprtrdma/rpcrdma_dummy.c | 59 +
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma.c | 315 +
.../xprtrdma/svc_rdma_backchannel.c | 358 +
.../mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_pcl.c | 318 +
.../mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_recvfrom.c | 1476 ++
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_rw.c | 1599 +++
.../mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_sendto.c | 1525 +++
.../mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_transport.c | 715 +
.../mlnx-5.8-1.1.2.1/xprtrdma/svcrdma_dummy.c | 76 +
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/transport.c | 956 ++
.../mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/verbs.c | 1581 +++
.../ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/xprt_rdma.h | 693 +
.../mlnx-5.8-1.1.2.1/xprtrdma/xprtrdma_dummy.c | 76 +
fs/nfs/enfs_adapter.c | 292 +
fs/nfs/enfs_adapter.h | 62 +
fs/nfs/fs_context.c | 50 +-
fs/nfs/internal.h | 3 +
fs/nfs/nfs3xdr.c | 66 +
fs/nfs/nfs4client.c | 27 +-
fs/nfs/nfs4proc.c | 14 +-
fs/nfs/nfs4state.c | 64 +-
fs/nfs/super.c | 17 +-
include/linux/lockd/lockd.h | 1 +
include/linux/nfs_fs_sb.h | 13 +
include/linux/nfs_xdr.h | 6 +
include/linux/sunrpc/clnt.h | 53 +
include/linux/sunrpc/sched.h | 3 +
include/linux/sunrpc/xprt.h | 2 +
include/linux/sunrpc/xprtmultipath.h | 5 +
net/sunrpc/.kunitconfig | 29 +
net/sunrpc/clnt.c | 269 +-
net/sunrpc/sched.c | 15 +-
net/sunrpc/xprt.c | 126 +-
net/sunrpc/xprtmultipath.c | 14 +-
1083 files changed, 301354 insertions(+), 37 deletions(-)
create mode 100644 fs/nfs/enfs/Makefile
create mode 100644 fs/nfs/enfs/enfs/enfs_init.c
create mode 100644 fs/nfs/enfs/enfs/enfs_multipath_client.c
create mode 100644 fs/nfs/enfs/enfs/enfs_multipath_parse.c
create mode 100644 fs/nfs/enfs/enfsrpc/enfs_rpc_init.c
create mode 100644 fs/nfs/enfs/enfsrpc/enfs_rpc_init.h
create mode 100644 fs/nfs/enfs/enfsrpc/enfs_rpc_proc.c
create mode 100644 fs/nfs/enfs/enfsrpc/enfs_rpc_proc.h
create mode 100644 fs/nfs/enfs/enfsrpc/lookupcache/enfs_lookup_cache.c
create mode 100644 fs/nfs/enfs/include/dns_internal.h
create mode 100644 fs/nfs/enfs/include/enfs.h
create mode 100644 fs/nfs/enfs/include/enfs_config.h
create mode 100644 fs/nfs/enfs/include/enfs_errcode.h
create mode 100644 fs/nfs/enfs/include/enfs_log.h
create mode 100644 fs/nfs/enfs/include/enfs_lookup_cache.h
create mode 100644 fs/nfs/enfs/include/enfs_multipath.h
create mode 100644 fs/nfs/enfs/include/enfs_multipath_client.h
create mode 100644 fs/nfs/enfs/include/enfs_multipath_parse.h
create mode 100644 fs/nfs/enfs/include/enfs_proc.h
create mode 100644 fs/nfs/enfs/include/enfs_remount.h
create mode 100644 fs/nfs/enfs/include/enfs_roundrobin.h
create mode 100644 fs/nfs/enfs/include/enfs_tp_common.h
create mode 100644 fs/nfs/enfs/include/exten_call.h
create mode 100644 fs/nfs/enfs/include/init.h
create mode 100644 fs/nfs/enfs/include/pm_state.h
create mode 100644 fs/nfs/enfs/include/shard.h
create mode 100644 fs/nfs/enfs/include/unify_multipath/dpc_rpc_client_api.h
create mode 100644 fs/nfs/enfs/include/unify_multipath/multipath_api.h
create mode 100644 fs/nfs/enfs/include/unify_multipath/multipath_types.h
create mode 100644 fs/nfs/enfs/init.c
create mode 100644 fs/nfs/enfs/mgmt/config/enfs_config.c
create mode 100644 fs/nfs/enfs/mgmt/mgmt_init.c
create mode 100644 fs/nfs/enfs/mgmt/mgmt_init.h
create mode 100644 fs/nfs/enfs/multipath/failover/failover_com.h
create mode 100644 fs/nfs/enfs/multipath/failover/failover_path.c
create mode 100644 fs/nfs/enfs/multipath/failover/failover_path.h
create mode 100644 fs/nfs/enfs/multipath/failover/failover_time.c
create mode 100644 fs/nfs/enfs/multipath/failover/failover_time.h
create mode 100644 fs/nfs/enfs/multipath/load_balance/enfs_roundrobin.c
create mode 100644 fs/nfs/enfs/multipath/load_balance/shard_route.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/dns_process.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/enfs_multipath.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/enfs_path.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/enfs_path.h
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/enfs_proc.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/enfs_remount.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/exten_call.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/pm_ping.c
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/pm_ping.h
create mode 100644 fs/nfs/enfs/multipath/path_mgmt/pm_state.c
create mode 100644 fs/nfs/enfs/unify_multipath/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/Rules.mak
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/Makefile.kernel
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter_module.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.h
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.c
create mode 100644 fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.h
create mode 100644 fs/nfs/enfs/unify_multipath/include/dpc_kernel_version.h
create mode 100644 fs/nfs/enfs/unify_multipath/include/mulp_log.h
create mode 100644 fs/nfs/enfs/unify_multipath/include/mulp_porting.h
create mode 100644 fs/nfs/enfs/unify_multipath/infra/mulp_proc.c
create mode 100644 fs/nfs/enfs/unify_multipath/infra/mulp_tp.c
create mode 100644 fs/nfs/enfs/unify_multipath/infra/mulp_tp.h
create mode 100644 fs/nfs/enfs/unify_multipath/infra/mulp_tp_common.h
create mode 100644 fs/nfs/enfs/unify_multipath/infra/multipath_infra_adapter.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/adapter/diagnose/mulp_diagnose.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/adapter/mulp_init.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/adapter/multipath_adapter/mulp_adapter.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_dataset.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_load_balance.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_multipath_adapter.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_detect.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_mgmt.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/mulp_shard_view.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/include/multipath.h
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/model/config/mulp_config.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/model/dataset/mulp_dataset.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/model/shard_view/mulp_shardview.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/multipath.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/multipath_module.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/path_mgmt/mpath_create.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/service/load_balance/mulp_load_balance.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/service/path_mgmt/mulp_path_detect.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/service/path_mgmt/mulp_path_mgmt.c
create mode 100644 fs/nfs/enfs/unify_multipath/multipath/service/path_mgmt/mulp_path_mgmt_inner.h
create mode 100644 fs/nfs/enfs/unify_multipath/nfs_adapter/nfs_multipath_adapter.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-182.0.0.95.oe2203sp3/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-228.0.0.127.oe2203sp4/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/.cache/clangd/wecode-cpp.db
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-logs/audit.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-logs/local-build-2025-03.log
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-rsyncd/rsyncd.conf
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-rsyncd/rsyncd.log
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-rsyncd/rsyncd.pid
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-rsyncd/rsyncd.secrets
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/libing-local-build-rsyncd/rsyncdPortFile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/settings.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/staticCheckTasks.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/tags-34.wecode-db
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/.vscode/tags-34.wecode-lock
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-250.0.0.154.oe2203sp4/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/kernel/5.10.0-60.18.0.50.oe2203/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/.cache/clangd/wecode-cpp.db
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-logs/audit.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-logs/local-build-2025-04.log
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-rsyncd/rsyncd.conf
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-rsyncd/rsyncd.log
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-rsyncd/rsyncd.pid
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-rsyncd/rsyncd.secrets
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/libing-local-build-rsyncd/rsyncdPortFile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/settings.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/staticCheckTasks.json
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/tags-33.wecode-db
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/.vscode/tags-33.wecode-lock
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/Module.aarch64.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/Module.x86_64.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/compat/config.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/asm-generic/bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/auxiliary_bus.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bitfield.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bitmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bitops.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bits.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/blk-mq-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/blk-mq-rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/blk-mq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/blk_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/blkdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bpf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/bpf_trace.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/build_bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/cdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/cgroup_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-2.6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-3.10.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-3.12.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-3.15.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-4.0.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-4.1.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat-4.10.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compat_fix.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compiler-clang.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compiler-gcc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compiler-intel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compiler.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/compiler_attributes.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/dcbnl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/dim.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/ethtool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/export.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/filter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/firmware.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/gfp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/hashtable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/idr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/if_ether.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/if_link.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/if_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/indirect_call_wrapper.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/inet.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/inet_lro.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/inetdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/interval_tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/irq_poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/kconfig.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/kern_levels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/kernel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/kmod.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/kref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/list.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/llist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/lockdep.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/log2.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/accel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/doorbell.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/eq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/eswitch.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/fs_helpers.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/mlx5_ifc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/mlx5_ifc_fpga.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/mlx5_ifc_vdpa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/mpfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/port.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/rsc_dump.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/transobj.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mlx5/vport.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mmu_notifier.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/mod_devicetable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/module.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/net.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/netdev_features.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/netdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nodemask.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nospec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme-fc-driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme-fc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme-peer.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme-rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/nvme.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/overflow.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/page_ref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/pci-p2pdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/pci_regs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/pm_qos.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/radix-tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/rbtree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/rculist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/rcupdate.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/refcount.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/rhashtable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/scatterlist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sched.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sched/mm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sched/signal.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sched/task.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sdt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/seq_file.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/skbuff.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/slab.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/stddef.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/string.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/auth.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/rpc_rdma_cid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/svc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/svc_rdma_pcl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sunrpc/xprtrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/t10-pi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/timekeeping.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/uaccess.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/units.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/uuid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/xarray.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/linux/xz.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/addrconf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/bareudp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/bonding.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/dst.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/dst_metadata.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/flow_dissector.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/flow_keys.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/flow_offload.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/geneve.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/gre.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/ip_fib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/ip_tunnels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/ipv6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/ipv6_stubs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/mlxdevm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/netfilter/nf_flow_table.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/netfilter/nf_flow_table_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/psample.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/sock.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/switchdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_csum.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_gact.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_mirred.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_mpls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_pedit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tc_act/tc_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/tls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/vxlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/xdp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/net/xfrm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_cache.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_hdrs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_marshall.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_pack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_pma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_umem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_umem_odp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_verbs_nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ib_verbs_nvmf_def.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/iba.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/ibta_vol1_c12.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/iw_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/iw_portmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/lag.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/mr_pool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/opa_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/opa_port_info.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/opa_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/opa_vnic.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/peer_mem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdma_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdma_cm_ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdma_counter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdma_vt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdmavt_cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdmavt_mr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rdmavt_qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/restrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/rw.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/signature.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/tid_rdma_defs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/uverbs_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/uverbs_named_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/uverbs_std_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/rdma/uverbs_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/scsi/iser.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/scsi/scsi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/scsi/scsi_device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/scsi/scsi_transport_srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/scsi/srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/ib_umad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/rdma_core.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/rpcrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/trace/events/sunrpc_base.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/eventpoll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/net_tstamp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/nvme_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/tc_act/tc_pedit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/linux/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/mlxdevm/mlxdevm_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/bnxt_re-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/cxgb4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/efa-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/hfi/hfi1_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/hfi/hfi1_user.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/hns-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ib_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ib_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ib_user_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ib_user_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ib_user_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/irdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/mlx4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/mlx5-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/mlx5_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/mlx5_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/mthca-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/ocrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/qedr-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rdma_user_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rdma_user_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rdma_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rdma_user_rxe.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/rvt-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/siw-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/include/uapi/rdma/vmw_pvrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/Module.supported
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/nvfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/nvfs_rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/nvfs_rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/rpcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_pcl.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/svcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/openeuler/xprtrdma/xprtrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/Module.5.15.0-25-generic-64k.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/Module.5.15.0-25-generic.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/compat/config.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/drivers/infiniband/debug/memtrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/drivers/infiniband/debug/mtrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/asm-generic/bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/asm/io.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/auxiliary_bus.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bitfield.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bitmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bitops.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bits.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/blk-mq-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/blk-mq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/blk_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/blkdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bpf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/bpf_trace.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/build_bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/cdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/cgroup_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/cleanup.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compat-2.6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compiler-clang.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compiler-gcc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compiler-intel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compiler.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/compiler_attributes.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/cpu_rmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/dcbnl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/dim.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/ethtool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/export.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/filter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/firmware.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/fwctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/gfp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/hashtable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/idr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/if_ether.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/if_link.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/if_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/indirect_call_wrapper.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/inet.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/inetdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/interrupt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/interval_tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/irq_poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/kconfig.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/kern_levels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/kernel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/kmod.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/kref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/list.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/llist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/lockdep.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/log2.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/doorbell.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/eq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/eswitch.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/fs_helpers.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/mlx5_ifc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/mlx5_ifc_fpga.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/mlx5_ifc_vdpa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/mpfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/port.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/rsc_dump.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/transobj.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mlx5/vport.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mmu_notifier.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/mod_devicetable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/module.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/net.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/netdev_features.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/netdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nodemask.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nospec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme-fc-driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme-fc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme-peer.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme-rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/nvme.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/overflow.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/page_ref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/panic.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/pci-p2pdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/pci_regs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/pm_qos.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/radix-tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/rbtree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/rculist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/rcupdate.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/refcount.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/scatterlist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sched.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sched/signal.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sched/task.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sdt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/seq_file.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/skbuff.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/slab.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/stddef.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/string.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/auth.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/rpc_rdma_cid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/svc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/svc_rdma_pcl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sunrpc/xprtrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/t10-pi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/timekeeping.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/uaccess.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/units.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/uuid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/xarray.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/linux/xz.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/addrconf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/bareudp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/bonding.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/dst.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/dst_metadata.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/flow_dissector.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/flow_keys.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/flow_offload.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/geneve.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/gre.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/ip_fib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/ip_tunnels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/ipv6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/ipv6_stubs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/mlxdevm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/netfilter/nf_flow_table.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/netfilter/nf_flow_table_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/psample.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/sock.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/switchdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_csum.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_gact.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_mirred.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_mpls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tc_act/tc_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/tls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/vxlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/xdp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/xdp_sock_drv.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/net/xfrm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_cache.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_hdrs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_marshall.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_pack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_pma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_umem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_umem_odp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_verbs_nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ib_verbs_nvmf_def.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/iba.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/ibta_vol1_c12.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/iw_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/iw_portmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/lag.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/mr_pool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/opa_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/opa_port_info.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/opa_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/opa_vnic.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/peer_mem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdma_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdma_cm_ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdma_counter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdma_vt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdmavt_cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdmavt_mr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rdmavt_qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/restrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/rw.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/signature.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/tid_rdma_defs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/uverbs_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/uverbs_named_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/uverbs_std_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/rdma/uverbs_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/scsi/iser.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/scsi/scsi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/scsi/scsi_device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/scsi/scsi_transport_srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/scsi/srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/events/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/events/ib_umad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/events/rdma_core.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/events/rpcrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/misc/rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/trace/misc/sunrpc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/fwctl/fwctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/fwctl/mlx5.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/eventpoll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/net_tstamp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/nvme_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/linux/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/mlxdevm/mlxdevm_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/bnxt_re-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/cxgb4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/efa-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/erdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/hfi/hfi1_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/hfi/hfi1_user.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/hns-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ib_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ib_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ib_user_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ib_user_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ib_user_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/irdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mana-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mlx4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mlx5-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mlx5_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mlx5_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/mthca-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/ocrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/qedr-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rdma_user_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rdma_user_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rdma_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rdma_user_rxe.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/rvt-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/siw-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/include/uapi/rdma/vmw_pvrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/nvfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/nvfs_rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/nvfs_rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/rpcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_pcl.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/svcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-24.07-0.6.1.0/xprtrdma/xprtrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/Module.5.15.0-25-generic-64k.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/Module.5.15.0-25-generic.symvers
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/compat/config.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/drivers/infiniband/debug/memtrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/drivers/infiniband/debug/mtrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/asm-generic/bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/auxiliary_bus.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bitfield.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bitmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bitops.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bits.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/blk-mq-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/blk-mq-rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/blk-mq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/blk_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/blkdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bpf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/bpf_trace.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/build_bug.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/cdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/cgroup_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-2.6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-3.10.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-3.12.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-3.15.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-4.0.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-4.1.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat-4.10.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compat_fix.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compiler-clang.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compiler-gcc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compiler-intel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compiler.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/compiler_attributes.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/dcbnl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/dim.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/ethtool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/export.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/filter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/firmware.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/gfp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/hashtable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/idr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/if_ether.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/if_link.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/if_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/indirect_call_wrapper.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/inet.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/inet_lro.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/inetdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/interval_tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/irq_poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/kconfig.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/kern_levels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/kernel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/kmod.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/kref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/list.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/llist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/lockdep.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/log2.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/accel.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/doorbell.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/eq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/eswitch.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/fs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/fs_helpers.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/mlx5_ifc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/mlx5_ifc_fpga.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/mlx5_ifc_vdpa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/mpfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/port.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/rsc_dump.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/transobj.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mlx5/vport.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mmu_notifier.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/mod_devicetable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/module.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/net.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/netdev_features.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/netdevice.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nodemask.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nospec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme-fc-driver.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme-fc.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme-pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme-peer.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme-rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/nvme.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/overflow.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/page_ref.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/pci-p2pdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/pci.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/pci_regs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/pm_qos.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/poll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/radix-tree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/rbtree.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/rculist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/rcupdate.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/refcount.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/rhashtable.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/scatterlist.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sched.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sched/mm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sched/signal.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sched/task.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sdt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/seq_file.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/skbuff.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/slab.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/stddef.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/string.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/auth.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/rpc_rdma_cid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/svc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/svc_rdma_pcl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sunrpc/xprtrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/t10-pi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/timekeeping.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/uaccess.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/units.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/uuid.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/xarray.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/linux/xz.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/addrconf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/bareudp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/bonding.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/dst.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/dst_metadata.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/flow_dissector.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/flow_keys.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/flow_offload.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/geneve.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/gre.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/ip_fib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/ip_tunnels.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/ipv6.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/ipv6_stubs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/macsec.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/mlxdevm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/netfilter/nf_flow_table.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/netfilter/nf_flow_table_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/psample.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/sock.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/switchdev.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_csum.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_gact.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_mirred.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_mpls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_pedit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tc_act/tc_vlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/tls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/vxlan.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/xdp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/net/xfrm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_cache.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_hdrs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_marshall.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_pack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_pma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_sysfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_umem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_umem_odp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_verbs_nvmf.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ib_verbs_nvmf_def.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/iba.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/ibta_vol1_c12.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/iw_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/iw_portmap.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/lag.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/mr_pool.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/opa_addr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/opa_port_info.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/opa_smi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/opa_vnic.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/peer_mem.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_cm_ib.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_counter.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdma_vt.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdmavt_cq.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdmavt_mr.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rdmavt_qp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/restrack.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/rw.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/signature.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/tid_rdma_defs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/uverbs_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/uverbs_named_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/uverbs_std_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/rdma/uverbs_types.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/scsi/iser.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/scsi/scsi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/scsi/scsi_device.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/scsi/scsi_transport_srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/scsi/srp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/ib_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/ib_umad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/rdma_core.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/rpcrdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/trace/events/sunrpc_base.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/devlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/eventpoll.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/net_tstamp.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/nvme_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/pkt_cls.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/tc_act/tc_ct.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/tc_act/tc_ct_4_18.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/tc_act/tc_pedit.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/linux/tc_act/tc_tunnel_key.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/mlxdevm/mlxdevm_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/bnxt_re-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/cxgb4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/efa-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/hfi/hfi1_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/hfi/hfi1_user.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/hns-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ib_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ib_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ib_user_mad.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ib_user_sa.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ib_user_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/irdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx4-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx5-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx5_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/mlx5_user_ioctl_verbs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/mthca-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/ocrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/qedr-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rdma_netlink.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rdma_user_cm.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rdma_user_ioctl.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rdma_user_ioctl_cmds.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rdma_user_rxe.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/rvt-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/siw-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/include/uapi/rdma/vmw_pvrdma-abi.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/Makefile
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/frwr_ops.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/module.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/nvfs.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/nvfs_rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/nvfs_rpc_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/rpc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/rpcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_backchannel.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_pcl.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_recvfrom.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_rw.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_sendto.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svc_rdma_transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/svcrdma_dummy.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/transport.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/verbs.c
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/xprt_rdma.h
create mode 100644 fs/nfs/enfs/unify_multipath/sunrpc/mlnx/ubuntu/mlnx-5.8-1.1.2.1/xprtrdma/xprtrdma_dummy.c
create mode 100644 fs/nfs/enfs_adapter.c
create mode 100644 fs/nfs/enfs_adapter.h
create mode 100644 net/sunrpc/.kunitconfig
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 5d85715..226eb1e 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -222,6 +222,7 @@ nlmclnt_recovery(struct nlm_host *host)
"(%ld)\n", host->h_name, PTR_ERR(task));
}
}
+EXPORT_SYMBOL_GPL(nlmclnt_recovery);
static int
reclaimer(void *ptr)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 127a728..7a16f48 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -31,6 +31,8 @@
#define NLM_HOST_EXPIRE (300 * HZ)
#define NLM_HOST_COLLECT (120 * HZ)
+#define ENFS_CAPABILITY_LSID_SUPPORT 0x0002 /* lsversion query capability */
+
static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH];
static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
@@ -446,7 +448,7 @@ nlm_bind_host(struct nlm_host *host)
.to_initval = increment,
.to_increment = increment,
.to_maxval = increment * 6UL,
- .to_retries = 5U,
+ .to_retries = 0,
};
struct rpc_create_args args = {
.net = host->net,
@@ -569,10 +571,17 @@ void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
* To avoid processing a host several times, we match the nsmstate.
*/
while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+ if (host->enfs_flag & ENFS_CAPABILITY_LSID_SUPPORT) {
+ continue;
+ }
nlmsvc_free_host_resources(host);
nlmsvc_release_host(host);
}
while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+ if (host->enfs_flag & ENFS_CAPABILITY_LSID_SUPPORT) {
+ dprintk("lockd: ignore nsm notify. \n");
+ continue;
+ }
nlmclnt_recovery(host);
nlmclnt_release_host(host);
}
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 7df2503..4bf6090 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -212,3 +212,14 @@ config NFS_V4_2_READ_PLUS
default y
help
Choose Y here to enable use of the NFS v4.2 READ_PLUS operation.
+
+config ENFS
+ tristate "NFS client support for ENFS"
+ depends on NFS_FS
+ default n
+ help
+ This option enables support for multipath in the kernel's
+ NFS client.
+ This feature improves performance and reliability.
+
+ If unsure, say N.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 5f6db37..ed2786c 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -9,7 +9,7 @@ CFLAGS_nfstrace.o += -I$(src)
nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
io.o direct.o pagelist.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o nfstrace.o \
- export.o sysfs.o fs_context.o
+ export.o sysfs.o fs_context.o enfs_adapter.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
@@ -35,3 +35,6 @@ nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o nfs42xattr.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/
obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += flexfilelayout/
+CONFIG_ENFS=m
+obj-$(CONFIG_ENFS) += enfs/
+
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d9b10c4..0995d34 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -44,7 +44,8 @@
#include "callback.h"
#include "delegation.h"
#include "iostat.h"
-#include "internal.h"
+//#include "internal.h"
+#include "enfs_adapter.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfs.h"
@@ -241,6 +242,7 @@ void nfs_free_client(struct nfs_client *clp)
put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
kfree(clp->cl_acceptor);
+ nfs_free_multi_path_client(clp);
kfree(clp);
}
EXPORT_SYMBOL_GPL(nfs_free_client);
@@ -315,20 +317,30 @@ again:
continue;
/* Match the full socket address */
- if (!rpc_cmp_addr_port(sap, clap))
+ if (!rpc_cmp_addr_port(sap, clap)) {
+ if (data->enfs_option != NULL) {
+ continue;
+ } else {
/* Match all xprt_switch full socket addresses */
if (IS_ERR(clp->cl_rpcclient) ||
!rpc_clnt_xprt_switch_has_addr(clp->cl_rpcclient,
sap))
continue;
-
+ }
+ }
+ if (!nfs_multipath_client_match(clp->cl_multipath_data, data->enfs_option)) {
+ printk("not match client src %p dst %p.\n", clp->cl_multipath_data, data->enfs_option);
+ continue;
+ }
/* Match the xprt security policy */
if (clp->cl_xprtsec.policy != data->xprtsec.policy)
continue;
refcount_inc(&clp->cl_count);
+ printk("match client %p.\n", clp);
return clp;
}
+ printk("not match client .\n");
return NULL;
}
@@ -516,6 +528,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
.xprtsec = cl_init->xprtsec,
.connect_timeout = cl_init->connect_timeout,
.reconnect_timeout = cl_init->reconnect_timeout,
+ .multipath_option = cl_init->enfs_option,
};
if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
@@ -650,6 +663,14 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
if (clp->cl_cons_state == NFS_CS_READY)
return clp;
+ error = nfs_create_multi_path_client(clp, cl_init);
+ if (error < 0) {
+ printk("nfs_create_multi_path_client faild.%d\n.", error);
+ nfs_put_client(clp);
+ clp = ERR_PTR(error);
+ return clp;
+ }
+
/*
* Create a client RPC handle for doing FSSTAT with UNIX auth only
* - RFC 2623, sec 2.3.2
@@ -684,6 +705,7 @@ static int nfs_init_server(struct nfs_server *server,
.nconnect = ctx->nfs_server.nconnect,
.init_flags = (1UL << NFS_CS_REUSEPORT),
.xprtsec = ctx->xprtsec,
+ .enfs_option = ctx->enfs_option
};
struct nfs_client *clp;
int error;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 39f7549..0ea4d8a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -43,7 +43,7 @@
#include "delegation.h"
#include "iostat.h"
-#include "internal.h"
+#include "enfs_adapter.h"
#include "fscache.h"
#include "nfstrace.h"
@@ -1360,6 +1360,11 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
return offset;
}
+/*
+ * Check a lookup-cache flag for @server via the enfs adapter hook
+ * rather than reading NFS_SERVER(dir)->flags directly, so the enfs
+ * module can override lookup-cache behaviour per server.
+ * NOTE(review): assumes enfs_check_have_lookup_cache_flag() falls back
+ * to server->flags when no enfs module is loaded — confirm in
+ * enfs_adapter.c.
+ */
+bool nfs_check_have_lookup_cache_flag(struct nfs_server *server, int flag)
+{
+	return enfs_check_have_lookup_cache_flag(server, flag);
+}
+
/*
* All directory operations under NFS are synchronous, so fsync()
* is a dummy operation.
@@ -1506,7 +1511,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
{
if (IS_ROOT(dentry))
return 1;
- if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+ if (nfs_check_have_lookup_cache_flag(NFS_SERVER(dir), NFS_MOUNT_LOOKUP_CACHE_NONE))
return 0;
if (!nfs_dentry_verify_change(dir, dentry))
return 0;
@@ -1610,7 +1615,7 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
{
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
- if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
+ if (nfs_check_have_lookup_cache_flag(NFS_SERVER(dir), NFS_MOUNT_LOOKUP_CACHE_NONEG))
return 1;
/* Case insensitive server? Revalidate negative dentries */
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
diff --git a/fs/nfs/enfs/Makefile b/fs/nfs/enfs/Makefile
new file mode 100644
index 0000000..5de748d
--- /dev/null
+++ b/fs/nfs/enfs/Makefile
@@ -0,0 +1,79 @@
+# Kbuild/standalone dual-mode Makefile for the enfs module.
+# Invoked either by Kbuild (KERNELRELEASE set) or directly from the
+# command line, in which case it recurses into the kernel build tree.
+PWD := $(shell pwd)
+CODE_BASE_PATH := ../
+CODE_SRC_PATH := $(CODE_BASE_PATH)/src
+
+# Under Kbuild $(M) is the external-module dir; standalone we are CWD.
+ifneq ($(KERNELRELEASE),)
+    CUR_PATH := $(M)
+else
+    CUR_PATH := $(PWD)
+endif
+
+# Default to the running kernel's build tree when KDIR is not given.
+ifeq ("$(KDIR)","")
+    KDIR = /lib/modules/$(shell uname -r)/build
+endif
+
+ENFS_INC_DIR := $(CUR_PATH)/$(CODE_SRC_PATH)
+ENFS_MGMT_INC_DIR := $(ENFS_INC_DIR)/mgmt
+ENFS_MULTIPATH_INC_DIR := $(ENFS_INC_DIR)/multipath
+ENFS_NFS_DIR := $(CODE_SRC_PATH)/enfs
+ENFS_RPC_INC_DIR := $(ENFS_INC_DIR)/enfsrpc
+ENFS_RPC_DIR := $(CODE_SRC_PATH)/enfsrpc
+ENFS_MGMT_DIR := $(CODE_SRC_PATH)/mgmt
+ENFS_MULTIPATH_DIR := $(CODE_SRC_PATH)/multipath
+
+ccflags-y += -I$(ENFS_INC_DIR)/include -I$(ENFS_MGMT_INC_DIR) -I$(ENFS_RPC_INC_DIR) -I$(ENFS_RPC_INC_DIR)/lookupcache -I$(ENFS_MGMT_INC_DIR)/config \
+ -I$(ENFS_MULTIPATH_INC_DIR)/path_mgmt -I$(ENFS_MULTIPATH_INC_DIR)/failover -I$(KDIR)/fs/nfs
+
+ENFS_INIT_OBJS := $(CODE_SRC_PATH)/init.o \
+ $(CODE_SRC_PATH)/dns_key.o \
+ $(CODE_SRC_PATH)/dns_query.o
+
+ENFS_NFS_OBJS := $(ENFS_NFS_DIR)/enfs_init.o \
+ $(ENFS_NFS_DIR)/enfs_multipath_client.o \
+ $(ENFS_NFS_DIR)/enfs_multipath_parse.o
+
+ENFS_RPC_OBJS := $(ENFS_RPC_DIR)/enfs_rpc_init.o \
+ $(ENFS_RPC_DIR)/enfs_rpc_proc.o \
+ $(ENFS_RPC_DIR)/lookupcache/enfs_lookup_cache.o
+
+ENFS_MGMT_OBJS := $(ENFS_MGMT_DIR)/mgmt_init.o \
+ $(ENFS_MGMT_DIR)/config/enfs_config.o
+
+ENFS_MULTIPATH_OBJS := $(ENFS_MULTIPATH_DIR)/load_balance/enfs_roundrobin.o \
+ $(ENFS_MULTIPATH_DIR)/load_balance/shard_route.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/enfs_multipath.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/enfs_proc.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/enfs_remount.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/exten_call.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/dns_process.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/pm_state.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/enfs_path.o \
+ $(ENFS_MULTIPATH_DIR)/failover/failover_path.o \
+ $(ENFS_MULTIPATH_DIR)/failover/failover_time.o \
+ $(ENFS_MULTIPATH_DIR)/path_mgmt/pm_ping.o
+
+# Tracepoint object and extra debug define only in DEBUG builds.
+ifeq ($(BUILD_TYPE),DEBUG)
+    ENFS_TRACE_POINT := $(CODE_SRC_PATH)/tracepoint/nfs_tp.o
+    EXTRA_CFLAGS += -DNFS_CLIENT_DEBUG
+endif
+
+EXTRA_CFLAGS += $(COMPILE_CFLAGS)
+
+ALL_DEPEND_OBJS := $(ENFS_INIT_OBJS) $(ENFS_NFS_OBJS) $(ENFS_RPC_OBJS) $(ENFS_MGMT_OBJS) $(ENFS_MULTIPATH_OBJS) $(ENFS_TRACE_POINT)
+
+# NOTE(review): this path assumes a build_root sibling of KDIR — verify
+# it exists in the target build environment.
+KBUILD_EXTRA_SYMBOLS += $(KDIR)/../build_root/Module.symvers
+
+enfs-objs := $(ALL_DEPEND_OBJS)
+
+ifneq ($(KERNELRELEASE),)
+    obj-m := enfs.o
+else
+all:
+	+make -C $(KDIR) M=$(CUR_PATH) modules
+
+install:
+	+make -C $(KDIR) M=$(CUR_PATH) modules_install
+
+clean:
+	rm -rf .*o.cmd *.ko *.mod.* *.o *.order *.symvers *.o.ur-safe .tmp_versions
endif
\ No newline at end of file
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfs/enfs_init.c b/fs/nfs/enfs/enfs/enfs_init.c
new file mode 100644
index 0000000..dd38f1c
--- /dev/null
+++ b/fs/nfs/enfs/enfs/enfs_init.c
@@ -0,0 +1,78 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "enfs.h"
+#include "enfs_multipath_parse.h"
+#include "enfs_multipath_client.h"
+#include "enfs_remount.h"
+#include "init.h"
+#include "enfs_lookup_cache.h"
+#include "enfs_rpc_init.h"
+
+unsigned int enfs_debug;
+module_param_named(enfs_debug, enfs_debug, uint, 0600);
+MODULE_PARM_DESC(enfs_debug, "enfs debugging mask");
+
+/*
+ * Hook table registered with the NFS core via enfs_adapter_register().
+ * The core calls back into these for mount-option parsing, per-client
+ * multipath info lifetime and comparison, /proc mount "show" output,
+ * remount of the IP list and capability queries.  The commented-out
+ * entries are optional hooks not implemented by this module.
+ */
+struct enfs_adapter_ops enfs_adapter = {
+	.name = "enfs",
+	.owner = THIS_MODULE,
+	//.alloc_mount_option = nfs_multipath_alloc_options,
+	.parse_mount_options = nfs_multipath_parse_options,
+	.free_mount_options = nfs_multipath_free_options,
+	//.dup_mount_options = nfs_multipath_dup_options,
+	.client_info_init = nfs_multipath_client_info_init,
+	.client_info_free = nfs_multipath_client_info_free,
+	.client_info_match = nfs_multipath_client_info_match,
+	.nfs4_client_info_match = nfs4_multipath_client_info_match,
+	.client_info_show = nfs_multipath_client_info_show,
+	// .client_info_clone = nfs_multipath_client_info_clone,
+	// .get_best_conn = nfs_multipath_get_best_conn,
+	// .conn_set_unavailable = nfs_multipath_set_conn_disconnect,
+	.remount_ip_list = enfs_remount,
+	.set_mount_data = enfs_set_mount_data,
+	.trigger_get_capability = enfs_trigger_get_capability,
+};
+
+/*
+ * Module entry point: register the adapter with the NFS core, then
+ * bring up the enfs core and its RPC layer.
+ *
+ * Fixes vs. original: on enfs_rpc_init() failure enfs_init() was never
+ * unwound (leaking everything it set up), and all paths returned -1
+ * instead of the real error code.
+ */
+static int __init init_enfs(void)
+{
+	int ret;
+
+	ret = enfs_adapter_register(&enfs_adapter);
+	if (ret) {
+		pr_err("register enfs_adapter fail. ret %d\n", ret);
+		return ret;
+	}
+
+	ret = enfs_init();
+	if (ret)
+		goto out_unregister;
+
+	ret = enfs_rpc_init();
+	if (ret)
+		goto out_fini;
+
+	return 0;
+
+out_fini:
+	enfs_fini();
+out_unregister:
+	enfs_adapter_unregister(&enfs_adapter);
+	return ret;
+}
+
+/*
+ * Module exit: tear down lookup cache, enfs core, then unregister the
+ * adapter.  NOTE(review): init_enfs() also calls enfs_rpc_init(), but
+ * no matching rpc teardown is visible here — confirm
+ * enfs_lookupcache_fini()/enfs_fini() cover it.
+ */
+static void __exit exit_enfs(void)
+{
+	enfs_lookupcache_fini();
+	enfs_fini();
+	enfs_adapter_unregister(&enfs_adapter);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Nfs client router");
+MODULE_VERSION("1.0");
+
+module_init(init_enfs);
+module_exit(exit_enfs);
diff --git a/fs/nfs/enfs/enfs/enfs_multipath_client.c b/fs/nfs/enfs/enfs/enfs_multipath_client.c
new file mode 100644
index 0000000..00c8bd3
--- /dev/null
+++ b/fs/nfs/enfs/enfs/enfs_multipath_client.c
@@ -0,0 +1,440 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs_multipath_client.h"
+#include "enfs_multipath_parse.h"
+#include "enfs_log.h"
+
+/*
+ * Allocate a zeroed multipath_client_info together with its embedded
+ * local/remote IP-list and DNS-info buffers.
+ * On success *client_info owns all allocations and 0 is returned; on
+ * failure everything already allocated is released and -ENOMEM is
+ * returned.
+ */
+int enfs_alloc_nfsclient_info(struct multipath_client_info **client_info)
+{
+	struct multipath_client_info *new_info;
+
+	new_info = kzalloc(sizeof(*new_info), GFP_KERNEL);
+	if (new_info == NULL) {
+		enfs_log_error("Memory allocation failed");
+		return -ENOMEM;
+	}
+
+	new_info->local_ip_list = kzalloc(sizeof(*new_info->local_ip_list), GFP_KERNEL);
+	if (new_info->local_ip_list == NULL) {
+		enfs_log_error("Memory allocation failed");
+		goto free_info;
+	}
+
+	new_info->remote_ip_list = kzalloc(sizeof(*new_info->remote_ip_list), GFP_KERNEL);
+	if (new_info->remote_ip_list == NULL) {
+		enfs_log_error("Memory allocation failed");
+		goto free_local;
+	}
+
+	new_info->pRemoteDnsInfo = kzalloc(sizeof(NFS_ROUTE_DNS_INFO_S), GFP_KERNEL);
+	if (new_info->pRemoteDnsInfo == NULL) {
+		enfs_log_error("Memory allocation failed");
+		goto free_remote;
+	}
+
+	*client_info = new_info;
+	return 0;
+
+free_remote:
+	kfree(new_info->remote_ip_list);
+free_local:
+	kfree(new_info->local_ip_list);
+free_info:
+	kfree(new_info);
+	return -ENOMEM;
+}
+
+/*
+ * Release a multipath_client_info allocated by
+ * enfs_alloc_nfsclient_info().
+ *
+ * Fix vs. original: the per-field guards were inverted
+ * ("if (!ptr) kfree(ptr)"), so only NULL pointers were ever passed to
+ * kfree() and every real allocation leaked.  kfree(NULL) is a no-op,
+ * so no guard is needed at all.
+ */
+void enfs_free_nfsclient_info(struct multipath_client_info *client_info)
+{
+	if (!client_info)
+		return;
+
+	kfree(client_info->local_ip_list);
+	kfree(client_info->remote_ip_list);
+	kfree(client_info->pRemoteDnsInfo);
+	kfree(client_info);
+}
+
+/*
+ * Copy the mount-time multipath options (local/remote IP lists, DNS
+ * info, fill_local flag) from cl_init->enfs_option into @client_info.
+ * Returns 0 on success or -ENOMEM; on failure any partially copied
+ * buffers are released and the fields reset to NULL (single goto
+ * cleanup path instead of the original repeated unwind chains).
+ * Caller guarantees cl_init->enfs_option is non-NULL.
+ */
+int nfs_multipath_client_mount_info_init(struct multipath_client_info *client_info,
+	const struct nfs_client_initdata *cl_init)
+{
+	struct multipath_mount_options *opt =
+		(struct multipath_mount_options *)(cl_init->enfs_option);
+
+	if (opt->local_ip_list) {
+		client_info->local_ip_list =
+			kzalloc(sizeof(struct nfs_ip_list), GFP_KERNEL);
+		if (!client_info->local_ip_list)
+			goto out_nomem;
+		memcpy(client_info->local_ip_list, opt->local_ip_list,
+		       sizeof(struct nfs_ip_list));
+	}
+
+	if (opt->remote_ip_list) {
+		client_info->remote_ip_list =
+			kzalloc(sizeof(struct nfs_ip_list), GFP_KERNEL);
+		if (!client_info->remote_ip_list)
+			goto out_nomem;
+		memcpy(client_info->remote_ip_list, opt->remote_ip_list,
+		       sizeof(struct nfs_ip_list));
+	}
+
+	if (opt->pRemoteDnsInfo) {
+		client_info->pRemoteDnsInfo =
+			kzalloc(sizeof(NFS_ROUTE_DNS_INFO_S), GFP_KERNEL);
+		if (!client_info->pRemoteDnsInfo)
+			goto out_nomem;
+		memcpy(client_info->pRemoteDnsInfo, opt->pRemoteDnsInfo,
+		       sizeof(NFS_ROUTE_DNS_INFO_S));
+	}
+
+	client_info->fill_local = opt->fill_local;
+	return 0;
+
+out_nomem:
+	/* kfree(NULL) is a no-op, so freeing unallocated fields is safe */
+	kfree(client_info->local_ip_list);
+	client_info->local_ip_list = NULL;
+	kfree(client_info->remote_ip_list);
+	client_info->remote_ip_list = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Free a multipath_client_info and all buffers it owns.
+ * Simplified vs. original: kfree(NULL) is a no-op, so the per-field
+ * "if (ptr != NULL)" guards were redundant.
+ */
+void enfs_free_client_info(struct multipath_client_info *clp_info)
+{
+	if (!clp_info)
+		return;
+
+	kfree(clp_info->local_ip_list);
+	kfree(clp_info->remote_ip_list);
+	kfree(clp_info->pRemoteDnsInfo);
+	kfree(clp_info);
+}
+
+/*
+ * Workqueue callback: recover the owning multipath_client_info from
+ * its embedded work_struct and release it outside the scheduling
+ * caller's context.
+ */
+void nfs_multipath_client_info_free_work(struct work_struct *work)
+{
+	struct multipath_client_info *info;
+
+	if (!work)
+		return;
+
+	info = container_of(work, struct multipath_client_info, work);
+	enfs_free_client_info(info);
+}
+
+/*
+ * ->client_info_free hook: schedule asynchronous release of a client's
+ * multipath data via the system workqueue.
+ * Fix vs. original: the unconditional printk() logged a kernel pointer
+ * on every client teardown; demoted to pr_debug().
+ */
+void nfs_multipath_client_info_free(void *data)
+{
+	struct multipath_client_info *clp_info = data;
+
+	if (clp_info == NULL)
+		return;
+	pr_debug("free client info %p.\n", clp_info);
+	INIT_WORK(&clp_info->work, nfs_multipath_client_info_free_work);
+	schedule_work(&clp_info->work);
+}
+
+/*
+ * ->client_info_init hook: allocate (if needed) and populate the
+ * per-nfs_client multipath info from the mount options.
+ * Returns 0 when there is nothing to do (no enfs option) or on
+ * success; -EINVAL/-ENOMEM otherwise.
+ *
+ * Fix vs. original: on mount-info copy failure the just-freed pointer
+ * was left in *data, handing the caller a dangling reference; it is
+ * now cleared.  Debug printk demoted to pr_debug.
+ */
+int nfs_multipath_client_info_init(void **data, const struct nfs_client_initdata *cl_init)
+{
+	int rc;
+	struct multipath_client_info *info;
+	struct multipath_client_info **enfs_info;
+
+	/* no multi path info, no need do multipath init */
+	if (cl_init->enfs_option == NULL)
+		return 0;
+	enfs_info = (struct multipath_client_info **)data;
+	if (enfs_info == NULL)
+		return -EINVAL;
+
+	if (*enfs_info == NULL)
+		*enfs_info = kzalloc(sizeof(struct multipath_client_info), GFP_KERNEL);
+	if (*enfs_info == NULL)
+		return -ENOMEM;
+
+	info = *enfs_info;
+	pr_debug("init client info %p.\n", info);
+	rc = nfs_multipath_client_mount_info_init(info, cl_init);
+	if (rc) {
+		nfs_multipath_client_info_free((void *)info);
+		*enfs_info = NULL;	/* don't leave a dangling pointer behind */
+	}
+	return rc;
+}
+
+/*
+ * Order-insensitive comparison of two IP lists: counts must be equal
+ * and every (address, port) in @ip_list_src must occur somewhere in
+ * @ip_list_dst.  Two NULL (or identical) lists match.
+ *
+ * Fix vs. original: the inner loop was bounded by ip_list_src->count
+ * instead of ip_list_dst->count.  Harmless only because the counts
+ * were already checked equal, but the intent is to scan the dst list.
+ */
+bool nfs_multipath_ip_list_info_match(const struct nfs_ip_list *ip_list_src,
+	const struct nfs_ip_list *ip_list_dst)
+{
+	int i;
+	int j;
+	bool is_find;
+
+	/* if both are equal or NULL, then return true. */
+	if (ip_list_src == ip_list_dst)
+		return true;
+
+	if (ip_list_src == NULL || ip_list_dst == NULL)
+		return false;
+
+	if (ip_list_src->count != ip_list_dst->count)
+		return false;
+
+	for (i = 0; i < ip_list_src->count; i++) {
+		is_find = false;
+		for (j = 0; j < ip_list_dst->count; j++) {
+			if (rpc_cmp_addr_port(
+				(const struct sockaddr *)&ip_list_src->address[i],
+				(const struct sockaddr *)&ip_list_dst->address[j])) {
+				is_find = true;
+				break;
+			}
+		}
+		if (!is_find)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Order-insensitive comparison of two DNS-name lists: equal counts and
+ * every name in @dns_src present somewhere in @dns_dst.  Two NULL (or
+ * identical) lists match.  Returns boolean as int.
+ */
+int nfs_multipath_dns_list_info_match(const NFS_ROUTE_DNS_INFO_S *dns_src,
+	const NFS_ROUTE_DNS_INFO_S *dns_dst)
+{
+	int src_idx;
+	int dst_idx;
+	bool matched;
+
+	/* identical pointers (including both NULL) trivially match */
+	if (dns_src == dns_dst)
+		return true;
+	if (dns_src == NULL || dns_dst == NULL)
+		return false;
+	if (dns_src->dnsNameCount != dns_dst->dnsNameCount)
+		return false;
+
+	for (src_idx = 0; src_idx < dns_src->dnsNameCount; src_idx++) {
+		matched = false;
+		for (dst_idx = 0; dst_idx < dns_dst->dnsNameCount; dst_idx++) {
+			if (!strcmp(dns_src->routeRemoteDnsList[src_idx].dnsname,
+				    dns_dst->routeRemoteDnsList[dst_idx].dnsname)) {
+				matched = true;
+				break;
+			}
+		}
+		if (!matched)
+			return false;
+	}
+	return true;
+}
+
+/* TODO: handle the case where contents match but ordering differs
+ * across retries (translated from original Chinese note). */
+/*
+ * Match an existing client's multipath info (@src) against new mount
+ * options (@dst).  Local IP lists must match; the remote side matches
+ * by DNS names when either side configured them, otherwise by IP list.
+ *
+ * Fixes vs. original: pRemoteDnsInfo was dereferenced on both sides
+ * without NULL checks although nfs_multipath_client_mount_info_init()
+ * legitimately leaves it NULL, and there was no src/dst NULL guard
+ * (the nfs4 variant has one).  Unconditional printk demoted to
+ * pr_debug.
+ */
+int nfs_multipath_client_info_match(void *src, void *dst)
+{
+	int ret;
+	struct multipath_client_info *src_info;
+	struct multipath_mount_options *dst_info;
+
+	/* keep parity with nfs4_multipath_client_info_match() */
+	if (src == NULL || dst == NULL)
+		return src == dst;
+
+	src_info = (struct multipath_client_info *)src;
+	dst_info = (struct multipath_mount_options *)dst;
+
+	ret = nfs_multipath_ip_list_info_match(src_info->local_ip_list,
+					       dst_info->local_ip_list);
+	if (ret == false) {
+		pr_debug("local_ip not match.\n");
+		return ret;
+	}
+
+	if ((src_info->pRemoteDnsInfo == NULL ||
+	     src_info->pRemoteDnsInfo->dnsNameCount == 0) &&
+	    (dst_info->pRemoteDnsInfo == NULL ||
+	     dst_info->pRemoteDnsInfo->dnsNameCount == 0)) {
+		ret = nfs_multipath_ip_list_info_match(src_info->remote_ip_list,
+						       dst_info->remote_ip_list);
+		if (ret == false) {
+			pr_debug("remote_ip not match.\n");
+			return ret;
+		}
+	} else {
+		ret = nfs_multipath_dns_list_info_match(src_info->pRemoteDnsInfo,
+							dst_info->pRemoteDnsInfo);
+		if (ret == false) {
+			pr_debug("dns not match.\n");
+			return ret;
+		}
+	}
+
+	pr_debug("try match client ret %d.\n", ret);
+	return ret;
+}
+
+/*
+ * NFSv4 variant: compare two existing clients' multipath info.  Local
+ * IP list, remote IP list and DNS list must all match
+ * (order-insensitively).  NULL arguments never match.
+ */
+int nfs4_multipath_client_info_match(void *src, void *dst)
+{
+	struct multipath_client_info *srcinfo = src;
+	struct multipath_client_info *dstinfo = dst;
+	int ret;
+
+	if (src == NULL || dst == NULL)
+		return false;
+
+	ret = nfs_multipath_ip_list_info_match(srcinfo->local_ip_list,
+					       dstinfo->local_ip_list);
+	if (!ret) {
+		enfs_log_info("nfs4 local_ip not match.\n");
+		return ret;
+	}
+
+	ret = nfs_multipath_ip_list_info_match(srcinfo->remote_ip_list,
+					       dstinfo->remote_ip_list);
+	if (!ret) {
+		enfs_log_info("nfs4 remote_ip not match.\n");
+		return ret;
+	}
+
+	ret = nfs_multipath_dns_list_info_match(srcinfo->pRemoteDnsInfo,
+						dstinfo->pRemoteDnsInfo);
+	if (!ret) {
+		enfs_log_info("nfs4 dns not match.\n");
+		return ret;
+	}
+
+	enfs_log_info("nfs4 try match client ret %d.\n", ret);
+	return ret;
+}
+
+/*
+ * Emit an address list as a mount option: ",<type>=addr1~addr2~...".
+ * Addresses are '~'-separated; rpc_ntop() renders each one.
+ */
+void print_ip_info(struct seq_file *mount_option, struct nfs_ip_list *ip_list,
+	const char *type)
+{
+	char addr_buf[IP_ADDRESS_LEN_MAX + 1];
+	int idx;
+	int written;
+
+	seq_printf(mount_option, ",%s=", type);
+	for (idx = 0; idx < ip_list->count; idx++) {
+		written = rpc_ntop((struct sockaddr *)&ip_list->address[idx],
+				   addr_buf, IP_ADDRESS_LEN_MAX);
+		/* defensive termination; rpc_ntop output fits the buffer */
+		if (written > 0 && written < IP_ADDRESS_LEN_MAX)
+			addr_buf[written] = '\0';
+
+		if (idx == 0)
+			seq_printf(mount_option, "%s", addr_buf);
+		else
+			seq_printf(mount_option, "~%s", addr_buf);
+		dfprintk(MOUNT, "NFS: show nfs mount option type:%s %s [%s]\n",
+			 type, addr_buf, __FUNCTION__);
+	}
+}
+
+/* Emit the DNS-name list as a mount option: ",<type>=name1~name2...". */
+void print_dns_info(struct seq_file *seq, NFS_ROUTE_DNS_INFO_S *pRemoteDnsInfo,
+	const char *type)
+{
+	int idx;
+	const char *dns_name;
+
+	seq_printf(seq, ",%s=", type);
+	for (idx = 0; idx < pRemoteDnsInfo->dnsNameCount; idx++) {
+		dns_name = pRemoteDnsInfo->routeRemoteDnsList[idx].dnsname;
+		if (idx == 0)
+			seq_printf(seq, "%s", dns_name);
+		else
+			seq_printf(seq, "~%s", dns_name);
+	}
+}
+
+/*
+ * Print an IPv4 or IPv6 address into @seq; any other address family is
+ * logged at error level and produces no output.
+ */
+static void multipath_print_sockaddr(struct seq_file *seq, struct sockaddr *addr)
+{
+	if (addr->sa_family == AF_INET) {
+		seq_printf(seq, "%pI4",
+			   &((struct sockaddr_in *)addr)->sin_addr);
+		return;
+	}
+	if (addr->sa_family == AF_INET6) {
+		seq_printf(seq, "%pI6",
+			   &((struct sockaddr_in6 *)addr)->sin6_addr);
+		return;
+	}
+	printk(KERN_ERR "unsupport family:%d\n", addr->sa_family);
+}
+
+/*
+ * Map lookup-cache flag bits to their mount-option strings:
+ *   both NONEG and NONE set  -> "none"
+ *   exactly one of them set  -> "positive"
+ *   neither set              -> "all"
+ * The "actual" value is derived from server->flags and falls back to
+ * the server-requested string when neither bit is set there.
+ */
+void convert_lookup_cache_str(struct nfs_server *server, char **server_lookup, char **actual_lookup)
+{
+	bool noneg = server->enfs_flags & NFS_MOUNT_LOOKUP_CACHE_NONEG;
+	bool none = server->enfs_flags & NFS_MOUNT_LOOKUP_CACHE_NONE;
+
+	if (noneg && none)
+		*server_lookup = "none";
+	else if (noneg || none)
+		*server_lookup = "positive";
+	else
+		*server_lookup = "all";
+
+	noneg = server->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG;
+	none = server->flags & NFS_MOUNT_LOOKUP_CACHE_NONE;
+
+	if (noneg && none)
+		*actual_lookup = "none";
+	else if (noneg || none)
+		*actual_lookup = "positive";
+	else
+		*actual_lookup = *server_lookup;
+}
+
+/*
+ * Emit the enfs-specific mount-show suffix: the effective lookupcache
+ * modes and ",enfs_info=<peer-address>_<root-rpc-client-id>".
+ *
+ * Fix vs. original: the return value of rpc_peeraddr() was ignored; if
+ * it returns 0 the sockaddr_storage stays uninitialised and stack
+ * garbage was printed.  The buffer is now zero-initialised and only
+ * printed on success.
+ */
+static void multipath_print_enfs_info(struct seq_file *seq, struct nfs_server *server)
+{
+	struct sockaddr_storage peeraddr = {};
+	struct rpc_clnt *next = server->client;
+	char *server_lookup_cache = NULL;
+	char *actual_lookup_cache = NULL;
+	size_t len;
+
+	convert_lookup_cache_str(server, &server_lookup_cache, &actual_lookup_cache);
+
+	seq_printf(seq, ",slookupcache=%s", server_lookup_cache);
+	seq_printf(seq, ",alookupcache=%s", actual_lookup_cache);
+	seq_printf(seq, ",enfs_info=");
+	len = rpc_peeraddr(server->client, (struct sockaddr *)&peeraddr,
+			   sizeof(peeraddr));
+	if (len != 0)
+		multipath_print_sockaddr(seq, (struct sockaddr *)&peeraddr);
+
+	/* walk up to the top-most parent rpc client for a stable id */
+	while (next->cl_parent) {
+		if (next == next->cl_parent)
+			break;
+		next = next->cl_parent;
+	}
+	seq_printf(seq, "_%u", next->cl_clid);
+}
+
+/*
+ * ->client_info_show hook: append the enfs mount options (localaddrs,
+ * remoteaddrs by DNS name or IP, lookupcache state and peer info) to
+ * the mount display.
+ *
+ * Fix vs. original: cl_multipath_data was dereferenced without a NULL
+ * check although clients mounted without enfs options never get one.
+ */
+void nfs_multipath_client_info_show(struct seq_file *seq, void *data)
+{
+	struct nfs_server *server = data;
+	struct multipath_client_info *client_info = server->nfs_client->cl_multipath_data;
+
+	dfprintk(MOUNT, "NFS: show nfs mount option[%s]\n", __FUNCTION__);
+	if (client_info == NULL)
+		return;
+
+	if (client_info->local_ip_list && client_info->local_ip_list->count > 0)
+		print_ip_info(seq, client_info->local_ip_list, "localaddrs");
+
+	/* DNS names take precedence over the raw remote IP list */
+	if (client_info->pRemoteDnsInfo &&
+	    client_info->pRemoteDnsInfo->dnsNameCount > 0)
+		print_dns_info(seq, client_info->pRemoteDnsInfo, "remoteaddrs");
+	else if (client_info->remote_ip_list &&
+		 client_info->remote_ip_list->count > 0)
+		print_ip_info(seq, client_info->remote_ip_list, "remoteaddrs");
+
+	multipath_print_enfs_info(seq, server);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfs/enfs_multipath_parse.c b/fs/nfs/enfs/enfs/enfs_multipath_parse.c
new file mode 100644
index 0000000..44de76e
--- /dev/null
+++ b/fs/nfs/enfs/enfs/enfs_multipath_parse.c
@@ -0,0 +1,698 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs_multipath_parse.h"
+#include "enfs_log.h"
+#include "enfs_config.h"
+
+#define NFSDBG_FACILITY NFSDBG_CLIENT
+#define REMOTE_IP 0
+#define REMOTE_DNS 1
+#define ENFS_CONFIG_UNLOAD 0
+#define ENFS_CONFIG_LOAD 1
+
+static int g_enfsconfigLoad = ENFS_CONFIG_UNLOAD;
+
+/*
+ * Advance an IPv6 address by add_num.  The 128-bit address is treated as
+ * four big-endian 32-bit words; each increment ripples the carry from the
+ * least-significant word (index 3) towards the most-significant (index 0).
+ */
+void nfs_multipath_parse_ip_ipv6_add(struct sockaddr_in6 *sin6, int add_num)
+{
+	int step;
+	int word;
+
+	pr_info("NFS: before %08x%08x%08x%08x add_num: %d[%s]\n",
+		ntohl(sin6->sin6_addr.in6_u.u6_addr32[0]),
+		ntohl(sin6->sin6_addr.in6_u.u6_addr32[1]),
+		ntohl(sin6->sin6_addr.in6_u.u6_addr32[2]),
+		ntohl(sin6->sin6_addr.in6_u.u6_addr32[3]),
+		add_num, __FUNCTION__);
+
+	for (step = 0; step < add_num; step++) {
+		for (word = 3; word >= 0; word--) {
+			sin6->sin6_addr.in6_u.u6_addr32[word] =
+				htonl(ntohl(sin6->sin6_addr.in6_u.u6_addr32[word]) + 1);
+			/* No wrap-around in this word: the carry stops here. */
+			if (sin6->sin6_addr.in6_u.u6_addr32[word] != 0)
+				break;
+		}
+	}
+}
+
+/*
+ * Expand an IP range ("A-B") into ip_list.  The range start is the last
+ * entry already stored in ip_list (placed there by enfs_parse_ip_single);
+ * cursor holds the textual range end.  Generated addresses are appended
+ * until the end address is produced, skipping duplicates, and bounded by
+ * the per-type capacity limits.
+ *
+ * Returns 0 on success, -EINVAL for bad/mixed-family input, -EOPNOTSUPP
+ * for unknown address families, -ENOSPC when the list would overflow
+ * (ip_list->count is reset to 0 in that case).
+ */
+static int enfs_parse_ip_range(struct net *net_ns, const char *cursor,
+ struct nfs_ip_list *ip_list, enum nfsmultipathoptions type)
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_storage tmp_addr;
+ int i;
+ size_t len;
+ int add_num = 1;
+ bool duplicate_flag = false;
+ bool is_complete = false;
+ struct sockaddr_in *sin4;
+ struct sockaddr_in6 *sin6;
+ pr_info("NFS: parsing nfs mount option '%s' type: %d[%s]\n", cursor, type, __FUNCTION__);
+ len = rpc_pton(net_ns, cursor, strlen(cursor), (struct sockaddr *)&addr, sizeof(addr));
+ if (!len)
+  return -EINVAL;
+ // Reject a range whose end is a different address family than its start.
+ if (addr.ss_family != ip_list->address[ip_list->count - 1].ss_family) {
+  pr_info("NFS: parsing nfs mount option type: %d fail. both have ipv4 and ipv6 address[%s]\n", type, __FUNCTION__);
+  return -EINVAL;
+ }
+ // Range end equal to range start: nothing to expand.
+ if (rpc_cmp_addr((const struct sockaddr *)&ip_list->address[ip_list->count - 1], (const struct sockaddr *)&addr)) {
+  pr_info("range ip is same ip.\n");
+  return 0;
+ }
+
+ while (true) {
+
+  tmp_addr = ip_list->address[ip_list->count - 1];
+
+  switch (addr.ss_family) {
+  case AF_INET: {
+   sin4 = (struct sockaddr_in *)&tmp_addr;
+   sin4->sin_addr.s_addr = htonl(ntohl(sin4->sin_addr.s_addr) + add_num);
+   pr_info("NFS: parsing nfs mount option ip %08x type: %d ipcont %d [%s]\n", ntohl(sin4->sin_addr.s_addr), type, ip_list->count, __FUNCTION__);
+   break;
+  }
+  case AF_INET6:
+   sin6 = (struct sockaddr_in6 *)&tmp_addr;
+   nfs_multipath_parse_ip_ipv6_add(sin6, add_num);
+   pr_info("NFS: parsing nfs mount option ip %08x%08x%08x%08x type: %d ipcont %d [%s]\n",
+    ntohl(sin6->sin6_addr.in6_u.u6_addr32[0]),
+    ntohl(sin6->sin6_addr.in6_u.u6_addr32[1]),
+    ntohl(sin6->sin6_addr.in6_u.u6_addr32[2]),
+    ntohl(sin6->sin6_addr.in6_u.u6_addr32[3]),
+    type, ip_list->count, __FUNCTION__);
+   break;
+  default:
+   return -EOPNOTSUPP;
+  }
+
+  if (rpc_cmp_addr((const struct sockaddr *)&tmp_addr,
+    (const struct sockaddr *)&addr)) {
+   is_complete = true;
+  }
+  // Deduplicate: consecutive duplicates must be skipped consecutively,
+  // so widen the stride (add_num) each time a duplicate is found.
+  for (i = 0; i < ip_list->count; i++) {
+   duplicate_flag = false;
+   if (rpc_cmp_addr((const struct sockaddr *)&ip_list->address[i], (const struct sockaddr *)&tmp_addr)) {
+    pr_info("NFS: parsing nfs mount option type: %d index %d,same as before %d, add_num %d [%s]\n", type, ip_list->count, i, add_num, __FUNCTION__);
+    add_num++;
+    duplicate_flag = true;
+    break;
+   }
+  }
+  // Not a duplicate: store it and reset the stride back to +1.
+  if (duplicate_flag == false) {
+   pr_info ("this ip not duplicate;");
+   add_num = 1;
+   // A new, non-duplicate entry that exceeds capacity is an error.
+   if ((type == LOCALADDR && ip_list->count >= MAX_SUPPORTED_LOCAL_IP_COUNT) ||
+    (type == REMOTEADDR && ip_list->count >= enfs_get_config_link_count_per_mount())) {
+    pr_info("[MULTIPATH:%s] iplist for type %d reached %d, more than supported limit %d\n",
+     __func__, type, ip_list->count, type == LOCALADDR ? MAX_SUPPORTED_LOCAL_IP_COUNT :
+     enfs_get_config_link_count_per_mount());
+    ip_list->count = 0;
+    return -ENOSPC;
+   }
+   ip_list->address[ip_list->count] = tmp_addr;
+   ip_list->addrlen[ip_list->count] = ip_list->addrlen[ip_list->count - 1];
+   ip_list->count += 1;
+  }
+  if (is_complete == true) {
+   break;
+  }
+
+ }
+ return 0;
+}
+
+/*
+ * Parse one textual IP address and append it to ip_list.
+ * Duplicates are not stored, but a duplicate is swapped to the end of the
+ * list so a following "A-B" range still starts from the right address.
+ *
+ * Returns 0 on success, -EINVAL for an unparsable address, -ENOSPC when
+ * the list is full (ip_list->count is reset to 0 in that case).
+ */
+int enfs_parse_ip_single(struct nfs_ip_list *ip_list, struct net *net_ns,
+ char *cursor, enum nfsmultipathoptions type)
+{
+ int i = 0;
+ struct sockaddr_storage addr;
+ struct sockaddr_storage swap;
+ int len;
+ enfs_log_info("option '%s' type: %d\n", cursor, type);
+
+ len = rpc_pton(net_ns, cursor, strlen(cursor), (struct sockaddr *)&addr, sizeof(addr));
+ if (!len)
+  return -EINVAL;
+
+ // Check whether this address duplicates an earlier entry.
+ for (i = 0; i < ip_list->count; i++) {
+  if (rpc_cmp_addr((const struct sockaddr *)&ip_list->address[i], (const struct sockaddr *)&addr)) {
+   pr_info("NFS: parsing nfs mount option '%s' type: %d index %d same as before index %d [%s]\n", cursor, type, ip_list->count, i, __FUNCTION__);
+   // This address may be the start of a range; even though it is a
+   // duplicate, move it to the last slot so range expansion works.
+   swap = ip_list->address[i];
+   ip_list->address[i] = ip_list->address[ip_list->count -1];
+   ip_list->address[ip_list->count -1] = swap;
+   return 0;
+  }
+ }
+ // Not a duplicate: make sure there is still capacity for it.
+ if ((type == LOCALADDR && ip_list->count >= MAX_SUPPORTED_LOCAL_IP_COUNT) ||
+  (type == REMOTEADDR && ip_list->count >= enfs_get_config_link_count_per_mount())) {
+  pr_info("[MULTIPATH:%s] iplist for type %d reached %d, more than supported limit %d\n",
+   __func__, type, ip_list->count, type == LOCALADDR ? MAX_SUPPORTED_LOCAL_IP_COUNT :
+   enfs_get_config_link_count_per_mount());
+  ip_list->count = 0;
+  return -ENOSPC;
+ }
+ ip_list->address[ip_list->count] = addr;
+ ip_list->addrlen[ip_list->count] = len;
+ ip_list->count++;
+
+ return 0;
+}
+
+/*
+ * Pull the next token off *buf_to_parse.  The list mixes '~' (single
+ * address separator) and '-' (range separator); whichever comes first
+ * decides how to split.  *single is set to false when the returned token
+ * is the start of a range ("A-...").
+ *
+ * Fix: the original evaluated `range_sep > single_sep` even when
+ * single_sep was NULL — a relational comparison between a pointer into
+ * the buffer and a null pointer is undefined behavior in C.  The logic
+ * is restructured so pointers are only ordered when both are non-NULL.
+ */
+char *nfs_multipath_parse_ip_list_get_cursor(char **buf_to_parse, bool *single)
+{
+	char *cursor = NULL;
+	const char *single_sep = strchr(*buf_to_parse, '~');
+	const char *range_sep = strchr(*buf_to_parse, '-');
+
+	*single = true;
+	if (range_sep && (single_sep == NULL || range_sep < single_sep)) {
+		/* "A-B" or "A-B~C": the first separator starts a range. */
+		cursor = strsep(buf_to_parse, "-");
+		if (cursor)
+			*single = false;
+	} else {
+		/* "A~B", "A~B-C", or no separator at all. */
+		cursor = strsep(buf_to_parse, "~");
+	}
+	return cursor;
+}
+
+/* Return true when str parses as a valid IPv4 or IPv6 address. */
+bool enfs_valid_ip(char *str, struct net *net)
+{
+	struct sockaddr_storage parsed;
+
+	/* rpc_pton() returns 0 for strings that are not addresses. */
+	return rpc_pton(net, str, strlen(str),
+			(struct sockaddr *)&parsed, sizeof(parsed)) != 0;
+}
+
+// TODO: deduplication still pending here
+/*
+ * Parse a full address-list mount option ("A~B~C-D...") into the local or
+ * remote ip_list of options.  Single addresses and ranges alternate:
+ * a token flagged as a range start must be followed by a plain token that
+ * terminates the range.  On any error the target list is zeroed.
+ */
+int nfs_multipath_parse_ip_list(char *buffer, struct net *net_ns, struct multipath_mount_options *options,
+ enum nfsmultipathoptions type)
+{
+ char *ptr = NULL;
+ bool prev_range = false;
+ int ret = 0;
+ char *cursor = NULL;
+ bool single = true;
+ struct nfs_ip_list *ip_list_tmp = NULL;
+
+ if (type == LOCALADDR) {
+  ip_list_tmp = options->local_ip_list;
+ } else {
+  ip_list_tmp = options->remote_ip_list;
+ }
+ ip_list_tmp->count = 0;
+
+ enfs_log_info("NFS: parsing nfs mount option '%s' type: %d\n", buffer, type);
+ ptr = buffer;
+ while (ptr != NULL) {
+  cursor = nfs_multipath_parse_ip_list_get_cursor(&ptr, &single);
+  if (!cursor)
+   break;
+
+  // Two consecutive range starts ("A-B-C") are not supported.
+  if (single == false && prev_range == true) {
+   enfs_log_info(" parsing nfs mount option type: %d fail. Multiple Range.\n", type);
+   ret = -EINVAL;
+   goto out;
+  }
+
+  if (prev_range == false) {
+   ret = enfs_parse_ip_single(ip_list_tmp, net_ns, cursor, type);
+   if (ret) {
+    goto out;
+   }
+   if (single == false) {
+    prev_range = true;
+   }
+  } else {
+   // Previous token opened a range; this token closes it.
+   ret = enfs_parse_ip_range(net_ns, cursor, ip_list_tmp, type);
+   if (ret != 0) {
+    goto out;
+   }
+   prev_range = false;
+  }
+ }
+
+out:
+ if (ret) {
+  memset(ip_list_tmp, 0, sizeof(struct nfs_ip_list));
+ }
+
+ return ret;
+}
+
+/* A DNS label may contain only letters, digits, '.', '_' and '-'. */
+static bool dns_valid_char(char c)
+{
+	switch (c) {
+	case '.':
+	case '_':
+	case '-':
+		return true;
+	default:
+		return isalnum(c);
+	}
+}
+
+/* Return true when every one of the len characters in str is DNS-legal. */
+static bool dns_valid_string(const char *str, int len)
+{
+	const char *end = str + len;
+
+	while (str < end) {
+		if (!dns_valid_char(*str++))
+			return false;
+	}
+	return true;
+}
+
+/*
+ * Validate a DNS name: total length 1-255; labels separated by '.', each
+ * 1-63 characters, starting and ending with an alphanumeric, and built
+ * only from DNS-legal characters.
+ *
+ * Improvement: the three error paths each duplicated kfree(copy); use a
+ * single goto-based cleanup path instead (CERT MEM12-C idiom).
+ */
+static bool enfs_valid_dns(const char *s)
+{
+	char *copy;
+	char *tmp;
+	char *token;
+	int sublen;
+	bool valid = false;
+	int len = strlen(s);
+
+	// Total length of the domain name 1 - 255
+	if (len < 1 || len > 255)
+		return false;
+
+	copy = kstrdup(s, GFP_KERNEL);
+	if (!copy) {
+		enfs_log_error("not enough memory.\n");
+		return false;
+	}
+	tmp = copy;
+
+	// Use (.) to separate the name into labels.
+	while ((token = strsep(&tmp, ".")) != NULL) {
+		sublen = strlen(token);
+
+		// Each label contains 1 to 63 characters.
+		if (sublen > 63 || sublen < 1)
+			goto out;
+
+		// A label starts and ends with a digit or letter.
+		if (!isalnum(token[0]) || !isalnum(token[sublen - 1]))
+			goto out;
+
+		// A label consists only of letters, numbers, (_) or (-).
+		if (!dns_valid_string(token, sublen))
+			goto out;
+	}
+
+	valid = true;
+out:
+	kfree(copy);
+	return valid;
+}
+
+/*
+ * Return true when cursor must NOT be treated as a DNS name: it is really
+ * an IP address (possibly the start of an "A-B" range), fails DNS syntax,
+ * or is longer than MAX_DNS_NAME_LEN.
+ *
+ * Note: the '-' is temporarily NUL-ed to test the prefix as an IP and is
+ * restored when that test fails, so cursor is unchanged on the DNS path.
+ */
+bool isInvalidDns(char *cursor, struct net *net_ns)
+{
+ char *ptr = strchr(cursor, '-');
+ if (ptr) {
+  *ptr = '\0';
+  // Check whether the split result is a valid IP address.
+  if (enfs_valid_ip(cursor, net_ns)) {
+   return true;
+  } else {
+   *ptr = '-';
+  }
+ }
+
+ if (enfs_valid_ip(cursor, net_ns)) {
+  return true;
+ }
+
+ if (!enfs_valid_dns(cursor)) {
+  enfs_log_error("invalid dns %s\n", cursor);
+  return true;
+ }
+
+ if (strlen(cursor) > MAX_DNS_NAME_LEN) {
+  return true;
+ }
+
+ return false;
+}
+
+// TODO: deduplication still pending here
+/*
+ * Parse a '~'-separated list of DNS names into a freshly allocated
+ * NFS_ROUTE_DNS_INFO_S attached to options->pRemoteDnsInfo.
+ * Mixing IP addresses into the DNS list is rejected.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when no
+ * name was parsed, -ENOSPC on a bad entry or when MAX_DNS_SUPPORTED is
+ * exceeded.  The structure is freed in nfs_free_parsed_mount_data.
+ */
+int nfs_multipath_parse_dns_list(char *buffer, struct net *net_ns, struct multipath_mount_options *options)
+{
+ NFS_ROUTE_DNS_INFO_S *dns = NULL;
+ char *cursor = NULL;
+ char *ptr;
+
+ // freed in nfs_free_parsed_mount_data
+ dns = kmalloc(sizeof(NFS_ROUTE_DNS_INFO_S), GFP_KERNEL);
+ if (!dns) {
+  return -ENOMEM;
+ }
+
+ dns->dnsNameCount = 0;
+ ptr = buffer;
+ while (ptr) {
+  if (dns->dnsNameCount >= MAX_DNS_SUPPORTED) {
+   enfs_log_error("more than supported limit,support max dns:%d.\n",
+       MAX_DNS_SUPPORTED);
+   goto out;
+  }
+  cursor = strsep(&ptr, "~");
+  if (!cursor) {
+   break;
+  }
+  // Config of mixed IP addresses and domain names is not supported.
+  if (isInvalidDns(cursor, net_ns)) {
+   goto out;
+  }
+
+  /* isInvalidDns() has bounded strlen(cursor) <= MAX_DNS_NAME_LEN,
+   * so this copy fits in dnsname. */
+  strcpy(dns->routeRemoteDnsList[dns->dnsNameCount].dnsname, cursor);
+  dns->dnsNameCount++;
+ }
+
+ if (dns->dnsNameCount == 0) {
+  kfree(dns);
+  return -EINVAL;
+ }
+ options->pRemoteDnsInfo = dns;
+ return 0;
+out:
+ kfree(dns);
+ return -ENOSPC;
+}
+
+/*
+ * Classify a remoteaddrs string as REMOTE_IP or REMOTE_DNS by testing the
+ * first token before a '-' (range) or '~' (list) separator.  The
+ * separator byte is temporarily replaced with NUL and always restored,
+ * so str is unchanged on return.
+ */
+int parse_remote_type(char *str, struct net *net)
+{
+ int ret;
+ char *ptr = strchr(str, '-');
+ if (ptr) {
+  *ptr = '\0';
+  ret = enfs_valid_ip(str, net) ? REMOTE_IP : REMOTE_DNS;
+  *ptr = '-';
+ } else {
+  ret = enfs_valid_ip(str, net) ? REMOTE_IP : REMOTE_DNS;
+ }
+
+ if (ret == REMOTE_IP) {
+  return REMOTE_IP;
+ }
+
+ // Not an IP range: retry with the first '~'-separated token.
+ ptr = strchr(str, '~');
+ if (ptr) {
+  *ptr = '\0';
+  ret = enfs_valid_ip(str, net) ? REMOTE_IP : REMOTE_DNS;
+  *ptr = '~';
+ } else {
+  ret = enfs_valid_ip(str, net) ? REMOTE_IP : REMOTE_DNS;
+ }
+
+ return ret;
+}
+
+/*
+ * Dispatch a remoteaddrs option to the IP-list or DNS-list parser based
+ * on the token type.  When addresses are given as IPs, any previously
+ * parsed DNS names are discarded (dnsNameCount reset to 0).
+ */
+static int enfs_parse_remoteaddrs(char *str, struct net *net,
+      struct multipath_mount_options *options)
+{
+ if (parse_remote_type(str, net) == REMOTE_IP) {
+  options->pRemoteDnsInfo->dnsNameCount = 0;
+  return nfs_multipath_parse_ip_list(str, net, options, REMOTEADDR);
+ }
+
+ return nfs_multipath_parse_dns_list(str, net, options);
+}
+
+/* Reject the unspecified (0.0.0.0) and broadcast (255.255.255.255) IPv4 addresses. */
+int nfs_multipath_parse_options_check_ipv4_valid(struct sockaddr_in *addr)
+{
+	__be32 ip = addr->sin_addr.s_addr;
+
+	/* All-zero and all-one patterns are byte-order invariant. */
+	return (ip == 0 || ip == 0xffffffff) ? -EINVAL : 0;
+}
+
+/* Reject the all-zero (::) and all-one IPv6 addresses. */
+int nfs_multipath_parse_options_check_ipv6_valid(struct sockaddr_in6 *addr)
+{
+	bool all_zero = true;
+	bool all_ones = true;
+	int word;
+
+	for (word = 0; word < 4; word++) {
+		u32 value = addr->sin6_addr.in6_u.u6_addr32[word];
+
+		if (value != 0)
+			all_zero = false;
+		if (value != 0xffffffff)
+			all_ones = false;
+	}
+	return (all_zero || all_ones) ? -EINVAL : 0;
+}
+
+/* Dispatch address validation by family; unknown families are invalid. */
+int nfs_multipath_parse_options_check_ip_valid(struct sockaddr_storage *address)
+{
+	switch (address->ss_family) {
+	case AF_INET:
+		return nfs_multipath_parse_options_check_ipv4_valid(
+			(struct sockaddr_in *)address);
+	case AF_INET6:
+		return nfs_multipath_parse_options_check_ipv6_valid(
+			(struct sockaddr_in6 *)address);
+	default:
+		return -EINVAL;
+	}
+}
+/*
+ * Validate every local and remote address in options.
+ * Returns 0 when all pass (or options is NULL), else the first error.
+ */
+int nfs_multipath_parse_options_check_valid(struct multipath_mount_options *options)
+{
+	int idx;
+	int err;
+
+	if (options == NULL)
+		return 0;
+
+	for (idx = 0; idx < options->local_ip_list->count; idx++) {
+		err = nfs_multipath_parse_options_check_ip_valid(
+			&options->local_ip_list->address[idx]);
+		if (err != 0)
+			return err;
+	}
+
+	for (idx = 0; idx < options->remote_ip_list->count; idx++) {
+		err = nfs_multipath_parse_options_check_ip_valid(
+			&options->remote_ip_list->address[idx]);
+		if (err != 0)
+			return err;
+	}
+
+	return 0;
+}
+/*
+ * Reject configurations where any local address equals any remote
+ * address.  Returns 0 when disjoint (or either list is empty / options
+ * is NULL), -ENOTSUPP on the first overlap found.
+ */
+int nfs_multipath_parse_options_check_duplicate(struct multipath_mount_options *options)
+{
+ int i,j;
+ if (options == NULL || options->local_ip_list->count == 0 || options->remote_ip_list->count == 0) {
+  return 0;
+ }
+
+ for (i = 0; i < options->local_ip_list->count; i++) {
+  for (j = 0; j < options->remote_ip_list->count; j++) {
+   if (rpc_cmp_addr((const struct sockaddr *)&options->local_ip_list->address[i], (const struct sockaddr *)&options->remote_ip_list->address[j])) {
+    printk("local_addr index %d as same as remote_addr index %d \n.", i, j);
+    return -ENOTSUPP;
+   }
+  }
+ }
+ return 0;
+}
+
+/*
+ * Combined sanity check: first reject individually invalid addresses,
+ * then reject any local/remote address overlap.
+ */
+int nfs_multipath_parse_options_check(struct multipath_mount_options *options)
+{
+	int err;
+
+	err = nfs_multipath_parse_options_check_valid(options);
+	if (err != 0) {
+		printk("has invaild ip.\n");
+		return err;
+	}
+
+	return nfs_multipath_parse_options_check_duplicate(options);
+}
+
+/*
+ * Allocate a zeroed multipath_mount_options with its three sub-structures
+ * (local list, remote list, DNS info) and store it in *enfs_option.
+ * Returns 0 on success or -ENOMEM with nothing leaked.
+ *
+ * Improvement: the original repeated an ever-growing kfree() chain in each
+ * failure branch; use the standard goto-based unwind instead.
+ */
+int nfs_multipath_alloc_options(void **enfs_option)
+{
+	struct multipath_mount_options *options;
+
+	options = kzalloc(sizeof(struct multipath_mount_options), GFP_KERNEL);
+	if (options == NULL)
+		return -ENOMEM;
+
+	options->local_ip_list = kzalloc(sizeof(struct nfs_ip_list), GFP_KERNEL);
+	if (options->local_ip_list == NULL)
+		goto free_options;
+
+	options->remote_ip_list = kzalloc(sizeof(struct nfs_ip_list), GFP_KERNEL);
+	if (options->remote_ip_list == NULL)
+		goto free_local;
+
+	options->pRemoteDnsInfo = kzalloc(sizeof(NFS_ROUTE_DNS_INFO_S), GFP_KERNEL);
+	if (options->pRemoteDnsInfo == NULL)
+		goto free_remote;
+
+	*enfs_option = options;
+	return 0;
+
+free_remote:
+	kfree(options->remote_ip_list);
+free_local:
+	kfree(options->local_ip_list);
+free_options:
+	kfree(options);
+	return -ENOMEM;
+}
+
+/*
+ * Entry point for parsing one multipath mount option (localaddrs or
+ * remoteaddrs).  Lazily loads the enfs config, enforces global link and
+ * mount limits, allocates *enfs_option on first use, then dispatches to
+ * the appropriate list parser and runs the combined sanity checks.
+ *
+ * Fix: the original called nfs_multipath_parse_options_check_duplicate()
+ * and then nfs_multipath_parse_options_check(), which itself re-runs the
+ * duplicate check — the redundant standalone call is removed (behavior is
+ * unchanged; the check is side-effect free).
+ */
+int nfs_multipath_parse_options(enum nfsmultipathoptions type, char *str,
+    void **enfs_option, struct net *net_ns)
+{
+	int rc;
+	struct multipath_mount_options *options = NULL;
+	int link_count = enfs_link_count_num();
+	int mount_count = enfs_mount_count();
+
+	/* NOTE(review): g_enfsconfigLoad is read/written without locking;
+	 * concurrent first mounts could both call enfs_config_load() —
+	 * confirm that is idempotent. */
+	if (g_enfsconfigLoad == ENFS_CONFIG_UNLOAD) {
+		enfs_config_load();
+		g_enfsconfigLoad = ENFS_CONFIG_LOAD;
+	}
+	/* Native links and multipath links */
+	if (link_count >= enfs_get_config_link_count_total() - 1 ||
+		mount_count >= ENFS_MAX_MOUNT_COUNT) {
+		enfs_log_error(
+			"link count:%d fs count:%d count2:%d exceeds the limit,can not create new "
+			"multipath nfs.\n",
+			link_count, mount_count, enfs_get_config_link_count_total());
+		return -EINVAL;
+	}
+	if ((str == NULL) || (enfs_option == NULL) || (net_ns == NULL))
+		return -EINVAL;
+
+	if (*enfs_option == NULL) {
+		rc = nfs_multipath_alloc_options(enfs_option);
+		if (rc != 0) {
+			enfs_log_error("alloc enfs_options failed! errno:%d\n", rc);
+			return rc;
+		}
+	}
+	options = *enfs_option;
+
+	if (type == LOCALADDR) {
+		rc = nfs_multipath_parse_ip_list(str, net_ns, options, type);
+	} else if (type == REMOTEADDR) {
+		/* alloc and release need to modify */
+		rc = enfs_parse_remoteaddrs(str, net_ns, options);
+	} else {
+		rc = -EOPNOTSUPP;
+	}
+
+	/* After parsing, reject configs where a local address equals a
+	 * remote address (checked inside ..._options_check). */
+	if (rc == 0)
+		rc = nfs_multipath_parse_options_check(options);
+
+	return rc;
+}
+
+/*
+ * Free a multipath_mount_options and all of its sub-structures, then
+ * clear the caller's pointer.  Safe to call with NULL or already-freed
+ * (*enfs_option == NULL) handles.
+ */
+void nfs_multipath_free_options(void **enfs_option)
+{
+	struct multipath_mount_options *opt;
+
+	if (enfs_option == NULL || *enfs_option == NULL)
+		return;
+
+	opt = (struct multipath_mount_options *)*enfs_option;
+
+	/* kfree(NULL) is a no-op, so no per-field guards are needed. */
+	kfree(opt->remote_ip_list);
+	kfree(opt->local_ip_list);
+	kfree(opt->pRemoteDnsInfo);
+	kfree(opt);
+	*enfs_option = NULL;
+}
+
+/* Return true when ip_str parses as a literal IPv4 or IPv6 address. */
+static bool is_valid_ip_address(const char *ip_str)
+{
+	struct in_addr v4;
+	struct in6_addr v6;
+
+	return in4_pton(ip_str, -1, (u8 *)&v4, '\0', NULL) == 1 ||
+	       in6_pton(ip_str, -1, (u8 *)&v6, '\0', NULL) == 1;
+}
+
+/*
+ * When automatic DNS multipath resolution is enabled and the mount host
+ * is a DNS name (not an IP literal) and no multipath options were given,
+ * synthesize a one-entry DNS option list from the hostname.
+ */
+void enfs_set_mount_data(void **enfs_option, const char *hostname)
+{
+ int error;
+ struct multipath_mount_options *opt;
+
+ if (!enfs_get_config_dns_auto_multipath_resolution() ||
+  !enfs_valid_dns(hostname) || is_valid_ip_address(hostname) ||
+  *enfs_option) {
+  return;
+ }
+
+ error = nfs_multipath_alloc_options(enfs_option);
+ if (error) {
+  enfs_log_error("alloca option err:%d\n", error);
+  return;
+ }
+ opt = *enfs_option;
+ opt->pRemoteDnsInfo->dnsNameCount = 1;
+ /* enfs_valid_dns() bounds hostname to 255 bytes; assumes dnsname is at
+  * least that large (MAX_DNS_NAME_LEN) — TODO confirm against enfs.h. */
+ strcpy(opt->pRemoteDnsInfo->routeRemoteDnsList[0].dnsname, hostname);
+}
diff --git a/fs/nfs/enfs/enfsrpc/enfs_rpc_init.c b/fs/nfs/enfs/enfsrpc/enfs_rpc_init.c
new file mode 100644
index 0000000..c2be158
--- /dev/null
+++ b/fs/nfs/enfs/enfsrpc/enfs_rpc_init.c
@@ -0,0 +1,9 @@
+#include "enfs_lookup_cache.h"
+
+/* Initialise the ENFS RPC layer; the lookup-cache subsystem is its only component. */
+int enfs_rpc_init(void)
+{
+	return enfs_lookupcache_init();
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfsrpc/enfs_rpc_init.h b/fs/nfs/enfs/enfsrpc/enfs_rpc_init.h
new file mode 100644
index 0000000..ee43b54
--- /dev/null
+++ b/fs/nfs/enfs/enfsrpc/enfs_rpc_init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Client-side ENFS adapt header.
+ *
+ * Copyright (c) 2023. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+
+#ifndef _ENFS_RPC_INIT_H_
+#define _ENFS_RPC_INIT_H_
+
+int enfs_rpc_init(void);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.c b/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.c
new file mode 100644
index 0000000..51219a9
--- /dev/null
+++ b/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.c
@@ -0,0 +1,40 @@
+#include
+#include
+#include "enfs_rpc_proc.h"
+#include "enfs_lookup_cache.h"
+#include "enfs_log.h"
+#include "enfs.h"
+
+struct rpc_procinfo enfs_procedures[ENFSPROC_MAX] = {{0}};
+static ktime_t start_enfs_rpc_send = {0};
+static bool start_enfs_rpc_send_init = false;
+
+/*
+ * Issue a synchronous ENFS RPC for the given opcode.  Failures are
+ * logged at most once per 5 minutes to avoid flooding the kernel log.
+ * Returns the rpc_call_sync() status.
+ */
+int enfs_rpc_send(struct rpc_clnt *clnt, unsigned int opcode, EnfsGetConfigArgs *args, EnfsGetConfigRes *res)
+{
+ int status;
+ int32_t interval_ms = 5 * 60 * 1000; /* log rate-limit window: 5 minutes */
+
+ struct rpc_message msg = {
+  .rpc_proc = &enfs_procedures[opcode],
+  .rpc_argp = args,
+  .rpc_resp = res,
+ };
+
+ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_TIMEOUT | RPC_TASK_SOFTCONN | RPC_TASK_ENFS);
+ /* every 5 minutes prints the error log */
+ /* NOTE(review): start_enfs_rpc_send / start_enfs_rpc_send_init are
+  * file-scope statics updated without synchronization; concurrent
+  * callers may race on the rate-limit state — confirm acceptable. */
+ if (status) {
+  if (!start_enfs_rpc_send_init) {
+   start_enfs_rpc_send_init = true;
+   start_enfs_rpc_send = ktime_get();
+  } else if (enfs_timeout_ms(&start_enfs_rpc_send, interval_ms)) {
+   enfs_log_error("NFS reply failed, status:%d \n", status);
+   start_enfs_rpc_send = ktime_get();
+  }
+ }
+ return status;
+}
+
+/* Register an RPC procedure descriptor in the ENFS procedure table (by value copy). */
+void enfs_proc_reg(unsigned int opcode, const struct rpc_procinfo *rpc_proc)
+{
+ enfs_procedures[opcode] = *rpc_proc;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.h b/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.h
new file mode 100644
index 0000000..b62ef6f
--- /dev/null
+++ b/fs/nfs/enfs/enfsrpc/enfs_rpc_proc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Client-side ENFS adapt header.
+ *
+ * Copyright (c) 2023. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+
+#ifndef _ENFS_RPC_PROC_H_
+#define _ENFS_RPC_PROC_H_
+
+#include "enfs_lookup_cache.h"
+
+#define ENFS_RPC_PROG_NUM 733301
+#define ENFS_RPC_PROG_VERSION 1
+#define ENFSPROC_MAX 2
+#define ENFSPROC_LOOKUPCACHE 0
+
+int enfs_rpc_send(struct rpc_clnt *clnt, unsigned int opcode, EnfsGetConfigArgs *args, EnfsGetConfigRes *res);
+void enfs_proc_reg(unsigned int opcode, const struct rpc_procinfo *rpc_proc);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/enfsrpc/lookupcache/enfs_lookup_cache.c b/fs/nfs/enfs/enfsrpc/lookupcache/enfs_lookup_cache.c
new file mode 100644
index 0000000..3d0d4e1
--- /dev/null
+++ b/fs/nfs/enfs/enfsrpc/lookupcache/enfs_lookup_cache.c
@@ -0,0 +1,517 @@
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs_rpc_proc.h"
+#include "enfs_lookup_cache.h"
+#include "enfs_multipath_client.h"
+#include "enfs_log.h"
+#include "enfs_config.h"
+#include "netns.h"
+#include "enfs.h"
+
+#define ENFS_LOOKUPCACHE_ACTIVE (1<<30) /* Indicates that the file system is currently active*/
+
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+#define ENFS_LOOKUP_ACTIVE SB_ACTIVE /* Indicates that the file system is currently active*/
+#else
+#define ENFS_LOOKUP_ACTIVE MS_ACTIVE /* Indicates that the file system is currently active*/
+#endif
+
+static struct task_struct *lookupcache_thread;
+static struct workqueue_struct *lookupcache_workq = NULL;
+static spinlock_t lookupcache_workq_lock;
+static spinlock_t g_lookupcache_switch_lock;
+static int g_lookupcache_switch = ENFS_LOOKUPCACHE_ENABLE;
+static ktime_t start_query_lookup = {0};
+static bool start_query_lookup_init = false;
+
+const struct rpc_procinfo enfs_lookup_cahce = {
+ PROC(LOOKUPCACHE, lookupcache, lookupcache, 1)
+};
+
+/* XDR-encode one 32-bit value in network byte order.
+ * NOTE(review): xdr_reserve_space() can return NULL on buffer exhaustion;
+ * this follows the common NFS-encoder pattern of assuming the reply
+ * buffer was pre-sized — confirm the size estimate covers all fields. */
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
+{
+ __be32 *p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(value);
+}
+
+/* XDR-encode an NFS file handle as an opaque (4-byte length + data). */
+static void encode_nfs_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ /* A handle larger than the advertised maximum indicates a caller bug. */
+ WARN_ON_ONCE(fh->size > ENFS_fhandle_sz);
+ p = xdr_reserve_space(xdr, 4 + fh->size);
+ xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+/* XDR-decode one 32-bit value; returns 0 on success, -EIO on short buffer. */
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
+{
+	__be32 *p = xdr_inline_decode(xdr, 4);
+
+	if (unlikely(p == NULL))
+		return -EIO;
+	*value = be32_to_cpup(p);
+	return 0;
+}
+
+/*
+ * Encode the LOOKUPCACHE request arguments.  Field order here must match
+ * the server-side decoder: version, mask, reserve, fh (opaque), vers.
+ */
+void enfs_xdr_enc_lookupcacheargs(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+    const void *data)
+{
+ const EnfsGetConfigArgs *args = data;
+ encode_uint32(xdr, args->version);
+ encode_uint32(xdr, args->mask);
+ encode_uint32(xdr, args->reserve);
+ encode_nfs_fh(xdr, &args->fh);
+ encode_uint32(xdr, args->vers);
+}
+
+/*
+ * Decode the LOOKUPCACHE reply.  Field order must match the server-side
+ * encoder: version, mask, lookupCache, reserve, status.
+ * Returns 0 on success or -EIO on a truncated reply.
+ */
+int enfs_xdr_dec_lookupcacheres(struct rpc_rqst *req,
+    struct xdr_stream *xdr,
+    void *data)
+{
+	EnfsGetConfigRes *res = data;
+	int error;
+
+	error = decode_uint32(xdr, &res->version);
+	if (unlikely(error))
+		return error;
+	error = decode_uint32(xdr, &res->mask);
+	if (unlikely(error))
+		return error;
+	error = decode_uint32(xdr, &res->lookupCache);
+	if (unlikely(error))
+		return error;
+	error = decode_uint32(xdr, &res->reserve);
+	if (unlikely(error))
+		return error;
+	return decode_uint32(xdr, &res->status);
+}
+
+/*
+ * Clear the lookup-cache and get-capability flags on every nfs_server in
+ * the current task's network namespace.  Called when the lookupcache
+ * switch is turned off.
+ */
+void enfs_clean_server_lookup_cache_flag(void)
+{
+ struct nfs_net *nn = net_generic(current->nsproxy->net_ns, nfs_net_id);
+ struct nfs_server *pos;
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry(pos, &nn->nfs_volume_list, master_link) {
+  pos->enfs_flags &= ~(ENFS_SERVER_FLAG_LOOKUP_CACHE_NOREG |
+   ENFS_SERVER_FLAG_LOOKUP_CACHE_NONE | ENFS_SERVER_FLAG_GET_CAP_RUNNING);
+ }
+ spin_unlock(&nn->nfs_client_lock);
+}
+
+/*
+ * Translate the server-reported lookupCache level into enfs_flags bits on
+ * the nfs_server.  Unknown levels are logged and leave the flags as-is.
+ */
+void enfs_update_lookup_cache_flag_to_server(u32 result, struct nfs_server *server)
+{
+ spin_lock(&g_lookupcache_switch_lock);
+ /* If not use multipath mount option, don't set enfs flag.
+ Here need use spinlock to make sure that this process is mutually exclusive with the cleanup process
+ when change the switch to disable.
+ */
+ if (g_lookupcache_switch == ENFS_LOOKUPCACHE_ENABLE &&
+  server->nfs_client && server->nfs_client->cl_multipath_data) {
+  switch (result) {
+  case ENFS_LOOKUPCACHE_ALL:
+   server->enfs_flags &= ~(ENFS_SERVER_FLAG_LOOKUP_CACHE_NOREG | ENFS_SERVER_FLAG_LOOKUP_CACHE_NONE);
+   break;
+  case ENFS_LOOKUPCACHE_NONEG:
+   server->enfs_flags &= ~ENFS_SERVER_FLAG_LOOKUP_CACHE_NONE;
+   server->enfs_flags |= ENFS_SERVER_FLAG_LOOKUP_CACHE_NOREG;
+   break;
+  case ENFS_LOOKUPCACHE_NONE:
+   server->enfs_flags |= (ENFS_SERVER_FLAG_LOOKUP_CACHE_NOREG | ENFS_SERVER_FLAG_LOOKUP_CACHE_NONE);
+   break;
+  default:
+   enfs_log_info("Get invalid lookupCache:%u.\n", result);
+  }
+ }
+ spin_unlock(&g_lookupcache_switch_lock);
+}
+
+/*
+ * Send the LOOKUPCACHE capability query for work_info's server, then (on
+ * success) re-locate that server under the per-net client lock, verify its
+ * superblock is still active and its root fh matches the one we queried,
+ * and apply the reported lookupcache level and server version.
+ *
+ * RPC failures are logged at most once per 5 minutes.
+ */
+int enfs_query_lookup_cache(struct lookupcache_work *work_info)
+{
+ int ret;
+ EnfsGetConfigRes enfsRes = {0};
+ EnfsGetConfigArgs args = {0};
+ struct nfs_server *pos = NULL;
+ struct net *net;
+ struct nfs_net *nn = NULL;
+ struct inode *inode = NULL;
+ struct nfs_fh *fh = NULL;
+ struct dentry *s_root;
+ struct super_block *super;
+ int32_t interval_ms = 5 * 60 * 1000;
+ struct enfs_xprt_context *ctx = NULL;
+
+ args.mask = BIT(ENFS_LOOKUP_CACHE_LEVEL);
+ args.fh = work_info->fh;
+ args.version = ENFS_VERSION_BUTT - 1;
+ args.vers = work_info->cl_rpcclient->cl_vers;
+
+ /* The RPC is issued before taking any lock below. */
+ ret = enfs_rpc_send(work_info->cl_rpcclient, ENFSPROC_LOOKUPCACHE, &args, &enfsRes);
+
+ /* every 5 minutes prints the error log */
+ if (ret) {
+  if (!start_query_lookup_init) {
+   start_query_lookup_init = true;
+   start_query_lookup = ktime_get();
+  } else if (enfs_timeout_ms(&start_query_lookup, interval_ms)) {
+   printk(KERN_ERR "ENFS get lookupcache failed %d.\n", ret);
+   start_query_lookup = ktime_get();
+  }
+  return ret;
+ }
+
+ rcu_read_lock();
+ for_each_net_rcu (net) {
+  nn = net_generic(net, nfs_net_id);
+  if (nn == NULL) {
+   continue;
+  }
+
+  spin_lock(&nn->nfs_client_lock);
+  list_for_each_entry(pos, &nn->nfs_volume_list, master_link) {
+   /* Match by pointer: the server may have been unmounted while
+    * the RPC was in flight, in which case it is simply absent. */
+   if (pos != work_info->server) {
+    continue;
+   }
+
+   if (!pos->super || !(pos->super->s_flags & ENFS_LOOKUP_ACTIVE) || !pos->super->s_root) {
+    break;
+   }
+
+   super = pos->super;
+   if (!super) {
+    break;
+   }
+   s_root = super->s_root;
+   if (!s_root) {
+    break;
+   }
+   if (atomic_read(&super->s_active) == 0) {
+    break;
+   }
+
+   inode = d_inode(s_root);
+   if (!inode) {
+    break;
+   }
+   fh = NFS_FH(inode);
+   /* Only apply the result if the root fh is still the one queried. */
+   if (memcmp(fh, &work_info->fh, sizeof(struct nfs_fh)) == 0) {
+    if (!ret) {
+     enfs_update_lookup_cache_flag_to_server(enfsRes.lookupCache, pos);
+    }
+    pos->enfs_flags &= ~ENFS_SERVER_FLAG_GET_CAP_RUNNING;
+    xprt_get(pos->client->cl_xprt);
+    ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(pos->client->cl_xprt);
+    if (ctx == NULL) {
+     enfs_log_error("The xprt multipath ctx is not valid.\n");
+     xprt_put(pos->client->cl_xprt);
+     spin_unlock(&nn->nfs_client_lock);
+     return ret;
+    }
+    if (enfsRes.version == ENFS_SERVER_VERSION_BASE) {
+     ctx->version = ENFS_SERVER_VERSION_BASE;
+    } else {
+     ctx->version = 0;
+    }
+    xprt_put(pos->client->cl_xprt);
+   }
+  }
+  spin_unlock(&nn->nfs_client_lock);
+  break;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/*
+ * Decide whether a capability query should be scheduled for this server:
+ * no query already in flight, multipath data attached, and the superblock
+ * active with a valid root.
+ */
+bool enfs_query_lookup_cache_pre_check(struct nfs_server *server)
+{
+	struct multipath_client_info *info = NULL;
+
+	if (server->enfs_flags & ENFS_SERVER_FLAG_GET_CAP_RUNNING)
+		return false;
+
+	if (server->nfs_client != NULL)
+		info = server->nfs_client->cl_multipath_data;
+
+	return info != NULL && server->super != NULL &&
+	       (server->super->s_flags & ENFS_LOOKUP_ACTIVE) != 0 &&
+	       server->super->s_root != NULL;
+}
+
+/*
+ * Workqueue callback: run the lookup-cache capability query, then release
+ * the rpc_clnt reference taken at queue time and free the work item.
+ */
+void lookupcache_execute_work(struct work_struct *work)
+{
+	struct lookupcache_work *info =
+		container_of(work, struct lookupcache_work, work_lookup);
+	int err;
+
+	if (!info->server) {
+		enfs_log_error("work_info->nfs_client null .\n");
+	} else {
+		err = enfs_query_lookup_cache(info);
+		if (err)
+			enfs_log_error("lookupcache execute failed ,ret %d", err);
+	}
+
+	rpc_release_client(info->cl_rpcclient);
+	kfree(info);
+}
+
+/*
+ * Queue work on the lookupcache workqueue if it still exists.
+ * Returns false when the queue is gone (shutdown) or the work was
+ * already pending.
+ */
+bool lookupcache_workqueue_queue_work(struct work_struct *work)
+{
+	bool queued = false;
+
+	spin_lock(&lookupcache_workq_lock);
+	if (lookupcache_workq != NULL)
+		queued = queue_work(lookupcache_workq, work);
+	spin_unlock(&lookupcache_workq_lock);
+	return queued;
+}
+
+/*
+ * Allocate and queue a lookup-cache query work item for (fh, server),
+ * pinning the client's rpc_clnt for the duration of the work.  When the
+ * workqueue is already torn down the pinned clnt is handed to the caller
+ * via `head` for deferred release.
+ *
+ * Fix: when {refcount,atomic}_inc_not_zero() failed, the original freed
+ * work_info and item but FELL THROUGH and kept using work_info — a
+ * use-after-free followed by a double free in the queue-failure path.
+ * Return -EINVAL immediately instead.
+ */
+int lookupcache_add_work(struct nfs_fh *fh, struct nfs_server *server, struct list_head *head)
+{
+	struct lookupcache_work *work_info;
+	struct rpcclnt_release_item *item;
+	bool ret = false;
+
+	if (IS_ERR(fh) || fh == NULL) {
+		enfs_log_error("The fh ptr is not exist.\n");
+		return -EINVAL;
+	}
+
+	if (IS_ERR(server) || server == NULL) {
+		enfs_log_error("The clnt ptr is not exist.\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): GFP_ATOMIC here vs GFP_KERNEL below looks
+	 * inconsistent — confirm which context this runs in. */
+	work_info = kzalloc(sizeof(struct lookupcache_work), GFP_ATOMIC);
+	if (work_info == NULL)
+		return -ENOMEM;
+
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		enfs_log_error("alloc item failed.\n");
+		kfree(work_info);
+		return -ENOMEM;
+	}
+
+#ifdef ENFS_OPENEULER_660
+	if (!refcount_inc_not_zero(&server->nfs_client->cl_rpcclient->cl_count)) {
+#else
+	if (!atomic_inc_not_zero(&server->nfs_client->cl_rpcclient->cl_count)) {
+#endif
+		/* The rpc_clnt is already being destroyed: give up. */
+		kfree(work_info);
+		kfree(item);
+		return -EINVAL;
+	}
+	work_info->fh = *fh;
+	work_info->server = server;
+	work_info->cl_rpcclient = server->nfs_client->cl_rpcclient;
+
+	INIT_WORK(&work_info->work_lookup, lookupcache_execute_work);
+
+	ret = lookupcache_workqueue_queue_work(&work_info->work_lookup);
+	if (!ret) {
+		/* Queueing failed: defer the clnt release to the caller. */
+		item->clnt = work_info->cl_rpcclient;
+		list_add_tail(&item->node, head);
+		kfree(work_info);
+		work_info = NULL;
+		return -EINVAL;
+	}
+
+	kfree(item);
+	server->enfs_flags |= ENFS_SERVER_FLAG_GET_CAP_RUNNING;
+	return 0;
+}
+
+/*
+ * Schedule a one-off capability query for a single server (e.g. at mount
+ * time), then release any rpc_clnt references the scheduling could not
+ * hand to the workqueue.
+ */
+void enfs_trigger_get_capability(struct nfs_server *server)
+{
+ struct nfs_fh *fh = NULL;
+ LIST_HEAD(free_list);
+ if (enfs_query_lookup_cache_pre_check(server)) {
+  struct inode *inode = d_inode(server->super->s_root);
+  if (!inode) {
+   return;
+  }
+  fh = NFS_FH(inode);
+  lookupcache_add_work(fh, server, &free_list);
+ }
+ enfs_destroy_rpcclnt_list(&free_list);
+}
+
+/*
+ * Periodic scan: walk every nfs_server in every network namespace and
+ * queue a capability query for each eligible multipath server.  References
+ * that could not be handed to the workqueue are released after dropping
+ * the locks via enfs_destroy_rpcclnt_list().
+ */
+void lookupcache_loop_rpclnt(void)
+{
+ struct net *net;
+ struct nfs_net *nn;
+ struct nfs_server *pos;
+ struct nfs_fh *fh = NULL;
+ struct dentry *s_root;
+ struct super_block *super;
+ struct inode *inode = NULL;
+ LIST_HEAD(free_list);
+ rcu_read_lock();
+ for_each_net_rcu (net) {
+  nn = net_generic(net, nfs_net_id);
+  if (nn == NULL) {
+   continue;
+  }
+
+  if (list_empty(&nn->nfs_volume_list)) {
+   continue;
+  }
+  spin_lock(&nn->nfs_client_lock);
+  list_for_each_entry(pos, &nn->nfs_volume_list, master_link) {
+   if (enfs_query_lookup_cache_pre_check(pos)) {
+    /* Re-validate the superblock under the client lock
+     * before touching its root inode. */
+    super = pos->super;
+    if (!super) {
+     continue;
+    }
+    s_root = super->s_root;
+    if (!s_root) {
+     continue;
+    }
+    if (atomic_read(&super->s_active) == 0) {
+     continue;
+    }
+    inode = d_inode(s_root);
+    if (!inode) {
+     continue;
+    }
+    fh = NFS_FH(inode);
+    lookupcache_add_work(fh, pos, &free_list);
+   }
+  }
+  spin_unlock(&nn->nfs_client_lock);
+  break;
+ }
+ rcu_read_unlock();
+ enfs_destroy_rpcclnt_list(&free_list);
+}
+
+/*
+ * Re-read the lookupcache switch from the config and, on an
+ * enable->disable transition, clear the lookup-cache flags on all servers.
+ *
+ * Fix: the original read g_lookupcache_switch for old_value OUTSIDE the
+ * spinlock that protects it, racing with readers/writers under the lock;
+ * the read is now performed while holding g_lookupcache_switch_lock.
+ */
+void enfs_lookupcache_update_switch(void)
+{
+	int new_value = enfs_get_config_lookupcache_state();
+	int old_value;
+
+	spin_lock(&g_lookupcache_switch_lock);
+	old_value = g_lookupcache_switch;
+	if (old_value != new_value)
+		g_lookupcache_switch = new_value;
+	spin_unlock(&g_lookupcache_switch_lock);
+
+	if ((old_value != new_value) && (new_value == ENFS_LOOKUPCACHE_DISABLE))
+		enfs_clean_server_lookup_cache_flag();
+}
+
+/*
+ * Kernel thread body: once per second, refresh the switch state and, when
+ * both the lookupcache and multipath features are enabled and the
+ * configured interval has elapsed, rescan all servers for capability
+ * queries.  Exits when kthread_stop() is called.
+ */
+int lookupcache_routine(void *data)
+{
+ ktime_t start = ktime_get();
+ int32_t interval_ms;
+ while (!kthread_should_stop()) {
+  enfs_lookupcache_update_switch();
+  interval_ms = enfs_get_config_lookupcache_interval() * 1000;
+  if ((g_lookupcache_switch == ENFS_LOOKUPCACHE_ENABLE) &&
+   enfs_timeout_ms(&start, interval_ms) &&
+   (enfs_get_config_multipath_state() == ENFS_MULTIPATH_ENABLE)) {
+   start = ktime_get();
+   lookupcache_loop_rpclnt();
+  }
+  enfs_msleep(1000);
+ }
+ return 0;
+}
+
+/*
+ * Start the lookupcache polling kthread.
+ *
+ * Fix: the original stored kthread_run()'s return value into
+ * lookupcache_thread unconditionally, so on failure the global held an
+ * ERR_PTR — and enfs_lookupcache_fini()'s non-NULL check would then call
+ * kthread_stop() on it.  Only publish the task pointer on success.
+ */
+int lookupcache_start(void)
+{
+	struct task_struct *task;
+
+	task = kthread_run(lookupcache_routine, NULL, "enfs_lookupcache");
+	if (IS_ERR(task)) {
+		enfs_log_error("Failed to create thread lookupcache get.\n");
+		return PTR_ERR(task);
+	}
+	lookupcache_thread = task;
+	return 0;
+}
+
+/*
+ * Create the lookupcache workqueue and publish it under the workqueue
+ * lock so lookupcache_workqueue_queue_work() sees a consistent pointer.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int enfs_lookupcache_workqueue_init(void)
+{
+ struct workqueue_struct *queue = NULL;
+
+ queue = create_workqueue("enfs_lookupcache_workqueue");
+ if (queue == NULL) {
+  enfs_log_error("create enfs_lookupcache workqueue failed.\n");
+  return -ENOMEM;
+ }
+
+ spin_lock(&lookupcache_workq_lock);
+ lookupcache_workq = queue;
+ spin_unlock(&lookupcache_workq_lock);
+ return 0;
+}
+
+/*
+ * Tear down the lookupcache workqueue: detach it under the lock so no new
+ * work can be queued, then flush and destroy it outside the lock.
+ */
+void lookupcache_workqueue_fini(void)
+{
+	struct workqueue_struct *wq;
+
+	spin_lock(&lookupcache_workq_lock);
+	wq = lookupcache_workq;
+	lookupcache_workq = NULL;
+	spin_unlock(&lookupcache_workq_lock);
+
+	enfs_log_debug("delete work queue\n");
+
+	if (wq == NULL)
+		return;
+	flush_workqueue(wq);
+	destroy_workqueue(wq);
+}
+
+/*
+ * Bring up the lookupcache subsystem: workqueue first, then the polling
+ * thread.  The workqueue is torn down again if the thread fails to start.
+ */
+int enfs_lookupcache_timer_init(void)
+{
+	int err;
+
+	err = enfs_lookupcache_workqueue_init();
+	if (err != 0) {
+		enfs_log_error("enfs_lookupcache_timer_init workqueue init failed.\n");
+		return err;
+	}
+
+	err = lookupcache_start();
+	if (err != 0) {
+		enfs_log_error("enfs_lookupcache_timer_init work start failed.\n");
+		lookupcache_workqueue_fini();
+	}
+	return err;
+}
+
+/*
+ * Stop the polling thread (if it was started) and tear down the workqueue.
+ *
+ * Fix: guard against lookupcache_thread holding an ERR_PTR from a failed
+ * kthread_run() — the original non-NULL test would pass an error pointer
+ * to kthread_stop() and crash.
+ */
+void enfs_lookupcache_fini(void)
+{
+	if (lookupcache_thread && !IS_ERR(lookupcache_thread)) {
+		kthread_stop(lookupcache_thread);
+	}
+
+	lookupcache_workqueue_fini();
+}
+
+/*
+ * Module-init entry for the lookupcache subsystem: initialise the locks,
+ * register the RPC procedure, then start the workqueue and poller.
+ *
+ * Fix: lookupcache_workq_lock was declared as a plain static spinlock_t
+ * and never initialised (no DEFINE_SPINLOCK, no spin_lock_init) — using
+ * an uninitialised spinlock is undefined; initialise it here alongside
+ * the switch lock.  Also spell the empty parameter list as (void).
+ */
+int enfs_lookupcache_init(void)
+{
+	spin_lock_init(&lookupcache_workq_lock);
+	spin_lock_init(&g_lookupcache_switch_lock);
+	enfs_proc_reg(ENFSPROC_LOOKUPCACHE, &enfs_lookup_cahce);
+	return enfs_lookupcache_timer_init();
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/dns_internal.h b/fs/nfs/enfs/include/dns_internal.h
new file mode 100644
index 0000000..fd7a72a
--- /dev/null
+++ b/fs/nfs/enfs/include/dns_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Copy from net/dns_resolver, please see the original file.
+ */
+#ifndef _DNS_INTERNAL_H_
+#define _DNS_INTERNAL_H_
+
+#include
+#include
+#include
+
+#include "enfs_log.h"
+
+struct multipath_mount_options;
+
+/*
+ * Layout of key payload words.
+ */
+enum {
+ enfs_dns_key_data,
+ enfs_dns_key_error,
+};
+
+/*
+ * dns_key.c
+ */
+extern const struct cred *enfs_dns_resolver_cache;
+
+/*
+ * debug tracing
+ */
+extern unsigned int enfs_dns_resolver_debug;
+
+#define kdebug(FMT, ...) \
+ do { \
+ if (unlikely(enfs_dns_resolver_debug)) \
+ printk(KERN_DEBUG "[%-6.6s] " FMT "\n", current->comm, ##__VA_ARGS__); \
+ } while (0)
+
+#define kenter(FMT, ...) kdebug("==> %s(" FMT ")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) kdebug("<== %s()" FMT "", __func__, ##__VA_ARGS__)
+
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+ int enfs_euler_dns_query(struct net *net,
+ const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time64_t *_expiry,
+ bool invalidate);
+#else
+ int enfs_dns_query(const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time64_t *_expiry);
+#endif
+
+int init_dns_resolver(void);
+void exit_dns_resolver(void);
+
+void enfs_add_domain_name(struct multipath_mount_options *opt);
+void enfs_debug_print_name_list(void);
+
+int enfs_dns_init(void);
+void enfs_dns_exit(void);
+
+#endif // _DNS_INTERNAL_H_
diff --git a/fs/nfs/enfs/include/enfs.h b/fs/nfs/enfs/include/enfs.h
new file mode 100644
index 0000000..2ff736d
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Client-side ENFS multipath adapt header.
+ *
+ * Copyright (c) 2023. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+
+#ifndef _ENFS_H_
+#define _ENFS_H_
+#include
+#include
+#include
+#include
+#include
+#include
+#include "enfs_adapter.h"
+
+#define IP_ADDRESS_LEN_MAX 64
+#define MAX_IP_PAIR_PER_MOUNT 8
+#define MAX_IP_INDEX (MAX_IP_PAIR_PER_MOUNT)
+#define MAX_SUPPORTED_LOCAL_IP_COUNT 8
+#define MAX_SUPPORTED_REMOTE_IP_COUNT 1024
+#define DEFAULT_SUPPORTED_REMOTE_IP_COUNT 32
+#define MIN_SUPPORTED_REMOTE_IP_COUNT 2
+
+#define MAX_DNS_NAME_LEN 512
+#define MAX_DNS_SUPPORTED 2
+
+#define ENFS_MAX_LINK_COUNT 16384
+#define DEFAULT_ENFS_MAX_LINK_COUNT 512
+#define MIN_ENFS_MAX_LINK_COUNT 512
+
+#define ENFS_MAX_MOUNT_COUNT 256
+#define EXTEND_MAX_DNS_NAME_LEN 256
+
+
+/* Fixed-capacity list of socket addresses (remote or local endpoints). */
+struct nfs_ip_list {
+ int count; /* number of valid entries in the arrays below */
+ struct sockaddr_storage address[MAX_SUPPORTED_REMOTE_IP_COUNT];
+ size_t addrlen[MAX_SUPPORTED_REMOTE_IP_COUNT]; /* byte length of each address */
+};
+
+typedef struct NFS_ROUTE_DNS_T {
+ char dnsname[MAX_DNS_NAME_LEN]; // valid only if dnsExist is true
+} NFS_ROUTE_DNS_S;
+
+typedef struct NFS_ROUTE_DNS_INFO_T {
+ int dnsNameCount; /* Count of DNS name in the list */
+ NFS_ROUTE_DNS_S routeRemoteDnsList[MAX_DNS_SUPPORTED]; // valid only if dnsExist is true
+} NFS_ROUTE_DNS_INFO_S;
+
+// TODO: find a proper header file for this forward declaration.
+struct rpc_iostats;
+/* Per-transport private state attached to each enfs-managed rpc_xprt. */
+struct enfs_xprt_context {
+ int version;
+ struct sockaddr_storage srcaddr; /* local source address of this xprt */
+ struct rpc_iostats *stats; /* per-xprt iostat counters */
+ bool main; /* true for the client's main transport */
+ atomic_t path_state; /* presumably a pm_path_state value — see pm_state.h */
+ atomic_t path_check_state;
+ atomic_long_t queuelen;
+ uint64_t lsid;
+ uint64_t wwn;
+ uint32_t cpuId;
+ u32 protocol; // TCP or UDP or RDMA
+ int64_t lastTime;
+ u32 reverse[4]; /* reserved padding (field name "reverse" as in source) */
+};
+
+/*
+ * Report whether @xprt is the main transport of its client: true only
+ * when an enfs context is attached and its 'main' flag is set.
+ */
+static inline bool enfs_is_main_xprt(struct rpc_xprt *xprt)
+{
+	struct enfs_xprt_context *context = xprt_get_reserve_context(xprt);
+
+	return context ? context->main : false;
+}
+
+/* Return true when more than @ms milliseconds have elapsed since *start. */
+static inline bool enfs_timeout_ms(ktime_t *start, int ms)
+{
+	ktime_t now = ktime_get();
+
+	return ktime_to_ms(ktime_sub(now, *start)) > ms;
+}
+
+/*
+ * Sleep for roughly @ms milliseconds using schedule_timeout().
+ *
+ * NOTE(review): set_current_state(TASK_INTERRUPTIBLE) is armed only once,
+ * before the first schedule_timeout(). If that call returns early (e.g. a
+ * signal), the task is TASK_RUNNING again, so the retry loop's
+ * schedule_timeout() calls return immediately without sleeping — confirm
+ * whether the state should be re-armed inside the loop, or whether
+ * msleep()/schedule_timeout_interruptible() was intended. The
+ * `schedule_timeo > sleep_time` early-return also looks suspicious since
+ * the remaining timeout can never exceed the requested one.
+ */
+static inline void enfs_msleep(long ms)
+{
+ long sleep_time;
+ long schedule_timeo;
+
+ /* Convert milliseconds to jiffies. */
+ sleep_time = (long)((ms * HZ) / 1000);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (sleep_time <= 0) {
+ /* Sub-jiffy request: sleep for at least one jiffy. */
+ schedule_timeo = schedule_timeout(1);
+ } else {
+ schedule_timeo = schedule_timeout(sleep_time);
+ }
+
+ /* Keep sleeping until the remaining timeout is consumed. */
+ while (schedule_timeo > 0) {
+ schedule_timeo = schedule_timeout(schedule_timeo);
+ if (schedule_timeo > sleep_time) {
+ return;
+ }
+ }
+ return;
+}
+
+bool enfs_insert_ip_list(struct nfs_ip_list *ip_list, int max,
+ struct sockaddr_storage *addr);
+bool enfs_ip_list_contain(struct nfs_ip_list *ip_list,
+ struct sockaddr_storage *addr);
+
+
+bool enfs_link_count_add(int num);
+int enfs_link_count_num(void);
+void enfs_clnt_get_linkcap(struct rpc_clnt *clnt);
+void enfs_clnt_release_linkcap(struct rpc_clnt *clnt);
+bool enfs_mount_count_add(int num);
+int enfs_mount_count(void);
+
+struct rpcclnt_release_item {
+ struct list_head node;
+ struct rpc_clnt *clnt;
+};
+
+struct clnt_release_item {
+ struct list_head node;
+ struct nfs_client *client;
+ struct rpc_clnt *clnt;
+};
+
+void enfs_destroy_clnt_list(struct list_head *head);
+void enfs_destroy_rpcclnt_list(struct list_head *head);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_config.h b/fs/nfs/enfs/include/enfs_config.h
new file mode 100644
index 0000000..9b39dd6
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_config.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs configuration
+ * Author: y00583252
+ * Create: 2023-07-27
+ */
+
+#ifndef ENFS_CONFIG_H
+#define ENFS_CONFIG_H
+
+#include
+
+#define ENFS_VERSION 4 // client version is 4, server version is 10002
+#define ENFS_SERVER_VERSION_BASE 10001 // 24A server version is 10001
+
+#define ENFS_PM_PING_TMIE_OUT 3
+
+typedef enum {
+ ENFS_MULTIPATH_ENABLE = 0,
+ ENFS_MULTIPATH_DISABLE = 1,
+} enfs_multipath_state;
+
+typedef enum {
+ ENFS_LOADBALANCE_RR,
+ ENFS_LOADBALANCE_SHARDVIEW,
+} enfs_loadbalance_mode;
+
+typedef enum {
+ ENFS_LOOKUPCACHE_DISABLE = 0,
+ ENFS_LOOKUPCACHE_ENABLE = 1,
+} enfs_lookupcache_state;
+
+typedef enum {
+ ENFS_V0 = 0,
+ ENFS_V1,
+ ENFS_V2,
+ ENFS_V3,
+ ENFS_V4,
+ ENFS_VERSION_BUTT
+} ENFS_CLIENT_VERSION;
+
+
+int32_t enfs_get_config_path_detect_interval(void);
+int32_t enfs_get_config_path_detect_timeout(void);
+int32_t enfs_get_config_multipath_timeout(void);
+int32_t enfs_get_config_multipath_state(void);
+int32_t enfs_get_config_loadbalance_mode(void);
+int32_t enfs_get_config_dns_update_interval(void);
+int32_t enfs_get_config_dns_auto_multipath_resolution(void);
+int32_t enfs_get_config_shardview_update_interval(void);
+int32_t enfs_get_config_lookupcache_interval(void);
+int32_t enfs_get_config_lookupcache_state(void);
+int32_t enfs_get_config_link_count_per_mount(void);
+int32_t enfs_get_config_link_count_total(void);
+int32_t enfs_get_native_link_io_status(void);
+bool enfs_check_config_wwn(uint64_t wwn);
+bool enfs_whitelist_filte(char *ip_addr);
+int32_t enfs_config_load(void);
+int32_t enfs_config_timer_init(void);
+void enfs_config_timer_exit(void);
+int GetEnfsConfigIpFiltersCount(void);
+#endif // ENFS_CONFIG_H
diff --git a/fs/nfs/enfs/include/enfs_errcode.h b/fs/nfs/enfs/include/enfs_errcode.h
new file mode 100644
index 0000000..99493ba
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_errcode.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs configuration
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#ifndef ENFS_ERRCODE_H
+#define ENFS_ERRCODE_H
+
+enum {
+ ENFS_RET_OK = 0,
+ ENFS_RET_FAIL
+};
+
+#endif // ENFS_ERRCODE_H
diff --git a/fs/nfs/enfs/include/enfs_log.h b/fs/nfs/enfs/include/enfs_log.h
new file mode 100644
index 0000000..799c8af
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_log.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: enfs log
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#ifndef ENFS_LOG_H
+#define ENFS_LOG_H
+
+#include
+
+/* Runtime switch for enfs_log_debug(); defined in the enfs module. */
+extern unsigned int enfs_debug;
+
+/* Unconditional info/error logging, tagged with the calling function name. */
+#define enfs_log_info(fmt, ...) printk(KERN_INFO "enfs:[%s]" pr_fmt(fmt), __func__, ##__VA_ARGS__)
+#define enfs_log_error(fmt, ...) printk(KERN_ERR "enfs:[%s]" pr_fmt(fmt), __func__, ##__VA_ARGS__)
+/* Debug logging, emitted only while enfs_debug is non-zero. */
+#define enfs_log_debug(fmt, ...) \
+ do { \
+ if (enfs_debug != 0) { \
+ printk(KERN_INFO "enfs:[%s]" pr_fmt(fmt), __func__, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#endif // ENFS_LOG_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_lookup_cache.h b/fs/nfs/enfs/include/enfs_lookup_cache.h
new file mode 100644
index 0000000..61e40b2
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_lookup_cache.h
@@ -0,0 +1,74 @@
+#ifndef _ENFS_LOOKUP_CACHE_H_
+#define _ENFS_LOOKUP_CACHE_H_
+
+#include
+#include
+#include
+#include
+
+#define FILE_UUID_BUFF_LEN 38
+#define MAX_EXPID_LEN 32
+#define MAX_EXPSTR_LEN (MAX_EXPID_LEN * 2 + 1)
+#define ENFS_LOOKUP_CACHE_LEVEL 0
+
+#define ENFS_fhandle_sz (sizeof(struct nfs_fh))
+#define ENFS_lookupcacheargs_sz (sizeof(EnfsGetConfigArgs))
+#define ENFS_lookupcacheres_sz (sizeof(EnfsGetConfigRes))
+
+typedef enum {
+ ENFS_LOOKUPCACHE_ALL = 0,
+ ENFS_LOOKUPCACHE_NONEG,
+ ENFS_LOOKUPCACHE_NONE
+} EnfsLookupcacheEnum;
+
+struct lookupcache_work {
+ struct nfs_fh fh;
+ void *server; /* struct nfs_server pointer, don't access the mem, because maybe already freed */
+ struct rpc_clnt *cl_rpcclient;
+ struct work_struct work_lookup;
+};
+
+typedef struct {
+ unsigned int version;
+ unsigned int mask;
+ unsigned int reserve;
+ struct nfs_fh fh;
+ unsigned int vers;
+}EnfsGetConfigArgs;
+
+typedef struct {
+ unsigned int version;
+ unsigned int mask;
+ unsigned int lookupCache;
+ unsigned int reserve;
+ unsigned int status;
+}EnfsGetConfigRes;
+
+struct nfs_enfs_s{
+ union {
+ EnfsGetConfigArgs args;
+ EnfsGetConfigRes res;
+ } enfs_u;
+};
+
+void enfs_xdr_enc_lookupcacheargs(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ const void *data);
+int enfs_xdr_dec_lookupcacheres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ void *data);
+
+#define PROC(proc, argtype, restype, timer) \
+ .p_proc = ENFSPROC_##proc, \
+ .p_encode = (kxdreproc_t)enfs_xdr_enc_##argtype##args, \
+ .p_decode = (kxdrdproc_t)enfs_xdr_dec_##restype##res, \
+ .p_arglen = ENFS_##argtype##args_sz, \
+ .p_replen = ENFS_##restype##res_sz, \
+ .p_timer = timer, \
+ .p_statidx = ENFSPROC_##proc, \
+ .p_name = #proc, \
+
+int enfs_lookupcache_init(void);
+void enfs_lookupcache_fini(void);
+void enfs_trigger_get_capability(struct nfs_server *nfs_server);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_multipath.h b/fs/nfs/enfs/include/enfs_multipath.h
new file mode 100644
index 0000000..a22bc99
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_multipath.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: enfs multipath
+ * Author:
+ * Create: 2023-07-31
+ */
+
+#ifndef ENFS_MULTIPATH_H
+#define ENFS_MULTIPATH_H
+#include
+#include "enfs_multipath_parse.h"
+
+#define MAX_XPRT_NUM_PER_CLIENT 31
+
+int enfs_multipath_init(void);
+void enfs_multipath_exit(void);
+void enfs_xprt_ippair_create(struct xprt_create *xprtargs, struct rpc_clnt *clnt, void *data);
+int enfs_config_xprt_create_args(struct xprt_create *xprtargs, struct rpc_create_args *args,
+ char *servername, size_t length);
+void print_enfs_multipath_addr(struct sockaddr *local, struct sockaddr *remote);
+int multipath_query_dns(struct multipath_mount_options *opt,
+ unsigned short family, bool use_cache, struct rpc_clnt *clnt);
+void enfs_for_each_rpc_clnt(int (*fn)(struct rpc_clnt *clnt, void *data), void *data);
+
+#endif // ENFS_MULTIPATH_H
diff --git a/fs/nfs/enfs/include/enfs_multipath_client.h b/fs/nfs/enfs/include/enfs_multipath_client.h
new file mode 100644
index 0000000..429eceb
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_multipath_client.h
@@ -0,0 +1,29 @@
+#ifndef _ENFS_MULTIPATH_CLIENT_H_
+#define _ENFS_MULTIPATH_CLIENT_H_
+
+#include "enfs.h"
+
+struct multipath_client_info {
+ int version;
+ struct work_struct work;
+ struct nfs_ip_list *remote_ip_list;
+ struct nfs_ip_list *local_ip_list;
+ NFS_ROUTE_DNS_INFO_S *pRemoteDnsInfo;
+ //struct multipath_conn_pairs conn_pairs;
+ s64 client_id;
+ u32 fill_local : 1;
+ u32 updating_domain : 1;
+ u32 reverse[2];
+};
+
+int nfs_multipath_client_info_init(void **data, const struct nfs_client_initdata *cl_init);
+void nfs_multipath_client_info_free(void *data);
+int nfs_multipath_client_info_match(void *src, void *dst);
+int nfs4_multipath_client_info_match(void *src, void *dst);
+void nfs_multipath_client_info_show(struct seq_file *mount_option, void *data);
+int nfs_multipath_dns_list_info_match(const NFS_ROUTE_DNS_INFO_S *dns_src,
+ const NFS_ROUTE_DNS_INFO_S *dns_dst);
+int enfs_alloc_nfsclient_info(struct multipath_client_info **client_info);
+void enfs_free_nfsclient_info(struct multipath_client_info *client_info);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_multipath_parse.h b/fs/nfs/enfs/include/enfs_multipath_parse.h
new file mode 100644
index 0000000..37bed23
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_multipath_parse.h
@@ -0,0 +1,21 @@
+#ifndef _ENFS_MULTIPATH_PARSE_H_
+#define _ENFS_MULTIPATH_PARSE_H_
+
+#include "enfs.h"
+
+struct multipath_mount_options {
+ int version;
+ struct nfs_ip_list *remote_ip_list;
+ struct nfs_ip_list *local_ip_list;
+ NFS_ROUTE_DNS_INFO_S *pRemoteDnsInfo;
+ u32 fill_local;
+ u32 reserve[2];
+};
+
+int nfs_multipath_parse_options(enum nfsmultipathoptions type, char *str,
+ void **enfs_option, struct net *net_ns);
+int nfs_multipath_alloc_options(void **enfs_option);
+void nfs_multipath_free_options(void **enfs_option);
+void enfs_set_mount_data(void **enfs_option, const char *hostname);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_proc.h b/fs/nfs/enfs/include/enfs_proc.h
new file mode 100644
index 0000000..bd99694
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_proc.h
@@ -0,0 +1,16 @@
+#ifndef ENFS_PROC_H
+#define ENFS_PROC_H
+
+struct rpc_clnt;
+struct rpc_task;
+struct proc_dir_entry;
+
+int enfs_proc_init(void);
+void enfs_proc_exit(void);
+struct proc_dir_entry *enfs_get_proc_parent(void);
+int enfs_proc_create_clnt(struct rpc_clnt *clnt);
+void enfs_proc_delete_clnt(struct rpc_clnt *clnt);
+void enfs_count_iostat(struct rpc_task *task);
+void enfs_for_each_rpc_clnt(int (*fn)(struct rpc_clnt *clnt, void *data), void *data);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_remount.h b/fs/nfs/enfs/include/enfs_remount.h
new file mode 100644
index 0000000..75b6bb2
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_remount.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: remount ip header file
+ * Author: y00583252
+ * Create: 2023-08-12
+ */
+#ifndef _ENFS_REMOUNT_
+#define _ENFS_REMOUNT_
+#include
+#include "enfs.h"
+
+int enfs_remount(struct nfs_client *nfs_client, void *enfs_option);
+int enfs_remount_iplist(struct nfs_client *nfs_client, void *enfs_option);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_roundrobin.h b/fs/nfs/enfs/include/enfs_roundrobin.h
new file mode 100644
index 0000000..af73ff0
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_roundrobin.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+#ifndef ENFS_ROUNDROBIN_H
+#define ENFS_ROUNDROBIN_H
+
+int enfs_lb_set_policy(struct rpc_clnt *clnt, void *data);
+int enfs_lb_init(void);
+void enfs_lb_exit(void);
+const struct rpc_xprt_iter_ops *enfs_xprt_rr_ops(void);
+const struct rpc_xprt_iter_ops *enfs_xprt_singular_ops(void);
+bool enfs_is_rr_route(struct rpc_clnt *cln);
+bool enfs_is_singularr_route(struct rpc_clnt *cln);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/enfs_tp_common.h b/fs/nfs/enfs/include/enfs_tp_common.h
new file mode 100644
index 0000000..ea3a875
--- /dev/null
+++ b/fs/nfs/enfs/include/enfs_tp_common.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
+ * Description: nfs tracepoint common header file
+ * Author: h00583093
+ * Create: 2022-11-01
+ */
+
+#ifndef ENFS_TP_COMMON_H
+#define ENFS_TP_COMMON_H
+
+#ifdef NFS_CLIENT_DEBUG
+
+#ifndef RETURN_OK
+#define RETURN_OK 0
+#define RETURN_ERROR (~0)
+#endif
+
+#define LVOS_MAX_TRACEP_NUM 1024
+#define LVOS_STR_TO_KEY_BASE_NUM 31
+
+/* 计算Hash表大小时使用的移位次数 */
+#define LVOS_MAX_TP_HASH_SHIFT 7
+/* Hash表内最多包含多少Chunk, 即Hash表的大小 */
+#define LVOS_MAX_TP_HASH_SIZE (1 << LVOS_MAX_TP_HASH_SHIFT)
+
+#define LVOS_TRACEP_PARAM_SIZE 32UL
+#ifndef MAX_NAME_LEN
+#define MAX_NAME_LEN 128
+#endif
+#ifndef MAX_DESC_LEN
+#define MAX_DESC_LEN 256
+#endif
+#define LVOS_TRACEP_STAT_DELETED 0
+#define LVOS_TRACEP_STAT_ACTIVE 1
+#define LVOS_TRACEP_STAT_DEACTIVE 2
+
+typedef enum tagLVOS_TP_TYPE_E {
+ LVOS_TP_TYPE_CALLBACK = 0,
+ LVOS_TP_TYPE_RESET,
+ LVOS_TP_TYPE_PAUSE,
+ LVOS_TP_TYPE_ABORT,
+ LVOS_TP_TYPE_BUTT
+}LVOS_TP_TYPE_E;
+
+typedef struct {
+ char achParamData[LVOS_TRACEP_PARAM_SIZE]; /**< 自定义参数数据区。 */
+} LVOS_TRACEP_PARAM_S;
+
+typedef void (*FN_TRACEP_COMMON_T)(LVOS_TRACEP_PARAM_S *, ...);
+
+typedef struct tagLVOS_TRACEP_NEW_S {
+ char szName[MAX_NAME_LEN];
+ char szDesc[MAX_DESC_LEN];
+ unsigned int uiPid;
+ int iActive;
+ int type;
+ unsigned int timeAlive;
+ unsigned int timeCalled;
+ FN_TRACEP_COMMON_T fnHook;
+ LVOS_TRACEP_PARAM_S stParam;
+}LVOS_TRACEP_NEW_S;
+
+typedef struct {
+ unsigned int cmd;
+ unsigned int pid;
+ int type;
+ unsigned int timeAlive;
+ LVOS_TRACEP_PARAM_S userParam;
+ char traceName[MAX_NAME_LEN];
+} NfsTracePointCmd;
+
+#define IOCTL_MAGIC 'N'
+#define IOCTL_CMD_TP_ACTION _IOW(IOCTL_MAGIC, 5, NfsTracePointCmd)
+
+int enfs_tracepoint_init(void);
+void enfs_tracepoint_exit(void);
+int RegTracePoint(unsigned int pid, const char *name, const char *desc, FN_TRACEP_COMMON_T fnHook);
+int UnregTracePoint(unsigned int pid, const char *name);
+int GetTracePoint(unsigned int pid, const char *name, LVOS_TRACEP_NEW_S **tracepoint);
+void DoTracePointPause(LVOS_TRACEP_NEW_S *tracepoint);
+int deactive_tracepoint(unsigned int pid, const char *name);
+int deactive_tracepoint_all(void);
+int active_tracepoint(unsigned int pid, const char *name, int type, unsigned int time, LVOS_TRACEP_PARAM_S userParam);
+
+#ifndef MY_PID
+#define MY_PID 1234
+#endif
+
+#define LVOS_TP_REG(name, desc, fn) RegTracePoint(MY_PID, #name, desc, (FN_TRACEP_COMMON_T)(fn))
+#define LVOS_TP_UNREG(name) UnregTracePoint(0, #name)
+#define LVOS_TP_START(name, ...) \
+ do { \
+ static LVOS_TRACEP_NEW_S *_pstTp = NULL; \
+ if (unlikely(NULL == _pstTp)) { \
+ (void)GetTracePoint(0, #name, &_pstTp); \
+ if (NULL == _pstTp) { \
+ printk(KERN_ERR "tracepoint `%s` not registered", #name); \
+ } \
+ } \
+ if (NULL != _pstTp && LVOS_TRACEP_STAT_ACTIVE == _pstTp->iActive && LVOS_TP_TYPE_CALLBACK == _pstTp->type) { \
+ _pstTp->fnHook(&_pstTp->stParam, __VA_ARGS__); \
+ _pstTp->timeCalled++; \
+ if (_pstTp->timeAlive > 0 && 0 == --(_pstTp->timeAlive)) { \
+ deactive_tracepoint(0, #name); \
+ } \
+ } else { \
+ if (NULL != _pstTp && LVOS_TRACEP_STAT_ACTIVE == _pstTp->iActive && LVOS_TP_TYPE_PAUSE == _pstTp->type) { \
+ DoTracePointPause(_pstTp); \
+ _pstTp->timeCalled++; \
+ if (_pstTp->timeAlive > 0 && 0 == --(_pstTp->timeAlive)) { \
+ deactive_tracepoint(0, #name); \
+ } \
+ }
+
+/*插入故障点结束*/
+#define LVOS_TP_END \
+ } \
+ } while (0);
+
+#else
+
+#define LVOS_TP_REG(name, desc, fn)
+#define LVOS_TP_UNREG(name)
+#define LVOS_TP_START(name, ...)
+#define LVOS_TP_END
+
+#endif // NFS_CLIENT_DEBUG
+
+#endif // ENFS_TP_COMMON_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/exten_call.h b/fs/nfs/enfs/include/exten_call.h
new file mode 100644
index 0000000..6f12774
--- /dev/null
+++ b/fs/nfs/enfs/include/exten_call.h
@@ -0,0 +1,256 @@
+#ifndef _EXTEN_CALL_H_
+#define _EXTEN_CALL_H_
+
+#define IP_ADDRESS_LEN_MAX 64
+#define FILE_UUID_BUFF_LEN 38
+#define MAX_SHARD_NUMBER_IN_CLUSTER_4FS (1024 * 64)
+#define MAX_GLOBAL_CTRL_NODE_NUM 2048
+#define INVALID_CPU_ID 2147483647
+
+#define ENFS_CAPABILITY_LSID_NOTSUPPORT 0x0001 /* lsversion query capability */
+#define ENFS_CAPABILITY_LSID_SUPPORT 0x0002 /* lsversion query capability */
+
+typedef enum {
+ NFS3_GET_FSINFO_OP = 0,
+ NFS3_GET_LIF_VIEW_OP,
+ NFS_ENFS_QUERY_DNS_OP,
+ NFS3_GET_LS_VERSION_OP,
+ NFS_ENFS_OP_BUTT
+} EnfsExtendOpCode;
+
+typedef struct {
+ uint8_t data[FILE_UUID_BUFF_LEN]; /* uuid byte array */
+ uint32_t dataLen; /* uuid byte size */
+} FILE_UUID;
+typedef struct {
+ uint32_t tenantId;
+ uint32_t ipNumber;
+ char ipAddr[0];
+} LIF_ARGS;
+typedef struct {
+ uint32_t ipType; // 需要存储返的ip类型对应Ip, IP_TYPE_E中的值
+ uint32_t dnsNameCount;
+ char dnsName[0];
+} DNS_ARGS;
+typedef enum IP_TYPE_E {
+ IP_TYPE_V4 = 0, /* *< 只需要IPv4类型 */
+ IP_TYPE_V6 = 1, /* *< 只需要IPv6类型 */
+ IP_TYPE_BOTH = 2, /* 同时需要ipv4 ipv6 */
+ IP_TYPE_BUTT = 3 /* *< 无效值 */
+} IP_TYPE_E ;
+
+typedef struct {
+ uint64_t lsid;
+ uint32_t cpuId;
+} FS_SHARD_VIEW_SINGLE;
+
+
+typedef struct {
+ uint64_t clusterId;
+ uint32_t storagePoolId;
+ uint32_t fsId;
+ uint32_t tenantId;
+ uint32_t num;
+ FS_SHARD_VIEW_SINGLE shardView[0];
+} FS_SHARD_VIEW;
+
+typedef struct {
+ uint64_t lsVersion;
+ uint32_t lsId;
+} EXTEND_GET_LS_VERSION_SINGLE;
+
+typedef struct {
+ uint32_t num;
+ uint64_t clusterId;
+ EXTEND_GET_LS_VERSION_SINGLE lsInfo[0];
+} EXTEND_GET_LS_VERSION;
+
+typedef struct {
+ uint32_t isfound;
+ uint32_t workStatus;
+ uint64_t lsId;
+ uint32_t tenantId;
+ uint64_t homeSiteWwn;
+ uint32_t cpuId;
+} LIF_PORT_INFO_SINGLE;
+
+typedef struct {
+ char ipAddr[IP_ADDRESS_LEN_MAX];
+} DNS_QUERY_IP_INFO;
+
+typedef struct {
+ uint64_t lsId;
+ uint32_t offset;
+ uint32_t count;
+} DNS_QUERY_LSID_INFO;
+
+typedef struct {
+ char ipAddr[IP_ADDRESS_LEN_MAX];
+ uint64_t lsId;
+ uint32_t cpuId;
+} DNS_QUERY_IP_INFO_SINGLE;
+
+typedef struct {
+ uint32_t ipNumber;
+ DNS_QUERY_IP_INFO_SINGLE ipInfo[0];
+} DNS_QUERY_IP_INFO_MULTIPLE;
+
+typedef struct {
+ uint32_t lifNumber;
+ LIF_PORT_INFO_SINGLE lifport[0];
+} LIF_PORT_INFO_MULTIPLE;
+
+typedef struct {
+ char ipAddr [IP_ADDRESS_LEN_MAX];
+ uint32_t workStatus;
+ uint64_t lsId;
+ uint64_t wwn;
+ uint32_t cpuId;
+} LIF_PORT_INFO;
+
+typedef struct {
+ uint32_t opcode;
+ uint32_t version;
+ union {
+ FILE_UUID Uuid;
+ LIF_ARGS lifArgs;
+ DNS_ARGS dnsArgs;
+ } extend_args_u;
+} EXTEND3args;
+
+typedef struct {
+ uint32_t opcode;
+ uint32_t version;
+ union {
+ FS_SHARD_VIEW fsInfo;
+ LIF_PORT_INFO_MULTIPLE lifInfo;
+ DNS_QUERY_IP_INFO_MULTIPLE dnsQueryIpInfo;
+ EXTEND_GET_LS_VERSION lsView;
+ } extend_res_u;
+} EXTEND3res;
+
+typedef struct {
+ union {
+ EXTEND3args args;
+ EXTEND3res res;
+ } extend_u;
+} nfs_extend_s;
+
+int dorado_extend_op(struct rpc_clnt *clnt, char *buf, int *buflen);
+int dorado_query_fs_shard(struct rpc_clnt *clnt, FILE_UUID *file_uuid,
+ FS_SHARD_VIEW **resDataOut);
+int dorado_query_lifview(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ char *ipAddr, uint32_t ipNumber,
+ LIF_PORT_INFO *lifInfo);
+int enfs_query_lifview(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ char *ipaddr, uint64_t *lsid, uint64_t *wwn, uint32_t *cpuId);
+
+int dorado_query_dns(struct rpc_clnt *clnt, DNS_QUERY_IP_INFO_SINGLE **dnsQueryIpInfo,
+ uint32_t ip_type, uint32_t dnsNamecount, char *dnsName, int *ipNumber);
+int dorado_query_lsId(struct rpc_clnt *clnt, EXTEND_GET_LS_VERSION **resDataOut);
+
+// =============
+// =============
+#define NFS3PROC_EXTEND 22
+
+#define DIR_BIT_POS 63
+#define NID_BITS_SZ 12
+
+#ifndef NID_BITS_MASK
+#define NID_BITS_MASK ((1 << NID_BITS_SZ) - 1) // ???????????
+#endif
+#define NID_BITS_POS (DIR_BIT_POS - NID_BITS_SZ)
+#define ENFS_GLOBAL_OID_BITS 47
+
+#define UUID_OFFSET 16
+#define UUID_DEVID_OFFSET 2 // 8byte,2-9 devid wwn
+#define UUID_FSID_OFFSET 10 // 4byte,10-13 fsid
+#define UUID_DTREEID_OFFSET 14 // 4byte,14-17 dtreeid
+#define UUID_SNAPID_OFFSET 18 // 4byte,18-21 snapid
+// | 1 | 12 | 51 |
+// | reserved| fspid | oid |
+// pfid contains fspid &oid
+// fspid: File Service Partition == shardId or partitionID
+// oid: dentrytable id
+#define UUID_PFID_OFFSET 22 // 8byte,22-29 birthpfid
+#define UUID_FID_OFFSET 30 // 8byte,30-38 fileid
+
+// nfs objectid to fspid
+#define DIR_BIT_POS 63
+#define NID_BITS_SZ 12
+
+#define NFS_UUID_LEN 38
+
+/*
+ * Extract the FILE_UUID_BUFF_LEN-byte file uuid embedded at UUID_OFFSET
+ * inside an NFS file handle's opaque data.
+ *
+ * NOTE(review): there is no bounds check against fh->size — this assumes
+ * the server always returns handles of at least
+ * UUID_OFFSET + FILE_UUID_BUFF_LEN bytes; confirm against the server's
+ * file-handle layout.
+ */
+static inline void fh_file_uuid(const struct nfs_fh *fh, FILE_UUID *file_uuid)
+{
+ memcpy((void *)file_uuid->data, (void *)(fh->data + UUID_OFFSET),
+ FILE_UUID_BUFF_LEN);
+ file_uuid->dataLen = FILE_UUID_BUFF_LEN;
+}
+
+/*
+ * Return a pointer to the 8-byte device id (wwn) field inside the uuid
+ * area of @fh: UUID_DEVID_OFFSET bytes past the uuid, which itself
+ * starts at UUID_OFFSET in fh->data.
+ */
+static inline uint64_t *fh_devid(struct nfs_fh *fh)
+{
+	uint8_t *uuid_base = (uint8_t *)(fh->data + UUID_OFFSET);
+
+	return (uint64_t *)(uuid_base + UUID_DEVID_OFFSET);
+}
+
+/* Return a pointer to the 4-byte fsid field inside the uuid area of @fh. */
+static inline uint32_t *fh_fsid(struct nfs_fh *fh)
+{
+	uint8_t *uuid_base = (uint8_t *)(fh->data + UUID_OFFSET);
+
+	return (uint32_t *)(uuid_base + UUID_FSID_OFFSET);
+}
+
+/*
+ * Pick the object id used for shard routing out of a file uuid.
+ *
+ * Per the original comment: when bit DIR_BIT_POS of the 64-bit value at
+ * UUID_FID_OFFSET is clear the object is a regular file, so the parent
+ * id at UUID_PFID_OFFSET is used instead (see the pfid layout comment
+ * above the UUID_PFID_OFFSET definition).
+ */
+static inline uint64_t get_objectid_from_uuid(FILE_UUID *file_uuid)
+{
+ uint8_t *uuid = (uint8_t *)(file_uuid->data);
+ uint64_t objectId = *((uint64_t *)(uuid + UUID_FID_OFFSET));
+
+ // default is directory, use file id
+ // if is file, use pfid
+ if ((objectId >> DIR_BIT_POS) == 0) {
+ objectId = *((uint64_t *)(uuid + UUID_PFID_OFFSET));
+ }
+ return objectId;
+}
+
+#define GET_DEVID_FROM_UUID(puuid) \
+ (*((uint64_t *)((puuid)->data + UUID_DEVID_OFFSET)))
+#define GET_FSID_FROM_UUID(puuid) \
+ (*((uint32_t *)((puuid)->data + UUID_FSID_OFFSET)))
+#define GET_DTREEID_FROM_UUID(puuid) \
+ (*((uint32_t *)((puuid)->data + UUID_DTREEID_OFFSET)))
+#define GET_SNAPID_FROM_UUID(puuid) \
+ (*((uint32_t *)((puuid)->data + UUID_SNAPID_OFFSET)))
+#define GET_PFID_FROM_UUID(puuid) \
+ (*((uint64_t *)((puuid)->data + UUID_PFID_OFFSET)))
+#define GET_FID_FROM_UUID(puuid) \
+ (*((uint64_t *)((puuid)->data + UUID_FID_OFFSET)))
+#define ENFS_GET_FSID_HIGHEST_BIT(fsid) \
+ (((fsid) >> 31) & 1)
+#define ENFS_GET_LOCAL_FSPID_FROM_FID(fid) \
+ (uint32_t)(((fid) >> NID_BITS_POS) & 0xFFF)
+#define ENFS_GET_GLOBAL_FSPID_FROM_FID(fid) \
+ (uint32_t)(((fid) >> ENFS_GLOBAL_OID_BITS) & 0xFFFF)
+#define ENFS_GET_FSP_FROM_FSID_FID(fsid, fid) \
+ (ENFS_GET_FSID_HIGHEST_BIT(fsid) ? ENFS_GET_GLOBAL_FSPID_FROM_FID(fid) : ENFS_GET_LOCAL_FSPID_FROM_FID(fid))
+// =============
+
+/*
+ * Derive the file-service-partition id (fspid) for a uuid: combine the
+ * routing object id with the fsid, selecting the local or global fspid
+ * bit-field depending on the fsid's highest bit
+ * (see ENFS_GET_FSP_FROM_FSID_FID above).
+ */
+static inline uint64_t get_fspid_from_uuid(FILE_UUID *file_uuid)
+{
+ uint64_t objectId = get_objectid_from_uuid(file_uuid);
+ uint32_t fsId = GET_FSID_FROM_UUID(file_uuid);
+
+ return ENFS_GET_FSP_FROM_FSID_FID(fsId, objectId);
+}
+
+/* Map a uuid to its shard id: fspid modulo the cluster-wide shard count. */
+static inline uint32_t get_shardid_from_uuid(FILE_UUID *file_uuid)
+{
+ return get_fspid_from_uuid(file_uuid) % MAX_SHARD_NUMBER_IN_CLUSTER_4FS;
+}
+
+int scan_uuid(const char* str, uint8_t* arr, int arrlen);
+int sprint_uuid(char *buf, int buflen, FILE_UUID *file_uuid);
+
+void enfs_print_uuid(FILE_UUID *file_uuid);
+
+#endif // _EXTEN_CALL_H_
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/init.h b/fs/nfs/enfs/include/init.h
new file mode 100644
index 0000000..7b15353
--- /dev/null
+++ b/fs/nfs/enfs/include/init.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs client init
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#ifndef ENFS_INIT_H
+#define ENFS_INIT_H
+
+#include
+
+int32_t enfs_init(void);
+void enfs_fini(void);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/pm_state.h b/fs/nfs/enfs/include/pm_state.h
new file mode 100644
index 0000000..2d6c3a1
--- /dev/null
+++ b/fs/nfs/enfs/include/pm_state.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: path state header file
+ * Author: y00583252
+ * Create: 2023-08-12
+ */
+
+#ifndef PM_STATE_H
+#define PM_STATE_H
+
+#include
+#include
+
+typedef enum {
+ PM_STATE_INIT,
+ PM_STATE_NORMAL,
+ PM_STATE_FAULT,
+ PM_STATE_UNDEFINED // xprt is not multipath xprt
+} pm_path_state;
+
+void pm_set_path_state(struct rpc_xprt *xprt, pm_path_state state);
+pm_path_state pm_get_path_state(struct rpc_xprt *xprt);
+
+void pm_get_path_state_desc(struct rpc_xprt *xprt, char *buf, int len);
+void pm_get_xprt_state_desc(struct rpc_xprt *xprt, char *buf, int len);
+
+#endif // PM_STATE_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/shard.h b/fs/nfs/enfs/include/shard.h
new file mode 100644
index 0000000..c98ba3c
--- /dev/null
+++ b/fs/nfs/enfs/include/shard.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs client shard route
+ * Author:
+ * Create: 2023-11-14
+ */
+
+#ifndef _ENFS_SHARD_H_
+#define _ENFS_SHARD_H_
+
+#include
+#include "exten_call.h"
+
+extern unsigned int enfs_uuid_debug;
+
+void shard_set_transport(struct rpc_task *task, struct rpc_clnt *clnt);
+int enfs_debug_match_cmd(char *str, size_t len);
+int enfs_shard_init(void);
+void enfs_shard_exit(void);
+
+int enfs_find_clnt_root(struct rpc_clnt *clnt, FILE_UUID *root_uuid);
+int enfs_insert_clnt_root(struct rpc_clnt *clnt, FILE_UUID *root_uuid);
+int enfs_delete_clnt_shard_cache(struct rpc_clnt *clnt);
+void enfs_query_xprt_shard(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+#endif // _ENFS_SHARD_H_
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/unify_multipath/dpc_rpc_client_api.h b/fs/nfs/enfs/include/unify_multipath/dpc_rpc_client_api.h
new file mode 100644
index 0000000..292bf6b
--- /dev/null
+++ b/fs/nfs/enfs/include/unify_multipath/dpc_rpc_client_api.h
@@ -0,0 +1,196 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC client对ODC提供创删链路以及读写接口. 对外结构体和头文件应保持最小依赖原则, 不要包含内部头文件.
+ * 不允许将内部结构体函数放进来.
+ */
+#ifndef _DPC_CLIENT_API_H_
+#define _DPC_CLIENT_API_H_
+#include
+#include
+#include
+#include "multipath_api.h"
+
+#define DPC_REQ_DLG_BYPASS_FLAG BIT(0) // no need check dlg, such as write cache flush
+#define DPC_REQ_IS_ASYNC BIT(1)
+#define DPC_TOS_INFO_CNT_MAX 10
+
+/* Server capabilities */
+#define DPC_RPC_CAP_READDIRPLUS (1U << 0)
+#define DPC_RPC_CAP_HARDLINKS (1U << 1)
+#define DPC_RPC_CAP_SYMLINKS (1U << 2)
+#define DPC_RPC_CAP_ACLS (1U << 3)
+#define DPC_RPC_CAP_ATOMIC_OPEN (1U << 4)
+#define DPC_RPC_CAP_LGOPEN (1U << 5)
+#define DPC_RPC_CAP_FILEID (1U << 6)
+#define DPC_RPC_CAP_MODE (1U << 7)
+#define DPC_RPC_CAP_NLINK (1U << 8)
+#define DPC_RPC_CAP_OWNER (1U << 9)
+#define DPC_RPC_CAP_OWNER_GROUP (1U << 10)
+#define DPC_RPC_CAP_ATIME (1U << 11)
+#define DPC_RPC_CAP_CTIME (1U << 12)
+#define DPC_RPC_CAP_MTIME (1U << 13)
+#define DPC_RPC_CAP_POSIX_LOCK (1U << 14)
+#define DPC_RPC_CAP_UIDGID_NOMAP (1U << 15)
+#define DPC_RPC_CAP_STATEID_NFSV41 (1U << 16)
+#define DPC_RPC_CAP_ATOMIC_OPEN_V1 (1U << 17)
+#define DPC_RPC_CAP_SECURITY_LABEL (1U << 18)
+#define DPC_RPC_CAP_SEEK (1U << 19)
+#define DPC_RPC_CAP_ALLOCATE (1U << 20)
+#define DPC_RPC_CAP_DEALLOCATE (1U << 21)
+#define DPC_RPC_CAP_LAYOUTSTATS (1U << 22)
+#define DPC_RPC_CAP_CLONE (1U << 23)
+#define DPC_RPC_CAP_COPY (1U << 24)
+#define DPC_RPC_CAP_OFFLOAD_CANCEL (1U << 25)
+#define DPC_RPC_CAP_LAYOUTERROR (1U << 26)
+#define DPC_RPC_CAP_COPY_NOTIFY (1U << 27)
+#define DPC_RPC_CAP_XATTR (1U << 28)
+#define DPC_RPC_CAP_READ_PLUS (1U << 29)
+
+#define DPC_RPC_ATTR_FATTR_TYPE (1U << 0)
+#define DPC_RPC_ATTR_FATTR_MODE (1U << 1)
+#define DPC_RPC_ATTR_FATTR_NLINK (1U << 2)
+#define DPC_RPC_ATTR_FATTR_OWNER (1U << 3)
+#define DPC_RPC_ATTR_FATTR_GROUP (1U << 4)
+#define DPC_RPC_ATTR_FATTR_RDEV (1U << 5)
+#define DPC_RPC_ATTR_FATTR_SIZE (1U << 6)
+#define DPC_RPC_ATTR_FATTR_PRESIZE (1U << 7)
+#define DPC_RPC_ATTR_FATTR_BLOCKS_USED (1U << 8)
+#define DPC_RPC_ATTR_FATTR_SPACE_USED (1U << 9)
+#define DPC_RPC_ATTR_FATTR_FSID (1U << 10)
+#define DPC_RPC_ATTR_FATTR_FILEID (1U << 11)
+#define DPC_RPC_ATTR_FATTR_ATIME (1U << 12)
+#define DPC_RPC_ATTR_FATTR_MTIME (1U << 13)
+#define DPC_RPC_ATTR_FATTR_CTIME (1U << 14)
+#define DPC_RPC_ATTR_FATTR_PREMTIME (1U << 15)
+#define DPC_RPC_ATTR_FATTR_PRECTIME (1U << 16)
+#define DPC_RPC_ATTR_FATTR_CHANGE (1U << 17)
+#define DPC_RPC_ATTR_FATTR_PRECHANGE (1U << 18)
+#define DPC_RPC_ATTR_FATTR_V4_LOCATIONS (1U << 19)
+#define DPC_RPC_ATTR_FATTR_V4_REFERRAL (1U << 20)
+#define DPC_RPC_ATTR_FATTR_MOUNTPOINT (1U << 21)
+#define DPC_RPC_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22)
+#define DPC_RPC_ATTR_FATTR_OWNER_NAME (1U << 23)
+#define DPC_RPC_ATTR_FATTR_GROUP_NAME (1U << 24)
+#define DPC_RPC_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25)
+#define DPC_RPC_ATTR_FATTR_USED (1U << 26)
+
+
+#define DPC_RPC_ATTR_FATTR (DPC_RPC_ATTR_FATTR_TYPE \
+ | DPC_RPC_ATTR_FATTR_MODE \
+ | DPC_RPC_ATTR_FATTR_NLINK \
+ | DPC_RPC_ATTR_FATTR_OWNER \
+ | DPC_RPC_ATTR_FATTR_GROUP \
+ | DPC_RPC_ATTR_FATTR_RDEV \
+ | DPC_RPC_ATTR_FATTR_SIZE \
+ | DPC_RPC_ATTR_FATTR_FSID \
+ | DPC_RPC_CAP_FILEID \
+ | DPC_RPC_ATTR_FATTR_ATIME \
+ | DPC_RPC_ATTR_FATTR_MTIME \
+ | DPC_RPC_ATTR_FATTR_CTIME \
+ | DPC_RPC_ATTR_FATTR_USED)
+
+#define DPC_RPC_ERR_JUKEBOX (10008)
+
+typedef enum {
+ DPCREG = 1,
+ DPCDIR = 2,
+ DPCBLK = 3,
+ DPCCHR = 4,
+ DPCLNK = 5,
+ DPCSOCK = 6,
+ DPCFIFO = 7,
+ DPCBAD = 10
+} dpc_ftype;
+
+typedef struct {
+ uint32_t valid;
+ dpc_ftype type;
+ umode_t mode;
+ uint32_t nlink;
+ kuid_t uid;
+ kgid_t gid;
+ uint64_t size;
+ uint64_t used;
+ uint64_t fsid;
+ uint64_t file_id;
+ dev_t rdev;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
+ struct timespec64 crttime;
+ uint64_t reserved1;
+ uint64_t reserved2;
+} dpc_clnt_file_attr;
+
+typedef struct dpc_clnt_rw_args {
+ struct rpc_task *task;
+ struct cred *cred;
+ struct rpc_call_ops *callback_ops;
+ void *callback_data;
+ void (*callback)(struct dpc_clnt_rw_args *data);
+ void (*statis_callback)(struct dpc_clnt_rw_args *data, void *task);
+ struct workqueue_struct *workqueue;
+ uint8_t priority;
+
+ uint64_t mp_id; /* 选路入参,multipath instance id */
+ uint64_t cluster_id; /* 选路入参,集群id */
+ uint64_t pool_id; /* 选路入参,存储池id */
+ uint32_t timeout_ms; /* 选路入参,请求IO超时时间,超时不再换路重试 */
+ uint32_t flag; /* 写请求入参,DPC_REQ_DLG_BYPASS_FLAG etc */
+ uint32_t len; /* 读写请求入参,数据总长度 */
+ uint64_t offset; /* 读写请求入参,文件内偏移 */
+ uint32_t pgbase; /* 读写请求入参,首页页内偏移 */
+ mulp_file_uuid uuid; /* 读写请求入参,同时作为选路入参,文件UUID */
+ uint32_t share_id; /* 读请求入参 */
+
+ struct page **pages; /* 读写请求页面载荷 */
+
+ dpc_clnt_file_attr *prev_attr; /* 写请求出参,存储返回的文件修改前attr */
+ dpc_clnt_file_attr *post_attr; /* 普通读、元数据扩展读、写请求出参,存储返回的文件当前attr */
+ int32_t op_status; /* 读写请求出参,结果状态码 */
+ uint64_t counted; /* 读请求出参,读到的数据长度 */
+ uint32_t eof; /* 读请求出参,根据flag DPC_READ_EOF_BIT_FLAG位判断 */
+} dpc_clnt_rw_args;
+
+typedef struct dpc_clnt_port_tos {
+ uint16_t port;
+ uint32_t tos;
+} dpc_clnt_port_tos;
+
+typedef struct dpc_clnt_tos_info {
+ dpc_clnt_port_tos tos_info[DPC_TOS_INFO_CNT_MAX];
+ uint16_t cnt;
+} dpc_clnt_tos_info;
+
+/* ****************************************************************************
+ * 给odc提供读接口, 同步操作.
+ *
+ * args --- 出入参, 详见结构体描述
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int dpc_clnt_read_page(dpc_clnt_rw_args *args);
+
+/* ****************************************************************************
+ * 给odc提供写接口, 同步操作.
+ *
+ * args --- 出入参, 详见结构体描述
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int dpc_clnt_write_page(dpc_clnt_rw_args *args);
+
+int dpc_clnt_create_mp(mulp_create_mp_args *args, uint64_t *mp_id);
+int dpc_clnt_update_mp(uint64_t mp_id, mulp_create_mp_args *args);
+int dpc_clnt_update_detect_period(uint64_t mp_id, uint32_t detect_period);
+int dpc_clnt_update_ip_pair(mulp_update_ip_pair_args *args);
+int dpc_clnt_update_ip_view(mulp_update_ip_view_args *args);
+int dpc_clnt_update_shard_view(mulp_update_shard_view_args *args);
+int dpc_clnt_destroy_mp(uint64_t mp_id);
+void* dpc_clnt_zalloc(uint32_t size, gfp_t flag);
+void dpc_clnt_free(void *ptr);
+
+int dpc_clnt_set_tos_info(uint16_t port, uint32_t tos);
+int dpc_clnt_get_tos_info(dpc_clnt_tos_info *tos_info);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/unify_multipath/multipath_api.h b/fs/nfs/enfs/include/unify_multipath/multipath_api.h
new file mode 100644
index 0000000..19e48ff
--- /dev/null
+++ b/fs/nfs/enfs/include/unify_multipath/multipath_api.h
@@ -0,0 +1,214 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * 多路径管理头文件, 对nfs/dpc client提供多路径的创删和管理. 对外结构体和头文件应保持最小依赖原则, 不要包含内部头文件.
+ * 不允许将内部结构体函数放进来.
+ */
+#ifndef _MULTIPATH_API_H_
+#define _MULTIPATH_API_H_
+#include "multipath_types.h"
+
+#define MULP_INVAILD_MP_ID (~0ULL)
+#define MULP_MAX_IP_STR_LEN 64
+#define MULP_USER_NAME_LEN 64
+#define MULP_USER_AUTHKEY_LEN 256
+#define MULP_FILE_UUID_LEN 38
+#define MULP_PORT_NAME_LEN 16
+#define MULP_ZONE_MAX_CNT 64
+#define MULP_ONE_ZONE_MAX_CTRL_CNT 64
+#define MULP_ONE_NODE_MAX_CPU_CNT 4
+
+typedef enum {
+ PATH_INFO_STATUS = 0,
+ PATH_INFO_DETECT_TIME = 1,
+ // 读写信息统计
+ PATH_INFO_READ_CNT = 2,
+ PATH_INFO_READ_LEN = 3,
+ PATH_INFO_READ_SUM_DELAY = 4,
+ PATH_INFO_READ_FAILED_CNT = 5,
+ PATH_INFO_WRITE_CNT = 6,
+ PATH_INFO_WRITE_LEN = 7,
+ PATH_INFO_WRITE_SUM_DELAY = 8,
+ PATH_INFO_WRITE_FAILED_CNT = 9,
+ PATH_INFO_ERRNO = 10,
+} PATH_INFO_TYPE;
+typedef struct {
+ uint32_t len; /* 实际长度 */
+ uint8_t data[MULP_FILE_UUID_LEN];
+} mulp_file_uuid;
+
+typedef enum {
+ MULP_APP_NFS,
+ MULP_APP_DPC,
+ MULP_APP_MAX,
+} mulp_app_id_type;
+
+typedef enum {
+ MULP_STRATEGY_ROUNDROBIN,
+ MULP_STRATEGY_SHARDVIEW,
+ MULP_STRATEGY_MAX,
+} mulp_select_path_strategy;
+
+typedef enum {
+ MULP_NETWORK_TCP,
+ MULP_NETWORK_RDMA,
+ MULP_NETWORK_MAX,
+} mulp_network_type;
+
+typedef struct {
+ char *local_ip;
+ char *remote_ip;
+ uint64_t wwn;
+ uint32_t lsid; /* 二进制末6位节点id应小于MULP_ONE_ZONE_MAX_CTRL_CNT */
+ uint32_t zone_id; /* 应小于MULP_ZONE_MAX_CNT */
+ uint32_t cpu_id; /* 应小于MULP_ONE_NODE_MAX_CPU_CNT */
+ uint32_t is_add; /* 当前只支持全量更新 */
+ char *port_name;
+} mulp_ip_pair;
+
+#define MULP_MAX_NCONNECT 8
+typedef struct {
+ uint32_t ip_pair_cnt;
+ mulp_ip_pair *pair_arr;
+ uint64_t client_id;
+ uint32_t client_ls_id;
+ uint32_t nconnect; /* 每个ip pair创建多个链路, 需要大于0, 最大值MULP_MAX_NCONNECT */
+ mulp_network_type network_type;
+ mulp_select_path_strategy strategy;
+ char *user_name;
+ char *user_authkey; /* 是字符串还是二进制? */
+ void *ctx; /* 创建多链路是后台操作,创建完成后,多路径模块进行回调的应用上下文. 可以为NULL */
+ void (*callback)(int result, uint64_t mp_id, void *ctx); /* 创建多链路是后台操作,完成后多路径模块进行回调. 可以为NULL */
+ uint32_t detect_period; // 探测周期(s)
+} mulp_create_mp_args;
+
+typedef struct {
+ uint64_t mp_id;
+ uint32_t ip_pair_cnt;
+ uint32_t nconnect;
+ mulp_ip_pair *pair_arr;
+} mulp_update_ip_pair_args;
+
+typedef struct {
+ char *ip;
+ uint32_t lsid;
+ uint32_t zone_id;
+ uint32_t cpu_id;
+} mulp_ip_view;
+
+typedef struct {
+ uint64_t mp_id;
+ uint32_t ip_cnt;
+ mulp_ip_view *view_arr;
+} mulp_update_ip_view_args;
+
+typedef struct {
+ uint32_t lsid;
+ uint32_t zone_id;
+ uint32_t cpu_id;
+} mulp_shard_view;
+
+typedef struct {
+ uint64_t mp_id;
+ uint64_t wwn;
+ uint64_t pool_id; /* 当前是storage pool id */
+ uint64_t cluster_id;
+ uint32_t shard_cnt;
+ mulp_shard_view *view_arr; /* 数组, 下标即fsp id */
+} mulp_update_shard_view_args;
+
+typedef struct {
+ mulp_ip_pair *ip_pair;
+ mulp_network_type network_type;
+ void* path;
+} mulp_ops_create_path_args;
+
+typedef struct {
+ int (*ping_func)(void *path, uint64_t mp_id, void *path_info, void *context,
+ void (*cb)(int result, uint64_t mp_id, void *path_info, void *context));
+ int (*create_path_func)(mulp_ops_create_path_args *args);
+ int (*destroy_path_func)(void *path, uint64_t mp_id, void *path_info, void *context,
+ void (*cb)(int result, uint64_t mp_id, void *path_info, void *context));
+} mulp_app_ops_set;
+
+typedef struct {
+ mulp_file_uuid *uuid;
+ uint64_t cluster_id;
+ uint64_t pool_id;
+} mulp_file_info;
+
+/* ****************************************************************************
+ * 创建一个多路径集, 同步返回多路径集id. 但是建链是异步, 建链完成后会进行回调
+ * 通知结果.
+ *
+ * app_id --- 入参, 应用的id
+ * mp_id ---出参, 创建的多路径集id,后续操作需要使用传入此id.
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int mulp_create_mp(mulp_app_id_type app_id, mulp_create_mp_args *args, uint64_t *mp_id);
+int mulp_update_detect_period(uint64_t mp_id, uint32_t detect_period);
+int mulp_update_ip_pair(mulp_update_ip_pair_args *args);
+int mulp_update_ip_view(mulp_update_ip_view_args *args);
+int mulp_update_shard_view(mulp_update_shard_view_args *args);
+
+/* ****************************************************************************
+ * 删除一个多路径集. 调用之后,多路径集id,不再可用. 清理资源过程是异步的,完成后会进行回调
+ * 通知结果.
+ *
+ * mp_id --- 入参, 多路径集的id.
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int mulp_destroy_mp(uint64_t mp_id);
+
+/* ****************************************************************************
+ * 注册上层应用的ops集, 多路径会在流程中进行回调, 必须先调用此函数才能进行其他流程调用.
+ *
+ * app_id --- 入参, 应用的id.
+ * set --- 应用的操作集
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int mulp_reg_app_ops(mulp_app_id_type app_id, mulp_app_ops_set *set);
+
+
+int mulp_unreg_app_ops(mulp_app_id_type app_id);
+
+int mulp_ping_all_path(uint64_t mp_id, void *ctx, void (*callback)(int result, uint64_t mp_id, void *ctx));
+
+/* ****************************************************************************
+ * 根据文件的句柄(通常是uuid), 获取最佳路径的指针. 需要使用mulp_io_put_path进行释放引用
+ *
+ * mp_id --- 入参, 多路径集的id.
+ * uuid --- 入参, 文件的uuid
+ * path_mgmt ---- 出入参, 返回multipath路径管理结构指针, path_mgmt传入时不允许为NULL.
+ * path ---- 出入参, 返回dpc/nfs创建的路径的指针, path传入时不允许为NULL.
+ * is_direct_ctrl --- 出入参,返回选的链路是否为直连控制器, is_direct_ctrl不允许为NULL
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int mulp_io_get_optimal_path(uint64_t mp_id, mulp_file_info *file_info, uint64_t timestamp, void **path_mgmt,
+ void **path, uint32_t *is_direct_ctrl);
+
+/* ****************************************************************************
+ * 释放使用mulp_io_get_optimal_path获取最佳路径的指针引用.
+ *
+ * mp_id --- 入参, 多路径集的id.
+ * path_mgmt ---- 入参, 路径指针, path_mgmt传入时不允许为NULL.
+ * 返回值: 0 -- 成功, 其他--失败
+ * *************************************************************************** */
+int mulp_io_put_path(uint64_t mp_id, void *path_mgmt);
+
+int mulp_dump_path_info(void (*callback)(const char *buffer), uint64_t mp_id, BOOLEAN_T debug);
+
+void mulp_path_count_stats(void *path_info, PATH_INFO_TYPE type, uint64_t value);
+
+int mulp_path_clean_mp_rw_info(const char *data);
+
+void mulp_path_notify_io_result(void *path_info, int io_result, uint64_t start_time);
+
+int mulp_get_shard_view(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id, mulp_shard_view *shard_view);
+
+void mulp_destroy_shard_view(void);
+
+int mulp_ctor(void);
+void mulp_dector(void);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/include/unify_multipath/multipath_types.h b/fs/nfs/enfs/include/unify_multipath/multipath_types.h
new file mode 100644
index 0000000..3436078
--- /dev/null
+++ b/fs/nfs/enfs/include/unify_multipath/multipath_types.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Platform dependent utilities
+ */
+#ifndef _MULTIPATH_TYPES_H_
+#define _MULTIPATH_TYPES_H_
+
/*
 * Fixed-width integer fallbacks for environments without <stdint.h>.
 *
 * NOTE(review): '#ifndef uint8_t' tests for a MACRO named uint8_t, but the
 * standard/kernel types are typedefs, not macros, so these guards are always
 * true and the typedefs are always emitted.  That only works while they
 * happen to match the platform's own definitions — confirm this header is
 * never mixed with <stdint.h> on a toolchain where uint64_t is
 * 'unsigned long' (LP64 userspace) rather than 'unsigned long long'.
 */
#ifndef uint8_t
typedef unsigned char uint8_t;
#endif

#ifndef uint32_t
typedef unsigned int uint32_t;
#endif

/* Minimal boolean shim: plain int, B_TRUE/B_FALSE constants. */
#ifndef BOOLEAN_T
#define BOOLEAN_T int
#define B_TRUE 1
#define B_FALSE 0
#endif

/* LOCAL_LLT: unit-test build flavor uses 'unsigned long int' for uint64_t. */
#ifndef LOCAL_LLT
#ifndef uint64_t
typedef unsigned long long uint64_t;
#endif
#else
#ifndef uint64_t
typedef unsigned long int uint64_t;
#endif
#endif
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/init.c b/fs/nfs/enfs/init.c
new file mode 100644
index 0000000..fc3dce7
--- /dev/null
+++ b/fs/nfs/enfs/init.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: enfs client init
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#include "init.h"
+#include "enfs_log.h"
+#include "enfs_multipath.h"
+#include "enfs_tp_common.h"
+#include "mgmt_init.h"
+#include "dns_internal.h"
+#include "shard.h"
+
/*
 * One step of module bring-up: a human-readable name plus paired
 * init/teardown callbacks.  'final' may be NULL for steps that need no
 * cleanup (see init_helper_finalize()).
 */
struct enfs_init_entry {
	char *name;		/* step name, used only in log messages */
	int (*init)(void);	/* returns 0 on success, non-zero on failure */
	void (*final)(void);	/* optional teardown; NULL when not needed */
};
+
/*
 * Tear down the first @idx entries of @job in reverse registration order.
 * Called both for normal shutdown (idx == table size) and to unwind a
 * partially completed init sequence (idx == index of the failed step).
 */
static inline void init_helper_finalize(struct enfs_init_entry *job, int idx)
{
	struct enfs_init_entry *entry = NULL;

	while (idx > 0) {
		idx = idx - 1;
		entry = &job[idx];
		if (entry->final != NULL) {
			entry->final();
			/* NOTE(review): logged at error level even on normal
			 * teardown — presumably intentional for visibility,
			 * but confirm; info/debug may be more appropriate. */
			enfs_log_error("final %s.\n", entry->name);
		}
	}
}
+
+static inline int init_helper_init(struct enfs_init_entry *job, int size)
+{
+ int ret;
+ int i;
+ struct enfs_init_entry *entry = NULL;
+
+ for (i = 0; i < size; i++) {
+ entry = &job[i];
+ ret = entry->init();
+ if (ret) {
+ enfs_log_error("init step(%d) init(%s) fail.\n", i, entry->name);
+ goto init_err;
+ }
+ }
+
+ return 0;
+
+init_err:
+ init_helper_finalize(job, i);
+ return -1;
+}
+
/* Ordered bring-up sequence; unwound in reverse order on failure/exit. */
static struct enfs_init_entry init_entry[] = {
	{"multipath", enfs_multipath_init, enfs_multipath_exit},
#ifdef NFS_CLIENT_DEBUG
	{"tracepoit", enfs_tracepoint_init, enfs_tracepoint_exit},
#endif // NFS_CLIENT_DEBUG
	{"shard", enfs_shard_init, enfs_shard_exit},
	{"mgmt", mgmt_init, mgmt_fini},
	{"dns", enfs_dns_init, enfs_dns_exit},
};

/*
 * Module entry point: run all init steps in order.  Returns 0 on success;
 * on failure the completed steps have already been unwound.
 */
int32_t enfs_init(void)
{
	return init_helper_init(init_entry, ARRAY_SIZE(init_entry));
}

/* Module exit: finalize every step in reverse registration order. */
void enfs_fini(void)
{
	init_helper_finalize(init_entry, ARRAY_SIZE(init_entry));
}
\ No newline at end of file
diff --git a/fs/nfs/enfs/mgmt/config/enfs_config.c b/fs/nfs/enfs/mgmt/config/enfs_config.c
new file mode 100644
index 0000000..01ba863
--- /dev/null
+++ b/fs/nfs/enfs/mgmt/config/enfs_config.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs.h"
+#include "enfs_errcode.h"
+#include "enfs_log.h"
+#include "enfs_tp_common.h"
+#include "enfs_config.h"
+
+#define MAX_FILE_SIZE 8192
+#define STRING_BUF_SIZE 128
+#define CONFIG_FILE_PATH "/etc/enfs/config.ini"
+#define ENFS_NOTIFY_FILE_PERIOD 1000UL
+
+#define MAX_PATH_DETECT_INTERVAL 300
+#define MIN_PATH_DETECT_INTERVAL 5
+#define MAX_PATH_DETECT_TIMEOUT 60
+#define MIN_PATH_DETECT_TIMEOUT 1
+#define MAX_MULTIPATH_TIMEOUT 60
+#define MIN_MULTIPATH_TIMEOUT 0
+#define MAX_MULTIPATH_STATE ENFS_MULTIPATH_DISABLE
+#define MIN_MULTIPATH_STATE ENFS_MULTIPATH_ENABLE
+#define MIN_SHARDVIEW_UPDATE_INTERVAL 30 // 30 second
+#define MAX_SHARDVIEW_UPDATE_INTERVAL 300 // 300 second
+#define MIN_LOOKUPCACHE_INTERVAL 30 // 30 second
+#define MAX_LOOKUPCACHE_STATE ENFS_LOOKUPCACHE_ENABLE
+#define MIN_LOOKUPCACHE_STATE ENFS_LOOKUPCACHE_DISABLE
+
+#define DEFAULT_PATH_DETECT_INTERVAL 10
+#define DEFAULT_PATH_DETECT_TIMEOUT 5
+#define DEFAULT_MULTIPATH_TIMEOUT 0 // 0表示使用用户mount命令指定的timeo时间
+#define DEFAULT_MULTIPATH_STATE ENFS_MULTIPATH_ENABLE
+#define DEFAULT_LOADBALANCE_MODE ENFS_LOADBALANCE_RR
+#define DEFAULT_DNS_UPDATE_INTERVAL 5 // 5 minute
+#define DEFAULT_DNS_AUTO_MULTIPATH_RESOLUTION 1 // 默认开启NFS原生域名参数解析
+#define DEFAULT_SHARDVIEW_UPDATE_INTERVAL 60 // 60 second
+#define MAX_PRIOPITY_ARRAY_WWNS 6
+#define MAX_IP_PREFIX 8
+#define DEFAULT_LOOKUPCACHE_INTERVAL 60 // 60 second
+#define DEFAULT_LOOKUPCACHE_STATE ENFS_LOOKUPCACHE_ENABLE
+
+typedef int (*check_and_assign_func)(char *, char *, int, int);
+
+struct enfs_config_info {
+ int32_t path_detect_interval;
+ int32_t path_detect_timeout;
+ int32_t multipath_timeout;
+ int32_t loadbalance_mode;
+ int32_t multipath_state;
+ int32_t dns_update_interval;
+ int32_t dns_auto_multipath_resolution;
+ int32_t shardview_update_interval;
+ int32_t priopity_wwn_count;
+ uint64_t priopity_array_wwns[MAX_PRIOPITY_ARRAY_WWNS];
+ int32_t ip_filters_count;
+ char local_ip_filters[MAX_IP_PREFIX][INET6_ADDRSTRLEN];
+ int32_t lookupcache_interval;
+ int32_t lookupcache_enable;
+ int32_t link_count_per_mount;
+ int32_t link_count_total;
+ int32_t native_link_io_enable;
+ int32_t create_path_no_route;
+};
+
+typedef struct {
+ char *field_name;
+ check_and_assign_func func;
+ int min_value;
+ int max_value;
+} check_and_assign_value;
+
+static struct enfs_config_info g_enfs_config_info;
+static struct timespec64 modify_time;
+static struct task_struct *thread;
+
+static int enfs_check_config_value(char *value, int min_value, int max_value)
+{
+ unsigned long num_value;
+ int ret;
+
+ ret = kstrtol(value, 10, &num_value); // 转换为10进制
+ if (ret != 0) {
+ enfs_log_error("Failed to convert string to int\n");
+ return -EINVAL;
+ }
+
+ if (num_value < min_value || num_value > max_value) {
+ return -EINVAL;
+ }
+
+ return num_value;
+}
+
/*
 * Validate @value against [@min_value, @max_value] and, on success, store
 * the parsed integer into the g_enfs_config_info field selected by
 * @field_name.  Returns ENFS_RET_OK when the field is known and the value
 * is in range, -EINVAL otherwise.
 */
static int32_t enfs_check_and_assign_int_value(char *field_name, char *value, int min_value, int max_value)
{
	int i;
	int int_value;
	/* name -> destination-field dispatch table; every plain integer
	 * tunable funnels through this one function. */
	static struct config_entry_t {
		const char *name;
		int32_t *val;
	} config[] = {
		{"path_detect_interval", &g_enfs_config_info.path_detect_interval},
		{"path_detect_timeout", &g_enfs_config_info.path_detect_timeout},
		{"multipath_timeout", &g_enfs_config_info.multipath_timeout},
		{"multipath_disable", &g_enfs_config_info.multipath_state},
		{"dns_update_interval", &g_enfs_config_info.dns_update_interval},
		{"dns_auto_multipath_resolution", &g_enfs_config_info.dns_auto_multipath_resolution},
		{"shardview_update_interval", &g_enfs_config_info.shardview_update_interval},
		{"lookupcache_interval", &g_enfs_config_info.lookupcache_interval},
		{"lookupcache_enable", &g_enfs_config_info.lookupcache_enable},
		{"link_count_per_mount", &g_enfs_config_info.link_count_per_mount},
		{"link_count_total", &g_enfs_config_info.link_count_total},
		{"native_link_io_enable", &g_enfs_config_info.native_link_io_enable},
		{"create_path_no_route", &g_enfs_config_info.create_path_no_route},
	};

	/* Negative return means parse/range failure (-EINVAL). */
	int_value = enfs_check_config_value(value, min_value, max_value);
	if (int_value < 0) {
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(config); i++) {
		if (strcmp(field_name, config[i].name) == 0) {
			*config[i].val = int_value;
			return ENFS_RET_OK;
		}
	}
	return -EINVAL;
}
+
+static int32_t enfs_check_and_assign_loadbalance_mode(char *field_name, char *value, int min_value, int max_value)
+{
+ if (value == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcmp(field_name, "multipath_select_policy") == 0) {
+ if (strcmp(value, "roundrobin") == 0) {
+ g_enfs_config_info.loadbalance_mode = ENFS_LOADBALANCE_RR;
+ return ENFS_RET_OK;
+ }
+ if (strcmp(value, "shardview") == 0) {
+ g_enfs_config_info.loadbalance_mode = ENFS_LOADBALANCE_SHARDVIEW;
+ return ENFS_RET_OK;
+ }
+ }
+ return -EINVAL;
+}
+
+static int32_t enfs_check_and_assign_ip_filters(char *field_name, char *value,
+ int min_value, int max_value)
+{
+ int count = 0;
+ char *token;
+ char *copy;
+ char *tmp;
+ if (strcmp(field_name, "local_ip_filters") != 0) {
+ return -EINVAL;
+ }
+ copy = kstrdup(value, GFP_KERNEL);
+ tmp = copy;
+ if (!copy) {
+ return -EINVAL;
+ }
+
+ if (strlen(tmp) == 0) {
+ g_enfs_config_info.ip_filters_count = count;
+ kfree(copy);
+ return ENFS_RET_OK;
+ }
+ while ((token = strsep(&tmp, ",")) != NULL) {
+ if (count >= MAX_IP_PREFIX) {
+ kfree(copy);
+ return -EINVAL;
+ }
+
+ if (strlen(token) >= INET6_ADDRSTRLEN) { // 48 byte
+ kfree(copy);
+ return -EINVAL;
+ }
+
+ strcpy(g_enfs_config_info.local_ip_filters[count], token);
+ count++;
+ }
+
+ g_enfs_config_info.ip_filters_count = count;
+ kfree(copy);
+ return ENFS_RET_OK;
+}
+
+static int32_t enfs_check_and_assign_wwns(char *field_name, char *value, int min_value, int max_value)
+{
+ int count = 0;
+ char *token;
+ char *copy;
+ char *tmp;
+
+ if (value == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcmp(field_name, "priopity_array_wwns") != 0) {
+ return -EINVAL;
+ }
+ copy = kstrdup(value, GFP_KERNEL);
+ tmp = copy;
+ if (!copy) {
+ return -EINVAL;
+ }
+
+ while ((token = strsep(&tmp, ",")) != NULL) {
+ if (count >= MAX_PRIOPITY_ARRAY_WWNS) {
+ kfree(copy);
+ return -EINVAL;
+ }
+ if (strlen(token) != 16) { // 8 bytes = 16 chars
+ kfree(copy);
+ return -EINVAL;
+ }
+ if (sscanf(token, "%llx", &(g_enfs_config_info.priopity_array_wwns[count])) < 0) {
+ kfree(copy);
+ return -EINVAL;
+ }
+ count++;
+ }
+
+ g_enfs_config_info.priopity_wwn_count = count;
+ kfree(copy);
+ return ENFS_RET_OK;
+}
+
+static const check_and_assign_value g_check_and_assign_value[] = {
+ {"path_detect_interval", enfs_check_and_assign_int_value, MIN_PATH_DETECT_INTERVAL, MAX_PATH_DETECT_INTERVAL},
+ {"path_detect_timeout", enfs_check_and_assign_int_value, MIN_PATH_DETECT_TIMEOUT, MAX_PATH_DETECT_TIMEOUT},
+ {"multipath_timeout", enfs_check_and_assign_int_value, MIN_MULTIPATH_TIMEOUT, MAX_MULTIPATH_TIMEOUT},
+ {"multipath_disable", enfs_check_and_assign_int_value, MIN_MULTIPATH_STATE, MAX_MULTIPATH_STATE},
+ {"multipath_select_policy", enfs_check_and_assign_loadbalance_mode, 0, 0},
+ {"dns_update_interval", enfs_check_and_assign_int_value, 0, INT_MAX},
+ {"dns_auto_multipath_resolution", enfs_check_and_assign_int_value, 0, 1},
+ {"shardview_update_interval", enfs_check_and_assign_int_value, MIN_SHARDVIEW_UPDATE_INTERVAL, MAX_SHARDVIEW_UPDATE_INTERVAL},
+ {"priopity_array_wwns", enfs_check_and_assign_wwns, 0, 0},
+ {"local_ip_filters", enfs_check_and_assign_ip_filters, 0, 0},
+ {"lookupcache_interval", enfs_check_and_assign_int_value, MIN_LOOKUPCACHE_INTERVAL, INT_MAX},
+ {"lookupcache_enable", enfs_check_and_assign_int_value, MIN_LOOKUPCACHE_STATE, MAX_LOOKUPCACHE_STATE},
+ {"link_count_per_mount", enfs_check_and_assign_int_value,
+ MIN_SUPPORTED_REMOTE_IP_COUNT, MAX_SUPPORTED_REMOTE_IP_COUNT},
+ {"link_count_total", enfs_check_and_assign_int_value, MIN_ENFS_MAX_LINK_COUNT, ENFS_MAX_LINK_COUNT},
+ {"native_link_io_enable", enfs_check_and_assign_int_value, 0, 1},
+};
+
+#ifdef ENFS_OPENEULER_660
+static int32_t enfs_read_config_file_in_openeuler(char *buffer, char *file_path)
+{
+ int ret;
+ struct file *filp = NULL;
+ loff_t f_pos = 0;
+
+ LVOS_TP_START(OPEN_CONFIG_FILE_FAILED, &filp);
+ filp = filp_open(file_path, O_RDONLY, 0);
+ LVOS_TP_END;
+ if (IS_ERR(filp)) {
+ enfs_log_error("Failed to open file %s\n", CONFIG_FILE_PATH);
+ ret = -ENOENT;
+ return ret;
+ }
+
+ kernel_read(filp, buffer, MAX_FILE_SIZE, &f_pos);
+
+ ret = filp_close(filp, NULL);
+ if (ret) {
+ enfs_log_error("Close File:%s failed:%d.\n", CONFIG_FILE_PATH, ret);
+ return -EINVAL;
+ }
+ return ENFS_RET_OK;
+}
+#else
+static int32_t enfs_read_config_file(char *buffer, char *file_path)
+{
+ int ret;
+ struct file *filp = NULL;
+ loff_t f_pos = 0;
+ mm_segment_t fs;
+
+ LVOS_TP_START(OPEN_CONFIG_FILE_FAILED, &filp);
+ filp = filp_open(file_path, O_RDONLY, 0);
+ LVOS_TP_END;
+ if (IS_ERR(filp)) {
+ enfs_log_error("Failed to open file %s\n", CONFIG_FILE_PATH);
+ ret = -ENOENT;
+ return ret;
+ }
+
+#ifdef ENFS_EULER_5_10
+ fs = force_uaccess_begin();
+#else
+ fs = get_fs();
+ set_fs(get_ds());
+#endif
+
+#ifdef ENFS_KERNEL_READ_FS
+ kernel_read(filp, f_pos, buffer, MAX_FILE_SIZE);
+#else
+ kernel_read(filp, buffer, MAX_FILE_SIZE, &f_pos);
+#endif
+
+#ifdef ENFS_EULER_5_10
+ force_uaccess_end(fs);
+#else
+ set_fs(fs);
+#endif
+
+ ret = filp_close(filp, NULL);
+ if (ret) {
+ enfs_log_error("Close File:%s failed:%d.\n", CONFIG_FILE_PATH, ret);
+ return -EINVAL;
+ }
+ return ENFS_RET_OK;
+}
+#endif
+
/*
 * Return the length of the current line in @buffer, excluding the trailing
 * '\n' (the caller's loop consumes the newline itself on its next
 * iteration).  Used to skip '#' comment lines.
 *
 * The original computed strlen(buffer) - strlen(pos), scanning the rest of
 * the buffer twice; pointer arithmetic gives the same value in one pass.
 */
static int32_t enfs_deal_with_comment_line(char *buffer)
{
	char *pos = strchr(buffer, '\n');

	if (pos != NULL) {
		return (int32_t)(pos - buffer);
	}
	return (int32_t)strlen(buffer);
}
+
/*
 * Parse one "key = value" line from @buffer.
 *
 * On success copies the trimmed key/value into @key/@value and returns the
 * number of bytes consumed (the line length excluding the '\n'); returns
 * -ENOMEM on allocation failure.  A line containing no '=' still returns
 * its length but leaves @key/@value untouched.
 *
 * NOTE(review): strncpy() may leave @key/@value unterminated when a trimmed
 * token is >= keyLen/valueLen bytes; the caller NUL-terminates both buffers
 * after this returns — keep that invariant.
 * NOTE(review): because @key/@value are not cleared here, a '='-less line
 * leaves the previous line's key/value visible in the caller's buffers.
 */
static int32_t enfs_parse_key_value_from_config(char *buffer, char *key, char *value, int keyLen, int valueLen)
{
	char *line;
	char *tokenPtr;
	int len;
	char *tem;
	char *pos = strchr(buffer, '\n');

	/* Line length = bytes before the '\n', or the whole remainder. */
	if (pos != NULL) {
		len = strlen(buffer) - strlen(pos);
	} else {
		len = strlen(buffer);
	}
	/* Work on a NUL-terminated private copy so strsep/strim can mutate it. */
	line = kmalloc(len + 1, GFP_KERNEL);
	if (!line) {
		enfs_log_error("Failed to allocate memory.\n");
		return -ENOMEM;
	}
	line[len] = '\0';
	strncpy(line, buffer, len);

	tem = line;
	tokenPtr = strsep(&tem, "=");
	if (tokenPtr == NULL || tem == NULL) {
		/* No '=' on this line: nothing to assign, just consume it. */
		kfree(line);
		return len;
	}
	strncpy(key, strim(tokenPtr), keyLen);
	strncpy(value, strim(tem), valueLen);

	kfree(line);
	return len;
}
+
/*
 * Scan the whole config @buffer for the key @field_name and copy its value
 * into @value (at most @valueLen bytes; the caller NUL-terminates).
 * Returns ENFS_RET_OK on success, a negative errno when a line fails to
 * parse or the key is absent.
 */
static int32_t enfs_get_value_from_config_file(char *buffer, char *field_name, char *value, int valueLen)
{
	int ret;
	char key[STRING_BUF_SIZE + 1] = {0};
	char val[STRING_BUF_SIZE + 1] = {0};

	while (buffer[0] != '\0') {
		/* Process one line per iteration. */
		if (buffer[0] == '\n') {
			/* Blank line: just skip the newline. */
			buffer++;
		} else if (buffer[0] == '#') {
			/* Comment line: skip up to (not including) the '\n'. */
			ret = enfs_deal_with_comment_line(buffer);
			if (ret > 0) {
				buffer += ret;
			}
		} else {
			/* Regular "key = value" line. */
			ret = enfs_parse_key_value_from_config(buffer, key, val, STRING_BUF_SIZE, STRING_BUF_SIZE);
			if (ret < 0) {
				enfs_log_error("failed to parse key value,ret = %d.\n", ret);
				return ret;
			}
			/* Defensive termination: strncpy above may not add NUL. */
			key[STRING_BUF_SIZE] = '\0';
			val[STRING_BUF_SIZE] = '\0';

			buffer += ret;

			/* When the key matches @field_name, copy the value out. */
			if (strcmp(field_name, key) == 0) {
				strncpy(value, val, valueLen);
				return ENFS_RET_OK;
			}
		}
	}
	enfs_log_error("can not find value which matched field_name: %s.\n", field_name);
	return -EINVAL;
}
+
+int32_t enfs_config_load(void)
+{
+ char value[STRING_BUF_SIZE + 1];
+ int ret;
+ int table_len;
+ int min;
+ int max;
+ int i;
+ char *buffer;
+
+ buffer = (char *)kmalloc(MAX_FILE_SIZE, GFP_KERNEL);
+ if (!buffer) {
+ enfs_log_error("Failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ memset(buffer, 0, MAX_FILE_SIZE);
+
+ // 初始化为默认值
+ g_enfs_config_info.path_detect_interval = DEFAULT_PATH_DETECT_INTERVAL;
+ g_enfs_config_info.path_detect_timeout = DEFAULT_PATH_DETECT_TIMEOUT;
+ g_enfs_config_info.multipath_timeout = DEFAULT_MULTIPATH_TIMEOUT;
+ g_enfs_config_info.multipath_state = DEFAULT_MULTIPATH_STATE;
+ g_enfs_config_info.loadbalance_mode = DEFAULT_LOADBALANCE_MODE;
+ g_enfs_config_info.dns_update_interval = DEFAULT_DNS_UPDATE_INTERVAL;
+ g_enfs_config_info.dns_auto_multipath_resolution = 1;
+ g_enfs_config_info.shardview_update_interval = DEFAULT_SHARDVIEW_UPDATE_INTERVAL;
+ g_enfs_config_info.priopity_wwn_count = 0;
+ g_enfs_config_info.lookupcache_interval = DEFAULT_LOOKUPCACHE_INTERVAL;
+ g_enfs_config_info.lookupcache_enable = DEFAULT_LOOKUPCACHE_STATE;
+ g_enfs_config_info.link_count_per_mount = DEFAULT_SUPPORTED_REMOTE_IP_COUNT;
+ g_enfs_config_info.link_count_total = DEFAULT_ENFS_MAX_LINK_COUNT;
+ g_enfs_config_info.native_link_io_enable = 1;
+ g_enfs_config_info.create_path_no_route = 0;
+
+ table_len = sizeof(g_check_and_assign_value) / sizeof(g_check_and_assign_value[0]);
+
+#ifdef ENFS_OPENEULER_660
+ ret = enfs_read_config_file_in_openeuler(buffer, CONFIG_FILE_PATH);
+#else
+ ret = enfs_read_config_file(buffer, CONFIG_FILE_PATH);
+#endif
+ if (ret != 0) {
+ kfree(buffer);
+ return ret;
+ }
+ for (i = 0; i < table_len; i++) {
+ ret = enfs_get_value_from_config_file(buffer, g_check_and_assign_value[i].field_name, value, STRING_BUF_SIZE);
+ if (ret < 0) {
+ continue;
+ }
+ value[STRING_BUF_SIZE] = '\0';
+ min = g_check_and_assign_value[i].min_value;
+ max = g_check_and_assign_value[i].max_value;
+ if (g_check_and_assign_value[i].func != NULL) {
+ (*g_check_and_assign_value[i].func)(g_check_and_assign_value[i].field_name, value, min, max);
+ }
+ }
+
+ kfree(buffer);
+ return ENFS_RET_OK;
+}
+
/*
 * Trivial accessors for the parsed configuration.  Each returns a plain
 * int32_t snapshot of g_enfs_config_info; the poll thread may rewrite the
 * fields concurrently, so callers get a best-effort value.
 * NOTE(review): no locking/READ_ONCE — presumably acceptable for these
 * tunables; confirm no caller needs a consistent multi-field view.
 */
int32_t enfs_get_config_path_detect_interval(void)
{
	return g_enfs_config_info.path_detect_interval;
}

int32_t enfs_get_config_path_detect_timeout(void)
{
	return g_enfs_config_info.path_detect_timeout;
}

int32_t enfs_get_config_multipath_timeout(void)
{
	return g_enfs_config_info.multipath_timeout;
}

int32_t enfs_get_config_multipath_state(void)
{
	return g_enfs_config_info.multipath_state;
}

int32_t enfs_get_config_loadbalance_mode(void)
{
	return g_enfs_config_info.loadbalance_mode;
}

int32_t enfs_get_config_dns_update_interval(void)
{
	return g_enfs_config_info.dns_update_interval;
}

int32_t enfs_get_config_dns_auto_multipath_resolution(void)
{
	return g_enfs_config_info.dns_auto_multipath_resolution;
}

int32_t enfs_get_config_shardview_update_interval(void)
{
	return g_enfs_config_info.shardview_update_interval;
}

int32_t enfs_get_config_lookupcache_interval(void)
{
	return g_enfs_config_info.lookupcache_interval;
}

/* "state" getter reads the 'lookupcache_enable' field. */
int32_t enfs_get_config_lookupcache_state(void)
{
	return g_enfs_config_info.lookupcache_enable;
}

int32_t enfs_get_config_link_count_per_mount(void)
{
	return g_enfs_config_info.link_count_per_mount;
}

int32_t enfs_get_config_link_count_total(void)
{
	return g_enfs_config_info.link_count_total;
}

int32_t enfs_get_native_link_io_status(void)
{
	return g_enfs_config_info.native_link_io_enable;
}

int32_t enfs_get_create_path_no_route(void)
{
	return g_enfs_config_info.create_path_no_route;
}
+
+bool enfs_check_config_wwn(uint64_t wwn)
+{
+ int count;
+
+ for (count = 0; count < g_enfs_config_info.priopity_wwn_count; count++) {
+ if (g_enfs_config_info.priopity_array_wwns[count] == wwn) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool enfs_glob_match(char const *pat, char const *str)
+{
+ unsigned char c;
+ unsigned char d;
+ char const *back_pat = NULL;
+ char const *back_str = back_str;
+ bool match = false;
+ bool inverted = (*pat == '!');
+ char const *class = pat + inverted;
+ unsigned char a = *class ++;
+ unsigned char b = a;
+
+ for (;;) {
+ c = *str++;
+ d = *pat++;
+
+ switch (d) {
+ case '?':
+ if (c == '\0')
+ return false;
+ break;
+ case '*':
+ if (*pat == '\0')
+ return true;
+ back_pat = pat;
+ back_str = --str;
+ break;
+ case '[': {
+ match = false;
+ inverted = (*pat == '!');
+ class = pat + inverted;
+ a = *class ++;
+
+ do {
+ b = a;
+
+ if (a == '\0')
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ }
+ match |= (a <= c && c <= b);
+ } while ((a = *class ++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ } break;
+
+ case '\\':
+ d = *pat++;
+ break;
+ default:
+ literal:
+ if (c == d) {
+ if (d == '\0') {
+ return true;
+ }
+ break;
+ }
+ backtrack:
+ if (c == '\0' || !back_pat) {
+ return false;
+ }
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ }
+}
+
+bool enfs_whitelist_filte(char *ip_addr)
+{
+ int i;
+ int count = g_enfs_config_info.ip_filters_count;
+
+ if (count == 0) {
+ return true;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (enfs_glob_match(g_enfs_config_info.local_ip_filters[i], ip_addr)) {
+ return true;
+ }
+ }
+ return false;
+}
+
/*
 * Return true when @filename's modification time is newer than the value
 * cached in 'modify_time' (and refresh the cache); false otherwise or on
 * any stat failure.  Drives the reload-on-change poll loop.
 */
static bool enfs_file_changed(const char *filename)
{
	int err;
	struct kstat file_stat;

#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
	/* Newer kernels lack vfs_stat() here: resolve the path and use
	 * vfs_getattr() instead. */
	struct path fpath;
	err = kern_path(filename, LOOKUP_FOLLOW, &fpath);
	if (err) {
		return false;
	}
	err = vfs_getattr(&fpath, &file_stat, STATX_BASIC_STATS, 0);
	path_put(&fpath);
#else
	err = vfs_stat(filename, &file_stat);
#endif
	if (err) {
		enfs_log_debug("failed to open file:%s err:%d\n", filename, err);
		return false;
	}

	/* -1 => cached timestamp is strictly older than the file's mtime. */
	if (timespec64_compare(&modify_time, &file_stat.mtime) == -1) {
		modify_time = file_stat.mtime;
		enfs_log_debug("file change: %lld %lld\n",
			(long long int)(modify_time.tv_sec), (long long int)(file_stat.mtime.tv_sec));
		return true;
	}

	return false;
}
+
/*
 * Body of the config-watcher kthread: every ENFS_NOTIFY_FILE_PERIOD ms,
 * reload the config file if its mtime advanced.  Returns 0 when
 * kthread_stop() asks it to exit.  @data is unused.
 */
static int enfs_thread_func(void *data)
{
	while (!kthread_should_stop()) {
		if (enfs_file_changed(CONFIG_FILE_PATH)) {
			enfs_config_load();
		}
		enfs_msleep(ENFS_NOTIFY_FILE_PERIOD);
	}
	return 0;
}
+
+int enfs_config_timer_init(void)
+{
+ thread = kthread_run(enfs_thread_func, NULL, "enfs_notiy_file_thread");
+ if (IS_ERR(thread)) {
+ pr_err("Failed to create kernel thread\n");
+ return PTR_ERR(thread);
+ }
+ return 0;
+}
+
+void enfs_config_timer_exit(void)
+{
+ printk(KERN_INFO "enfs_notify_file_exit\n");
+ if (thread) {
+ kthread_stop(thread);
+ }
+}
+
/*
 * Accessor for the number of configured local-IP filter prefixes.
 * NOTE(review): CamelCase deviates from this file's snake_case naming,
 * but the symbol is part of the exported interface and must keep its name.
 */
int GetEnfsConfigIpFiltersCount(void)
{
	return g_enfs_config_info.ip_filters_count;
}
diff --git a/fs/nfs/enfs/mgmt/mgmt_init.c b/fs/nfs/enfs/mgmt/mgmt_init.c
new file mode 100644
index 0000000..7f0acc3
--- /dev/null
+++ b/fs/nfs/enfs/mgmt/mgmt_init.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: mgmt component init
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#include "mgmt_init.h"
+#include
+#include "enfs_errcode.h"
+#include "enfs_config.h"
+
+/* Bring up the management component: start the config polling thread. */
+int32_t mgmt_init(void)
+{
+	int32_t ret = enfs_config_timer_init();
+
+	return ret;
+}
+
+/* Tear down the management component: stop the config polling thread. */
+void mgmt_fini(void)
+{
+	enfs_config_timer_exit();
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/mgmt/mgmt_init.h b/fs/nfs/enfs/mgmt/mgmt_init.h
new file mode 100644
index 0000000..1dd39c1
--- /dev/null
+++ b/fs/nfs/enfs/mgmt/mgmt_init.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: mgmt component init
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#ifndef MGMT_INIT_H
+#define MGMT_INIT_H
+
+#include
+
+int32_t mgmt_init(void);
+void mgmt_fini(void);
+
+
+#endif // MGMT_INIT_H
diff --git a/fs/nfs/enfs/multipath/failover/failover_com.h b/fs/nfs/enfs/multipath/failover/failover_com.h
new file mode 100644
index 0000000..68b4eca
--- /dev/null
+++ b/fs/nfs/enfs/multipath/failover/failover_com.h
@@ -0,0 +1,29 @@
+ /*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: failover time commont header file
+ * Create: 2023-08-02
+ */
+
+
+#ifndef FAILOVER_COMMON_H
+#define FAILOVER_COMMON_H
+
+/* Walk up to the root rpc_clnt (the chain ends at a self-parented or
+ * parentless client) and report whether that client is enfs-managed.
+ */
+static inline bool failover_is_enfs_clnt(struct rpc_clnt *clnt)
+{
+	struct rpc_clnt *root = clnt->cl_parent;
+	struct rpc_clnt_reserve *reserve;
+
+	while (root != NULL && root != root->cl_parent)
+		root = root->cl_parent;
+
+	reserve = (struct rpc_clnt_reserve *)(root ? root : clnt);
+	return reserve->cl_enfs == 1;
+}
+
+#endif // FAILOVER_COMMON_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/failover/failover_path.c b/fs/nfs/enfs/multipath/failover/failover_path.c
new file mode 100644
index 0000000..4fd55da
--- /dev/null
+++ b/fs/nfs/enfs/multipath/failover/failover_path.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs path failover file
+ * Author: y00583252
+ * Create: 2023-08-02
+ */
+
+#include "failover_path.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "enfs_config.h"
+#include "enfs_log.h"
+#include "enfs.h"
+#include "failover_com.h"
+#include "pm_state.h"
+#include "pm_ping.h"
+#include "enfs_tp_common.h"
+
+typedef enum {
+ FAILOVER_NOACTION = 1,
+ FAILOVER_RETRY,
+ FAILOVER_RETRY_DELAY,
+ FAILOVER_RETURN_TIMEOUT,
+} failover_policy_t;
+
+/* Restart @task from the beginning of the RPC state machine and move it
+ * onto the next transport of its client.
+ */
+static void failover_retry_path(struct rpc_task *task)
+{
+	int ret;
+
+	ret = rpc_restart_call(task);
+
+	/* NOTE(review): only swap transports when rpc_restart_call()
+	 * returned 1 (restart accepted) — confirm the return convention
+	 * against the target kernel version.
+	 */
+	if (ret == 1) {
+		xprt_release(task);
+		rpc_init_task_retry_counters(task);
+		rpc_task_release_transport(task);
+		task->tk_xprt = rpc_task_get_next_xprt(task->tk_client);
+	}
+}
+
+/* Retry on another path, then put the task to sleep for @delay jiffies
+ * before it runs again.
+ */
+static void failover_retry_path_delay(struct rpc_task *task, int32_t delay)
+{
+	failover_retry_path(task);
+	rpc_delay(task, delay);
+}
+
+/* Abort a path-detect (ping) task with -ETIMEDOUT once it has run
+ * longer than the configured detection timeout.
+ */
+static void failover_exit_return_timeout(struct rpc_task *task)
+{
+	/* ktime_ms_delta() returns s64; keep 64 bits so long-running tasks
+	 * cannot truncate/wrap (the old unsigned int could).
+	 */
+	s64 execute = ktime_ms_delta(ktime_get(), task->tk_start);
+	s64 config_timeout = (s64)enfs_get_config_path_detect_timeout() * MSEC_PER_SEC;
+
+	if (execute > config_timeout)
+		rpc_exit(task, -ETIMEDOUT);
+}
+
+/* Dispatch the chosen failover policy to the matching retry helper. */
+static void failover_retry_path_by_policy(struct rpc_task *task, failover_policy_t policy)
+{
+	switch (policy) {
+	case FAILOVER_RETRY:
+		failover_retry_path(task);
+		break;
+	case FAILOVER_RETRY_DELAY:
+		failover_retry_path_delay(task, 3 * HZ); /* back off for 3s */
+		break;
+	case FAILOVER_RETURN_TIMEOUT:
+		failover_exit_return_timeout(task);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Retry policy for NFSv3: non-idempotent (mutating) procedures are
+ * retried with a delay, everything else retries immediately.
+ */
+static failover_policy_t failover_get_nfs3_retry_policy(struct rpc_task *task)
+{
+	const struct rpc_procinfo *info = task->tk_msg.rpc_proc;
+
+	if (unlikely(info == NULL)) {
+		enfs_log_error("the task contains no valid proc.\n");
+		return FAILOVER_NOACTION;
+	}
+
+	switch (info->p_proc) {
+	case NFS3PROC_CREATE:
+	case NFS3PROC_MKDIR:
+	case NFS3PROC_REMOVE:
+	case NFS3PROC_RMDIR:
+	case NFS3PROC_SYMLINK:
+	case NFS3PROC_LINK:
+	case NFS3PROC_SETATTR:
+	case NFS3PROC_WRITE:
+		return FAILOVER_RETRY_DELAY;
+	default:
+		return FAILOVER_RETRY;
+	}
+}
+
+/* Retry policy for NFSv4, keyed on the procedure's stat index:
+ * mutating operations get a delayed retry, the rest retry at once.
+ */
+static failover_policy_t failover_get_nfs4_retry_policy(struct rpc_task *task)
+{
+	const struct rpc_procinfo *info = task->tk_msg.rpc_proc;
+
+	if (unlikely(info == NULL)) {
+		enfs_log_error("the task contains no valid proc.\n");
+		return FAILOVER_NOACTION;
+	}
+
+	switch (info->p_statidx) {
+	case NFSPROC4_CLNT_CREATE:
+	case NFSPROC4_CLNT_REMOVE:
+	case NFSPROC4_CLNT_LINK:
+	case NFSPROC4_CLNT_SYMLINK:
+	case NFSPROC4_CLNT_SETATTR:
+	case NFSPROC4_CLNT_WRITE:
+	case NFSPROC4_CLNT_RENAME:
+	case NFSPROC4_CLNT_SETACL:
+		return FAILOVER_RETRY_DELAY;
+	default:
+		return FAILOVER_RETRY;
+	}
+}
+
+/* Choose a failover policy for @task: ping tasks time out in place,
+ * pinned tasks are left alone, and unsent tasks always retry
+ * immediately regardless of the per-version policy.
+ */
+static failover_policy_t failover_get_retry_policy(struct rpc_task *task)
+{
+	struct rpc_clnt *clnt = task->tk_client;
+	u32 version = clnt->cl_vers;
+	failover_policy_t policy = FAILOVER_NOACTION;
+
+	if (pm_ping_is_test_xprt_task(task)) {
+		return FAILOVER_RETURN_TIMEOUT;
+	}
+
+	// 1. if the task meant to send to certain xprt, take no action
+	if (task->tk_flags & RPC_TASK_FIXED) {
+		return FAILOVER_NOACTION;
+	}
+
+	// 2. get policy by different version of nfs protocal
+	if (version == 3) { // nfs v3
+		policy = failover_get_nfs3_retry_policy(task);
+	} else if (version == 4) { // nfs v4
+		policy = failover_get_nfs4_retry_policy(task);
+	} else {
+		return FAILOVER_NOACTION;
+	}
+
+	// 3. if the task is not send to target, retry immediately
+	if (!RPC_WAS_SENT(task)) {
+		policy = FAILOVER_RETRY;
+	}
+
+	return policy;
+}
+
+/* Validate that @task belongs to an enfs-managed NFS client and that
+ * multipath is enabled.  Returns 0 when failover handling may proceed,
+ * -EINVAL otherwise.
+ */
+static int failover_check_task(struct rpc_task *task)
+{
+	struct rpc_clnt *clnt;
+
+	if (enfs_get_config_multipath_state() != ENFS_MULTIPATH_ENABLE) {
+		enfs_log_debug("Multipath is not enabled.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(task == NULL || task->tk_client == NULL)) {
+		enfs_log_error("The task is not valid.\n");
+		return -EINVAL;
+	}
+
+	clnt = task->tk_client;
+	if (clnt->cl_prog != NFS_PROGRAM) {
+		enfs_log_debug("The clnt is not prog{%u} type.\n", clnt->cl_prog);
+		return -EINVAL;
+	}
+
+	if (!failover_is_enfs_clnt(clnt)) {
+		enfs_log_debug("The clnt is not a enfs-managed type.\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Entry point called on an RPC failure: mark the task's current path
+ * faulty, then retry the task according to the computed policy.
+ */
+void failover_handle(struct rpc_task *task)
+{
+	failover_policy_t policy;
+	int ret;
+
+	/* bail out unless multipath is on and the client is enfs-managed */
+	ret = failover_check_task(task);
+	if (ret != 0) {
+		return;
+	}
+
+	pm_set_path_state(task->tk_xprt, PM_STATE_FAULT);
+
+	policy = failover_get_retry_policy(task);
+
+	failover_retry_path_by_policy(task, policy);
+
+	return;
+}
+
+/* A task is pinned to its transport when it carries RPC_TASK_FIXED or
+ * is a path-detect (ping) task.
+ */
+static bool failover_is_task_use_fixed_path(struct rpc_task *task)
+{
+	return (task->tk_flags & RPC_TASK_FIXED) ||
+		pm_ping_is_test_xprt_task(task);
+}
+
+/* Decide whether a finished call should be restarted on another path:
+ * only valid enfs tasks that are not pinned to a transport qualify.
+ */
+bool failover_task_need_call_start_again(struct rpc_task *task)
+{
+	if (failover_check_task(task) != 0)
+		return false;
+
+	return !failover_is_task_use_fixed_path(task);
+}
+
+/* Gate transmission on path health: pinned tasks always proceed; other
+ * tasks are failed with -ETIMEDOUT when their transport is faulty.
+ */
+bool failover_prepare_transmit(struct rpc_task *task)
+{
+	if (failover_is_task_use_fixed_path(task))
+		return true;
+
+	if (pm_get_path_state(task->tk_xprt) != PM_STATE_FAULT)
+		return true;
+
+	task->tk_status = -ETIMEDOUT;
+	return false;
+}
+
+/**
+ * Reselect a normal link: drop the task's current transport and bind
+ * the client's next one.
+ */
+static void reselect_xprt(struct rpc_task *task)
+{
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+	/* NOTE(review): the RPC_ASSASSINATED() guard is skipped on these
+	 * kernels — presumably the macro no longer exists there; confirm.
+	 */
+#else
+	if (RPC_ASSASSINATED(task))
+		return;
+#endif
+
+	rpc_task_release_transport(task);
+	task->tk_xprt = rpc_task_get_next_xprt(task->tk_client);
+}
+
+/* Re-evaluate the transport bound to @task.  When the current path is
+ * faulty the task simply hops to the next xprt; additionally, for
+ * NFSv4 clients of an enfs-managed root, this client's iterator cursor
+ * is synchronized with the parent client's cursor, and the task is
+ * rebound when it no longer matches.
+ */
+void failover_reselect_transport(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+	struct rpc_xprt **cursor = &clnt->cl_xpi.xpi_cursor;
+	struct rpc_xprt *old;
+	struct rpc_xprt *parent_cursor;
+	struct rpc_clnt *parent_clnt = clnt;
+	struct rpc_clnt_reserve *clnt_reserve;
+
+	/* faulty path: just move to the next transport */
+	if (task->tk_xprt && !failover_prepare_transmit(task)) {
+		reselect_xprt(task);
+		return;
+	}
+
+	/* walk up to the root client (a self-parented client ends the chain) */
+	do {
+		if (parent_clnt == parent_clnt->cl_parent) {
+			break;
+		}
+		parent_clnt = parent_clnt->cl_parent;
+	} while (parent_clnt);
+
+	clnt_reserve = (struct rpc_clnt_reserve *)parent_clnt;
+	if (task->tk_xprt && clnt->cl_vers == 4 && clnt_reserve && clnt_reserve->cl_enfs) {
+		/* mirror the parent's cursor into this client's cursor */
+		old = smp_load_acquire(cursor);
+		parent_cursor = xprt_iter_get_xprt(&parent_clnt->cl_xpi);
+		if (parent_cursor != old) {
+			smp_store_release(cursor, parent_cursor);
+		}
+		// Xprt_get in xprt_iter_get_xprt, if return !NULL
+		if (parent_cursor)
+			xprt_put(parent_cursor);
+
+		/* rebind the task when it is not pinned and now points at a
+		 * different xprt than the (updated) cursor
+		 */
+		if (task->tk_xprt != clnt->cl_xpi.xpi_cursor &&
+			!(task->tk_flags & RPC_TASK_FIXED) &&
+			!(pm_ping_is_test_xprt_task(task))) {
+			xprt_release(task);
+			rpc_init_task_retry_counters(task);
+			rpc_task_release_transport(task);
+			task->tk_xprt = rpc_task_get_next_xprt(clnt);
+		}
+	}
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/failover/failover_path.h b/fs/nfs/enfs/multipath/failover/failover_path.h
new file mode 100644
index 0000000..2b9d8a1
--- /dev/null
+++ b/fs/nfs/enfs/multipath/failover/failover_path.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs path failover header file
+ * Author: y00583252
+ * Create: 2023-08-02
+ */
+
+#ifndef FAILOVER_PATH_H
+#define FAILOVER_PATH_H
+
+#include
+
+void failover_handle(struct rpc_task *task);
+bool failover_prepare_transmit(struct rpc_task *task);
+void failover_reselect_transport(struct rpc_task *task, struct rpc_clnt *clnt);
+bool failover_task_need_call_start_again(struct rpc_task *task);
+
+#endif // FAILOVER_PATH_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/failover/failover_time.c b/fs/nfs/enfs/multipath/failover/failover_time.c
new file mode 100644
index 0000000..640708c
--- /dev/null
+++ b/fs/nfs/enfs/multipath/failover/failover_time.c
@@ -0,0 +1,119 @@
+ /*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: failover time file
+ * Create: 2023-08-02
+ */
+
+#include "failover_time.h"
+#include
+#include
+#include "enfs_config.h"
+#include "enfs_log.h"
+#include "failover_com.h"
+#include "pm_ping.h"
+#include "enfs_tp_common.h"
+
+/* Effective multipath timeout in jiffies: the configured value capped
+ * by the client's to_initval; a zero config means "use the client
+ * default".
+ */
+static unsigned long failover_get_mulitipath_timeout(struct rpc_clnt *clnt)
+{
+	unsigned long clnt_tmo = clnt->cl_timeout->to_initval;
+	unsigned long config_tmo = enfs_get_config_multipath_timeout() * HZ;
+
+	if (config_tmo == 0 || config_tmo > clnt_tmo)
+		return clnt_tmo;
+
+	return config_tmo;
+}
+
+/* Cap @task->tk_timeout with the multipath timeout for enfs-managed
+ * clients.  No-op when multipath is disabled or the client is not
+ * enfs-managed.  @condition is unused here.
+ */
+void failover_adjust_task_timeout(struct rpc_task *task, void *condition)
+{
+	struct rpc_clnt *clnt = NULL;
+	unsigned long tmo;
+	int disable_mpath = enfs_get_config_multipath_state();
+
+	if (disable_mpath != ENFS_MULTIPATH_ENABLE) {
+		enfs_log_debug("Multipath is not enabled.\n");
+		return;
+	}
+
+	clnt = task->tk_client;
+	if (unlikely(clnt == NULL)) {
+		enfs_log_error("task associate client is NULL.\n");
+		return;
+	}
+
+	if (!failover_is_enfs_clnt(clnt)) {
+		enfs_log_debug("The clnt is not a enfs-managed type.\n");
+		return;
+	}
+
+	tmo = failover_get_mulitipath_timeout(clnt);
+	if (tmo == 0) {
+		enfs_log_debug("Multipath is not enabled.\n");
+		return;
+	}
+
+	/* never lengthen an already-set timeout, only shorten it */
+	if (task->tk_timeout != 0) {
+		task->tk_timeout = task->tk_timeout < tmo ? task->tk_timeout : tmo;
+	} else {
+		task->tk_timeout = tmo;
+	}
+	return;
+}
+
+/* Total (major) timeout for a normal I/O request, derived from the
+ * client's rpc_timeout: exponential backoff doubles per retry, linear
+ * backoff adds to_increment per retry; result is clamped to to_maxval.
+ */
+static unsigned long get_normal_io_req_timeout(struct rpc_rqst *req)
+{
+	unsigned long rq_majortimeo = req->rq_timeout;
+	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
+
+	if (to->to_exponential)
+		rq_majortimeo <<= to->to_retries;
+	else
+		rq_majortimeo += to->to_increment * to->to_retries;
+	if (rq_majortimeo > to->to_maxval || rq_majortimeo == 0)
+		rq_majortimeo = to->to_maxval;
+	return rq_majortimeo;
+}
+
+/* Initialize per-request timeouts for an enfs-managed task: normal I/O
+ * uses the multipath timeout, ping tasks use the path-detect timeout.
+ * rq_majortimeo is set to the remaining budget measured from the
+ * task's start time.
+ */
+void failover_init_task_req(struct rpc_task *task, struct rpc_rqst *req)
+{
+	struct rpc_clnt *clnt = NULL;
+	unsigned long current_timeout;
+	unsigned long timeout = 0;
+	int disable_mpath = enfs_get_config_multipath_state();
+
+	if (disable_mpath != ENFS_MULTIPATH_ENABLE) {
+		enfs_log_debug("Multipath is not enabled.\n");
+		return;
+	}
+
+	clnt = task->tk_client;
+	if (unlikely(clnt == NULL)) {
+		enfs_log_error("task associate client is NULL.\n");
+		return;
+	}
+
+	if (!failover_is_enfs_clnt(clnt)) {
+		enfs_log_debug("The clnt is not a enfs-managed type.\n");
+		return;
+	}
+
+	if (!pm_ping_is_test_xprt_task(task)) {
+		timeout = get_normal_io_req_timeout(req);
+		req->rq_timeout = failover_get_mulitipath_timeout(clnt);
+	} else {
+		req->rq_timeout = enfs_get_config_path_detect_timeout() * HZ;
+		timeout = (unsigned long)enfs_get_config_path_detect_timeout() * HZ;
+		/* LVOS_TP_*: fault-injection test point — no effect in production */
+		LVOS_TP_START(PING_TASK_ETIMEDOUT, &task->tk_status);
+		LVOS_TP_END;
+	}
+
+	/* charge the time already elapsed since tk_start against the budget */
+	current_timeout = (ktime_ms_delta(ktime_get(), task->tk_start)) * HZ / MSEC_PER_SEC;
+	if (timeout > current_timeout) {
+		req->rq_majortimeo = (timeout - current_timeout) + jiffies;
+	} else {
+		req->rq_majortimeo = jiffies;
+	}
+	return;
+}
+
diff --git a/fs/nfs/enfs/multipath/failover/failover_time.h b/fs/nfs/enfs/multipath/failover/failover_time.h
new file mode 100644
index 0000000..8b458e0
--- /dev/null
+++ b/fs/nfs/enfs/multipath/failover/failover_time.h
@@ -0,0 +1,15 @@
+ /*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: failover time header file
+ * Create: 2023-08-02
+ */
+
+#ifndef FAILOVER_TIME_H
+#define FAILOVER_TIME_H
+
+#include
+
+void failover_adjust_task_timeout(struct rpc_task *task, void *condition);
+void failover_init_task_req(struct rpc_task *task, struct rpc_rqst *req);
+
+#endif // FAILOVER_TIME_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/load_balance/enfs_roundrobin.c b/fs/nfs/enfs/multipath/load_balance/enfs_roundrobin.c
new file mode 100644
index 0000000..371c7f8
--- /dev/null
+++ b/fs/nfs/enfs/multipath/load_balance/enfs_roundrobin.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs.h"
+#include "enfs_config.h"
+#include "pm_state.h"
+#include "enfs_proc.h"
+
+typedef struct rpc_xprt *(*enfs_xprt_switch_find_xprt_t)(struct rpc_xprt_switch *xps, const struct rpc_xprt *cur);
+static const struct rpc_xprt_iter_ops enfs_xprt_iter_roundrobin;
+static const struct rpc_xprt_iter_ops enfs_xprt_iter_singular;
+
+/* An xprt is usable when it still holds references and its path state
+ * is NORMAL.
+ */
+static bool enfs_xprt_is_active(struct rpc_xprt *xprt)
+{
+	if (kref_read(&xprt->kref) <= 0)
+		return false;
+
+	return pm_get_path_state(xprt) == PM_STATE_NORMAL;
+}
+
+/* Advance *cursor using @find_next and publish the new value.  The
+ * acquire load of the old cursor pairs with the release store so
+ * concurrent readers see a consistent cursor.
+ */
+static struct rpc_xprt *enfs_lb_set_cursor_xprt(
+	struct rpc_xprt_switch *xps, struct rpc_xprt **cursor, enfs_xprt_switch_find_xprt_t find_next)
+{
+	struct rpc_xprt *pos;
+	struct rpc_xprt *old;
+
+	old = smp_load_acquire(cursor);
+	pos = find_next(xps, old);
+	smp_store_release(cursor, pos);
+	return pos;
+}
+
+/* Round-robin next-xprt selection weighted by queue length: track the
+ * globally least-loaded active xprt, and after passing @cur prefer the
+ * least-loaded of the remaining xprts (returning early on a zero
+ * queue).  Falls back to the global minimum when nothing follows @cur.
+ */
+static struct rpc_xprt *enfs_lb_find_next_entry_roundrobin(struct rpc_xprt_switch *xps, const struct rpc_xprt *cur)
+{
+	struct rpc_xprt *pos;
+	struct rpc_xprt *prev = NULL;
+	bool found = false;
+	struct rpc_xprt *min_queuelen_xprt = NULL;
+	unsigned long pos_xprt_queuelen;
+	unsigned long min_xprt_queuelen = 0;
+	struct enfs_xprt_context *ctx;
+	struct rpc_xprt *optimal_xprt = NULL;
+	unsigned long optimal_queuelen = 0;
+	int nativeLinkStatus = enfs_get_native_link_io_status();
+
+	list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch)
+	{
+		/* skip the mount's main xprt unless native-link I/O is enabled */
+		if (!nativeLinkStatus && enfs_is_main_xprt(pos)) {
+			continue;
+		}
+
+		if (!enfs_xprt_is_active(pos)) {
+			prev = pos;
+			continue;
+		}
+
+		ctx = xprt_get_reserve_context(pos);
+		pos_xprt_queuelen = atomic_long_read(&ctx->queuelen);
+		if (min_queuelen_xprt == NULL || pos_xprt_queuelen < min_xprt_queuelen) {
+			/* Find the xprt with the smallest number of IO in the full-linked list. */
+			min_queuelen_xprt = pos;
+			min_xprt_queuelen = pos_xprt_queuelen;
+		}
+		if (cur == prev) {
+			found = true;
+		}
+
+		/* NOTE(review): "optimal_queuelen < min_xprt_queuelen" looks
+		 * suspicious — "pos_xprt_queuelen < optimal_queuelen" may be
+		 * the intended comparison; confirm against the design.
+		 */
+		if (found && (optimal_xprt == NULL || optimal_queuelen < min_xprt_queuelen)) {
+			/* From the subsequent linked list where the xprt has been selected, select the xprt for minimum IO. */
+			if (min_xprt_queuelen == 0) {
+				/* Minimum xprt, there is no need to traverse subsequent queues. */
+				return pos;
+			}
+			optimal_xprt = pos;
+			optimal_queuelen = pos_xprt_queuelen;
+		}
+		prev = pos;
+	};
+	if (optimal_xprt != NULL) {
+		return optimal_xprt;
+	}
+	return min_queuelen_xprt;
+}
+
+/* Return the first active xprt on the switch list, or NULL. */
+struct rpc_xprt *enfs_lb_switch_find_first_active_xprt(struct rpc_xprt_switch *xps)
+{
+	struct rpc_xprt *xprt;
+
+	list_for_each_entry_rcu(xprt, &xps->xps_xprt_list, xprt_switch) {
+		if (enfs_xprt_is_active(xprt))
+			return xprt;
+	}
+	return NULL;
+}
+
+/* The main xprt is the first entry on the switch list (NULL if empty). */
+struct rpc_xprt *enfs_lb_switch_get_main_xprt(struct rpc_xprt_switch *xps)
+{
+	return list_first_or_null_rcu(&xps->xps_xprt_list, struct rpc_xprt, xprt_switch);
+}
+
+/* Round-robin selection entry: fall back to the main xprt when
+ * multipath is disabled or no candidate is found.
+ */
+static struct rpc_xprt *enfs_lb_switch_get_next_xprt_roundrobin(struct rpc_xprt_switch *xps, const struct rpc_xprt *cur)
+{
+	struct rpc_xprt *next;
+
+	/* non-zero state means multipath is disabled */
+	if (enfs_get_config_multipath_state())
+		return enfs_lb_switch_get_main_xprt(xps);
+
+	next = enfs_lb_find_next_entry_roundrobin(xps, cur);
+	return next ? next : enfs_lb_switch_get_main_xprt(xps);
+}
+
+/* rpc_xprt_iter_ops::xpi_next for the round-robin policy. */
+static struct rpc_xprt *enfs_lb_iter_next_entry_roundrobin(struct rpc_xprt_iter *xpi)
+{
+	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
+
+	if (!xps)
+		return NULL;
+
+	return enfs_lb_set_cursor_xprt(xps, &xpi->xpi_cursor,
+		enfs_lb_switch_get_next_xprt_roundrobin);
+}
+
+/* Starting at @cur, return the first active xprt at or after it on the
+ * switch list; NULL when none qualifies.
+ */
+static struct rpc_xprt *enfs_lb_switch_find_singular_entry(struct rpc_xprt_switch *xps, const struct rpc_xprt *cur)
+{
+	struct rpc_xprt *pos;
+	bool reached = false;
+
+	list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch) {
+		if (pos == cur)
+			reached = true;
+		if (reached && enfs_xprt_is_active(pos))
+			return pos;
+	}
+	return NULL;
+}
+
+/* Singular (sticky) selection: keep @cur while it is active, otherwise
+ * move to the next active xprt; fall back to the first active xprt and
+ * finally to the main xprt.
+ */
+struct rpc_xprt *enfs_lb_get_singular_xprt(struct rpc_xprt_switch *xps, const struct rpc_xprt *cur)
+{
+	struct rpc_xprt *xprt;
+
+	if (xps == NULL) {
+		return NULL;
+	}
+
+	// disable multipath
+	if (enfs_get_config_multipath_state()) {
+		return enfs_lb_switch_get_main_xprt(xps);
+	}
+
+	/* no cursor yet, or only one xprt: pick any active one */
+	if (cur == NULL || xps->xps_nxprts < 2) {
+		xprt = enfs_lb_switch_find_first_active_xprt(xps);
+		if (!xprt)
+			goto main_xprt;
+		return xprt;
+	}
+
+	xprt = enfs_lb_switch_find_singular_entry(xps, cur);
+	if (!xprt) {
+		xprt = enfs_lb_switch_find_first_active_xprt(xps);
+		if (!xprt)
+			goto main_xprt;
+	}
+	return xprt;
+
+main_xprt:
+	return enfs_lb_switch_get_main_xprt(xps);
+}
+
+/* rpc_xprt_iter_ops::xpi_next for the singular (sticky) policy. */
+static struct rpc_xprt *enfs_lb_iter_next_entry_sigular(struct rpc_xprt_iter *xpi)
+{
+	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
+
+	if (!xps)
+		return NULL;
+
+	return enfs_lb_set_cursor_xprt(xps, &xpi->xpi_cursor,
+		enfs_lb_get_singular_xprt);
+}
+
+/* rpc_xprt_iter_ops::xpi_rewind: drop the cursor so iteration restarts. */
+static void enfs_lb_iter_default_rewind(struct rpc_xprt_iter *xpi)
+{
+	WRITE_ONCE(xpi->xpi_cursor, NULL);
+}
+
+/* Install the enfs iterator ops on the client's xprt switch: NFSv3
+ * mounts use round-robin, other versions use the singular policy.
+ *
+ * Fix: keep the RCU read lock held while @xps is used — the old code
+ * dereferenced the switch after rcu_read_unlock().
+ */
+static void enfs_lb_switch_set_roundrobin(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt_switch *xps;
+
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	if (xps == NULL || xps->xps_nxprts == 0) {
+		rcu_read_unlock();
+		return;
+	}
+
+	if (clnt->cl_vers == 3) {
+		if (READ_ONCE(xps->xps_iter_ops) != &enfs_xprt_iter_roundrobin)
+			WRITE_ONCE(xps->xps_iter_ops, &enfs_xprt_iter_roundrobin);
+	} else if (READ_ONCE(xps->xps_iter_ops) != &enfs_xprt_iter_singular) {
+		WRITE_ONCE(xps->xps_iter_ops, &enfs_xprt_iter_singular);
+	}
+	rcu_read_unlock();
+}
+
+/* Return @cur if it is still present on @head, NULL otherwise. */
+static struct rpc_xprt *enfs_lb_switch_find_current(struct list_head *head, const struct rpc_xprt *cur)
+{
+	struct rpc_xprt *entry;
+
+	list_for_each_entry_rcu(entry, head, xprt_switch) {
+		if (entry == cur)
+			return entry;
+	}
+	return NULL;
+}
+
+/* rpc_xprt_iter_ops::xpi_xprt: report the iterator's current xprt,
+ * defaulting to the main xprt when no cursor is set or the switch has
+ * fewer than two transports.
+ */
+static struct rpc_xprt *enfs_lb_iter_current_entry(struct rpc_xprt_iter *xpi)
+{
+	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
+
+	if (xps == NULL)
+		return NULL;
+
+	if (xpi->xpi_cursor == NULL || xps->xps_nxprts < 2)
+		return enfs_lb_switch_get_main_xprt(xps);
+
+	return enfs_lb_switch_find_current(&xps->xps_xprt_list, xpi->xpi_cursor);
+}
+
+/* Per-client callback: enable the enfs load-balance iterator ops on
+ * clients flagged as enfs-managed.  @data is unused.
+ */
+int enfs_lb_set_policy(struct rpc_clnt *clnt, void *data)
+{
+	struct rpc_clnt_reserve *reserve = (struct rpc_clnt_reserve *)clnt;
+
+	if (reserve->cl_enfs == 1)
+		enfs_lb_switch_set_roundrobin(clnt);
+
+	return 0;
+}
+
+static const struct rpc_xprt_iter_ops enfs_xprt_iter_roundrobin = {
+ .xpi_rewind = enfs_lb_iter_default_rewind,
+ .xpi_xprt = enfs_lb_iter_current_entry,
+ .xpi_next = enfs_lb_iter_next_entry_roundrobin,
+};
+
+static const struct rpc_xprt_iter_ops enfs_xprt_iter_singular = {
+ .xpi_rewind = enfs_lb_iter_default_rewind,
+ .xpi_xprt = enfs_lb_iter_current_entry,
+ .xpi_next = enfs_lb_iter_next_entry_sigular,
+};
+
+/* Export the round-robin iterator ops table. */
+const struct rpc_xprt_iter_ops *enfs_xprt_rr_ops(void)
+{
+	return &enfs_xprt_iter_roundrobin;
+}
+
+/* Export the singular (sticky) iterator ops table. */
+const struct rpc_xprt_iter_ops *enfs_xprt_singular_ops(void)
+{
+	return &enfs_xprt_iter_singular;
+}
+
+/* True when the client's xprt switch currently uses the round-robin ops. */
+bool enfs_is_rr_route(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt_switch *xps;
+	bool using_rr = false;
+
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	if (xps && xps->xps_iter_ops == &enfs_xprt_iter_roundrobin)
+		using_rr = true;
+	rcu_read_unlock();
+
+	return using_rr;
+}
+
+/* True when the client's xprt switch currently uses the singular ops. */
+bool enfs_is_singularr_route(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt_switch *xps;
+	bool using_singular = false;
+
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	if (xps && xps->xps_iter_ops == &enfs_xprt_iter_singular)
+		using_singular = true;
+	rcu_read_unlock();
+
+	return using_singular;
+}
+
+/* Per-client callback: restore the default singular iterator policy on
+ * enfs-managed clients.  @data is unused.
+ */
+int enfs_lb_revert_policy(struct rpc_clnt *clnt, void *data)
+{
+	struct rpc_xprt_switch *xps;
+	struct rpc_clnt_reserve *clnt_reserve = (struct rpc_clnt_reserve *)clnt;
+
+	if (clnt_reserve->cl_enfs == 1) {
+		rcu_read_lock();
+		xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+		rcu_read_unlock();
+		/* NOTE(review): xps is used after rcu_read_unlock(); holding
+		 * the read lock across the call below would be safer — confirm
+		 * whether rpc_xprt_switch_set_singular() may sleep.
+		 */
+		rpc_xprt_switch_set_singular(xps);
+	}
+
+	return 0;
+}
+
+/* Apply the enfs load-balance policy to every existing rpc_clnt. */
+int enfs_lb_init(void)
+{
+	enfs_for_each_rpc_clnt(enfs_lb_set_policy, NULL);
+
+	return 0;
+}
+
+/* Revert every rpc_clnt to the default singular policy on unload. */
+void enfs_lb_exit(void)
+{
+	enfs_for_each_rpc_clnt(enfs_lb_revert_policy, NULL);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/load_balance/shard_route.c b/fs/nfs/enfs/multipath/load_balance/shard_route.c
new file mode 100644
index 0000000..7374093
--- /dev/null
+++ b/fs/nfs/enfs/multipath/load_balance/shard_route.c
@@ -0,0 +1,1989 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../../../net/sunrpc/netns.h"
+
+#include "dns_internal.h"
+#include "enfs.h"
+#include "enfs_config.h"
+#include "enfs_log.h"
+#include "enfs_roundrobin.h"
+#include "enfs_tp_common.h"
+#include "exten_call.h"
+#include "pm_state.h"
+#include "shard.h"
+#include "netns.h"
+#include "enfs_adapter.h"
+
+unsigned int enfs_uuid_debug;
+module_param_named(uuid, enfs_uuid_debug, uint, 0600);
+MODULE_PARM_DESC(uuid, "print nfsv3 req uuid debugging mask");
+
+#ifdef ENFS_EULER_5_10
+#else
+#define list_entry_is_head(pos, head, member) (&((pos)->member) == (head))
+#endif
+#define MAX_SHARD_COUNT_TIME 5 // 5 second
+#define FAULT_DETECTED 1
+
+#define SHARD_VIEW_UPDATE_INTERVAL_UNDER_LOCK 10
+#define SECOND_TO_MILLISECOND 1000
+
+/*
+ * Set the shard_should_stop to true so that the work can be quickly returned.
+ */
+static bool shard_should_stop;
+
+// TODO: replace the rwlock with RCU
+struct shard_view_ctrl {
+ rwlock_t view_lock;
+ struct list_head view_list;
+
+ rwlock_t clnt_info_lock;
+ struct list_head clnt_info_list;
+};
+
+static struct shard_view_ctrl *shard_ctrl;
+static struct task_struct *shard_thread;
+static struct workqueue_struct *shard_workq;
+
+struct clnt_debug_cmd {
+ const char *name;
+ void (*fn)(int argc, char *argv[]);
+};
+
+struct view_table {
+ struct list_head next;
+ rwlock_t lock;
+ struct list_head fs_head;
+ struct list_head shard_head;
+ struct list_head lif_head;
+ struct list_head ls_head;
+ uint64_t devId;
+};
+
+struct fs_info {
+ struct list_head next;
+ uint64_t clusterId;
+ uint32_t storagePoolId;
+ uint32_t fsId;
+ uint32_t tenantId;
+};
+
+struct shard_entry {
+ uint64_t lsid;
+ uint32_t vnodeid;
+ uint32_t pnodeid;
+ uint32_t cpuid;
+ uint64_t version;
+};
+
+struct shard_view {
+ struct list_head next;
+ uint64_t clusterId;
+ uint32_t storagePoolId;
+ uint32_t num;
+ struct shard_entry entry[MAX_SHARD_NUMBER_IN_CLUSTER_4FS];
+};
+
+struct lif_info {
+ struct list_head next;
+ char ipAddr[IP_ADDRESS_LEN_MAX];
+ uint32_t workStatus;
+ uint64_t lsId;
+ uint32_t tenantId;
+ uint64_t homeSiteWwn;
+};
+
+struct ls_entry {
+ uint64_t lsVersion;
+ uint32_t lsId;
+};
+
+struct ls_info {
+ struct list_head next;
+ uint32_t num;
+ uint64_t clusterId;
+ struct ls_entry entry[MAX_GLOBAL_CTRL_NODE_NUM];
+};
+
+struct clnt_uuid_info {
+ struct list_head next;
+ struct rpc_clnt *clnt;
+ FILE_UUID root_uuid;
+ bool updateing;
+};
+
+static bool delete_view_table(uint64_t devId);
+static void enfs_delete_fs_info(struct view_table *table, uint32_t fsId);
+static void viewtable_delete_all_shard(struct view_table *table);
+
+/* Look up the cached root FILE_UUID for @clnt.
+ * Returns 0 and fills @root_uuid on hit, -1 on miss.
+ */
+int enfs_find_clnt_root(struct rpc_clnt *clnt, FILE_UUID *root_uuid)
+{
+	struct clnt_uuid_info *info;
+
+	read_lock(&shard_ctrl->clnt_info_lock);
+	/* on a miss the iterator ends back at the list head */
+	list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+		if (info->clnt == clnt) {
+			break;
+		}
+	}
+	if (!list_entry_is_head(info, &shard_ctrl->clnt_info_list, next)) {
+		*root_uuid = info->root_uuid;
+		read_unlock(&shard_ctrl->clnt_info_lock);
+		return 0;
+	}
+
+	read_unlock(&shard_ctrl->clnt_info_lock);
+	return -1;
+}
+
+/* Insert or update the cached root FILE_UUID for @clnt.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int enfs_insert_clnt_root(struct rpc_clnt *clnt, FILE_UUID *root_uuid)
+{
+	struct clnt_uuid_info *info;
+
+	write_lock(&shard_ctrl->clnt_info_lock);
+	list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+		if (info->clnt == clnt) {
+			break;
+		}
+	}
+
+	/* existing entry: refresh the uuid in place */
+	if (!list_entry_is_head(info, &shard_ctrl->clnt_info_list, next)) {
+		info->root_uuid = *root_uuid;
+		write_unlock(&shard_ctrl->clnt_info_lock);
+		return 0;
+	}
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		write_unlock(&shard_ctrl->clnt_info_lock);
+		return -1;
+	}
+
+	info->clnt = clnt;
+	info->root_uuid = *root_uuid;
+	list_add_tail(&info->next, &shard_ctrl->clnt_info_list);
+	info->updateing = false;
+
+	write_unlock(&shard_ctrl->clnt_info_lock);
+	return 0;
+}
+
+/* Remove @clnt's cached uuid entry; when no other client still
+ * references the same device id, drop that device's view table too.
+ * Always returns 0.
+ *
+ * NOTE(review): view_lock is taken while clnt_info_lock is held —
+ * confirm this clnt_info_lock -> view_lock ordering is used
+ * consistently elsewhere to avoid deadlock.
+ */
+int enfs_delete_clnt_shard_cache(struct rpc_clnt *clnt)
+{
+	struct clnt_uuid_info *info;
+	uint64_t devId = 0;
+
+	write_lock(&shard_ctrl->clnt_info_lock);
+	/* unlink and free this client's entry, remembering its device id */
+	list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+		if (info->clnt == clnt) {
+			devId = GET_DEVID_FROM_UUID(&info->root_uuid);
+			list_del(&info->next);
+			kfree(info);
+			break;
+		}
+	}
+	if (devId == 0) {
+		write_unlock(&shard_ctrl->clnt_info_lock);
+		return 0;
+	}
+
+	/* devId is cleared again when another client still uses the device */
+	list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+		if (devId == GET_DEVID_FROM_UUID(&info->root_uuid)) {
+			devId = 0;
+			break;
+		}
+	}
+
+	if (devId != 0) {
+		write_lock(&shard_ctrl->view_lock);
+		delete_view_table(devId);
+		write_unlock(&shard_ctrl->view_lock);
+	}
+
+	write_unlock(&shard_ctrl->clnt_info_lock);
+	return 0;
+}
+
+/* Return the view table for @devId, allocating and linking a new empty
+ * one when absent; NULL on allocation failure.
+ * NOTE(review): mutates the view list — callers must hold
+ * shard_ctrl->view_lock for writing (see the note in get_view_table).
+ */
+static struct view_table *create_view_table(uint64_t devId)
+{
+	struct view_table *table;
+
+	list_for_each_entry (table, &shard_ctrl->view_list, next) {
+		if (table->devId == devId) {
+			break;
+		}
+	}
+
+	if (!list_entry_is_head(table, &shard_ctrl->view_list, next)) {
+		return table;
+	}
+
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	if (!table) {
+		return NULL;
+	}
+
+	rwlock_init(&table->lock);
+	INIT_LIST_HEAD(&table->fs_head);
+	INIT_LIST_HEAD(&table->shard_head);
+	INIT_LIST_HEAD(&table->lif_head);
+	INIT_LIST_HEAD(&table->ls_head);
+	table->devId = devId;
+	list_add_tail(&table->next, &shard_ctrl->view_list);
+
+	return table;
+}
+
+/* Find the view table for @devId; when @create is set, allocate one on
+ * a miss.  Returns NULL when absent (and not creating) or on failure.
+ */
+static struct view_table *get_view_table(uint64_t devId, bool create)
+{
+	struct view_table *table;
+
+	list_for_each_entry (table, &shard_ctrl->view_list, next) {
+		if (table->devId == devId) {
+			break;
+		}
+	}
+
+	// Note use write_lock when creating a view tabel.
+	if (list_entry_is_head(table, &shard_ctrl->view_list, next)) {
+		if (create) {
+			return create_view_table(devId);
+		}
+		return NULL;
+	}
+	return table;
+}
+
+/*
+ * view_lock need write_lock
+ * Drop the view table for @devId and all fs/shard state hanging off it.
+ * Returns true when a table was found and freed.
+ */
+static bool delete_view_table(uint64_t devId)
+{
+	struct view_table *table;
+
+	list_for_each_entry (table, &shard_ctrl->view_list, next) {
+		if (table->devId == devId) {
+			break;
+		}
+	}
+
+	if (list_entry_is_head(table, &shard_ctrl->view_list, next)) {
+		return false;
+	}
+
+	/* fsId 0 presumably means "all entries" — see enfs_delete_fs_info */
+	enfs_delete_fs_info(table, 0);
+	viewtable_delete_all_shard(table);
+	list_del(&table->next);
+	kfree(table);
+	return true;
+}
+
+/* Find the fs_info entry for @fsId in @table; NULL when absent.
+ * Callers hold shard_ctrl->view_lock (see enfs_query_lif_info).
+ */
+static struct fs_info *get_fsinfo(struct view_table *table, uint32_t fsId)
+{
+	struct fs_info *entry;
+
+	list_for_each_entry (entry, &table->fs_head, next) {
+		if (entry->fsId == fsId)
+			return entry;
+	}
+	return NULL;
+}
+
+/* Resolve @shardId within the (clusterId, storagePoolId) shard view of
+ * @table, returning the owning LS id and CPU id.  Returns 0 on
+ * success, -1 when the view is missing or @shardId is out of range.
+ */
+static int get_ls_and_cpu_id(struct view_table *table, uint64_t clusterId,
+	uint32_t storagePoolId, uint32_t shardId, uint64_t *lsid, uint32_t *cpuId)
+{
+	struct shard_view *view;
+
+	list_for_each_entry (view, &table->shard_head, next) {
+		if (view->clusterId == clusterId &&
+			view->storagePoolId == storagePoolId) {
+			break;
+		}
+	}
+
+	if (list_entry_is_head(view, &table->shard_head, next)) {
+		return -1;
+	}
+
+	/* bounds check against the populated portion of the entry array */
+	if (shardId >= view->num) {
+		enfs_log_error("shard id is more than buff size, view(0x%llx:%llu:%u:%u), id:%u.\n",
+			table->devId, view->clusterId, view->storagePoolId, view->num, shardId);
+		return -1;
+	}
+	*lsid = view->entry[shardId].lsid;
+	*cpuId = view->entry[shardId].cpuid;
+
+	return 0;
+}
+
+/**
+ * Resolve the LS id and CPU id serving the shard that owns @file_uuid.
+ * @return:0 for success,otherwise for failed
+ *
+ * Reworked with goto-based cleanup so the read lock is released on a
+ * single exit path instead of in every error branch.
+ */
+int enfs_query_lif_info(struct rpc_clnt *clnt, FILE_UUID *file_uuid, uint64_t *lsid, uint32_t *cpuId)
+{
+	int ret = -1;
+	struct view_table *table;
+	struct fs_info *info;
+	uint32_t shardId;
+
+	read_lock(&shard_ctrl->view_lock);
+	table = get_view_table(GET_DEVID_FROM_UUID(file_uuid), false);
+	if (!table)
+		goto out_unlock;
+
+	info = get_fsinfo(table, GET_FSID_FROM_UUID(file_uuid));
+	if (!info)
+		goto out_unlock;
+
+	shardId = get_shardid_from_uuid(file_uuid);
+	ret = get_ls_and_cpu_id(table, info->clusterId, info->storagePoolId, shardId, lsid, cpuId);
+	if (ret)
+		enfs_log_error("get lsid failed.\n");
+
+out_unlock:
+	read_unlock(&shard_ctrl->view_lock);
+	return ret;
+}
+
+/* Insert or refresh the fs_info record matching @fs_shard_view->fsId in
+ * @table.  Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int update_fs_info(struct view_table *table,
+	FS_SHARD_VIEW *fs_shard_view)
+{
+	struct fs_info *info;
+
+	list_for_each_entry (info, &table->fs_head, next) {
+		if (info->fsId == fs_shard_view->fsId) {
+			break;
+		}
+	}
+
+	/* existing record: update ids in place */
+	if (!list_entry_is_head(info, &table->fs_head, next)) {
+		info->clusterId = fs_shard_view->clusterId;
+		info->storagePoolId = fs_shard_view->storagePoolId;
+		return 0;
+	}
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		return -ENOMEM;
+	}
+
+	info->fsId = fs_shard_view->fsId;
+	info->clusterId = fs_shard_view->clusterId;
+	info->storagePoolId = fs_shard_view->storagePoolId;
+	list_add_tail(&info->next, &table->fs_head);
+	return 0;
+}
+
+static void copy_shard_entry(struct shard_view *view,
+ FS_SHARD_VIEW *fs_shard_view, int *flag)
+{
+ int i;
+ for (i = 0; i < fs_shard_view->num; i++) {
+ if (view->entry[i].lsid != fs_shard_view->shardView[i].lsid) {
+ *flag = FAULT_DETECTED;
+ }
+ view->entry[i].lsid = fs_shard_view->shardView[i].lsid;
+ view->entry[i].cpuid = fs_shard_view->shardView[i].cpuId;
+ }
+}
+
/*
 * Insert or refresh the shard view of one (cluster, pool) pair.
 *
 * Caller must hold shard_ctrl->view_lock for writing.
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): kmalloc(sizeof(*view)) assumes shard_view.entry[] is a
 * fixed-capacity array at least fs_shard_view->num long — confirm against
 * the struct definition; a flexible array member here would overflow.
 * NOTE(review): entry[i].vnodeid is never written on this path but is
 * printed by debug_show_shardinfo — verify it is initialized elsewhere.
 */
static int update_shard_view(struct view_table *table,
	FS_SHARD_VIEW *fs_shard_view, int *flag)
{
	int i;
	struct shard_view *view;

	/* leaves `view` at the head sentinel when nothing matches */
	list_for_each_entry (view, &table->shard_head, next) {
		if (view->clusterId == fs_shard_view->clusterId &&
			view->storagePoolId == fs_shard_view->storagePoolId) {
			break;
		}
	}

	if (!list_entry_is_head(view, &table->shard_head, next)) {
		/* existing record: refresh in place, flag any lsid change */
		copy_shard_entry(view, fs_shard_view, flag);
		return 0;
	}

	view = kmalloc(sizeof(*view), GFP_KERNEL);
	if (!view) {
		return -ENOMEM;
	}

	view->clusterId = fs_shard_view->clusterId;
	view->storagePoolId = fs_shard_view->storagePoolId;
	view->num = fs_shard_view->num;
	for (i = 0; i < fs_shard_view->num; i++) {
		view->entry[i].lsid = fs_shard_view->shardView[i].lsid;
		view->entry[i].cpuid = fs_shard_view->shardView[i].cpuId;
	}
	list_add_tail(&view->next, &table->shard_head);
	return 0;
}
+
+int enfs_update_fsshard(uint64_t devId, FS_SHARD_VIEW *fs_shard_view, int *flag)
+{
+ int ret;
+ struct view_table *table;
+
+ write_lock(&shard_ctrl->view_lock);
+ table = get_view_table(devId, true);
+ if (!table) {
+ write_unlock(&shard_ctrl->view_lock);
+ enfs_log_error("get view table failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = update_fs_info(table, fs_shard_view);
+ if (ret) {
+ write_unlock(&shard_ctrl->view_lock);
+ enfs_log_error("update fs info err:%d\n", ret);
+ return ret;
+ }
+
+ ret = update_shard_view(table, fs_shard_view, flag);
+ if (ret) {
+ write_unlock(&shard_ctrl->view_lock);
+ enfs_log_error("update shard view err:%d\n", ret);
+ return ret;
+ }
+
+ write_unlock(&shard_ctrl->view_lock);
+ return 0;
+}
+
+static int find_same_lsid(struct ls_info *info, int size, int target_lsId)
+{
+ int left = 0;
+ int right = size - 1;
+
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+
+ if (info->entry[mid].lsId == target_lsId) {
+ return mid;
+ } else if (info->entry[mid].lsId < target_lsId) {
+ left = mid + 1;
+ } else {
+ right = mid - 1;
+ }
+ }
+ return -1;
+}
+
+static void copy_ls_entry(struct ls_info *info,
+ EXTEND_GET_LS_VERSION *ls_view, int *flag)
+{
+ int i;
+ int target;
+ for (i = 0; i < ls_view->num; i++) {
+ target = find_same_lsid(info, ls_view->num, ls_view->lsInfo[i].lsId); // 二分查找
+ if (info->entry[target].lsVersion != ls_view->lsInfo[i].lsVersion) {
+ *flag = FAULT_DETECTED;
+ info->entry[target].lsVersion = ls_view->lsInfo[i].lsVersion;
+ }
+ }
+}
+
/*
 * Insert or refresh the per-cluster LS-version table.
 *
 * Caller must hold shard_ctrl->view_lock for writing.
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): kmalloc(sizeof(*info)) assumes ls_info.entry[] is a
 * fixed-capacity array of at least ls_view->num slots — confirm against
 * the struct definition.
 * NOTE(review): the new record copies entries in ls_view order;
 * find_same_lsid() requires entry[] sorted by lsId — presumably the storage
 * side returns them sorted; verify.
 */
static int update_ls_info(struct view_table *table, EXTEND_GET_LS_VERSION *ls_view, int *flag)
{
	int i;
	struct ls_info *info;

	/* leaves `info` at the head sentinel when no cluster record matches */
	list_for_each_entry (info, &table->ls_head, next) {
		if (info->clusterId == ls_view->clusterId) {
			break;
		}
	}

	if (!list_entry_is_head(info, &table->ls_head, next)) {
		/* existing record: merge, flagging any version change */
		copy_ls_entry(info, ls_view, flag);
		return 0;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		return -ENOMEM;
	}

	info->clusterId = ls_view->clusterId;
	info->num = ls_view->num;
	for (i = 0; i < ls_view->num; i++) {
		info->entry[i].lsVersion = ls_view->lsInfo[i].lsVersion;
		info->entry[i].lsId = ls_view->lsInfo[i].lsId;
	}
	list_add_tail(&info->next, &table->ls_head);
	return 0;
}
+
+int enfs_update_lsinfo(uint64_t devId, EXTEND_GET_LS_VERSION *ls_view, int *flag)
+{
+ int ret;
+ struct view_table *table;
+
+ write_lock(&shard_ctrl->view_lock);
+ table = get_view_table(devId, true);
+ if (!table) {
+ write_unlock(&shard_ctrl->view_lock);
+ enfs_log_error("get view table failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = update_ls_info(table, ls_view, flag);
+ if (ret) {
+ write_unlock(&shard_ctrl->view_lock);
+ enfs_log_error("update ls info err:%d\n", ret);
+ return ret;
+ }
+
+ write_unlock(&shard_ctrl->view_lock);
+ return 0;
+}
+
/*
 * Insert or refresh the cached logical-interface record keyed by IP string.
 *
 * @devId:    device the lif belongs to
 * @ipaddr:   dotted/colon IP string; must fit in lif->ipAddr
 * @lif_info: freshly queried port info to cache
 *
 * Returns 0 on success, -EINVAL on an oversized address or when
 * lif_info->isfound == 1 (record already resolved — presumably a "nothing
 * to update" marker; verify against the caller), -ENOMEM on allocation
 * failure. Takes shard_ctrl->view_lock for writing.
 */
int enfs_update_lif_info(uint64_t devId, const char *ipaddr,
	LIF_PORT_INFO_SINGLE *lif_info)
{
	struct view_table *table;
	struct lif_info *lif;

	/* length check up front makes the strcpy below bounded */
	if (strlen(ipaddr) >= IP_ADDRESS_LEN_MAX || lif_info->isfound == 1) {
		return -EINVAL;
	}

	write_lock(&shard_ctrl->view_lock);
	table = get_view_table(devId, true);
	if (!table) {
		write_unlock(&shard_ctrl->view_lock);
		enfs_log_error("get view table failed.\n");
		return -ENOMEM;
	}

	/* leaves `lif` at the head sentinel when the IP is not cached yet */
	list_for_each_entry (lif, &table->lif_head, next) {
		if (strcmp(lif->ipAddr, ipaddr) == 0) {
			break;
		}
	}

	if (!list_entry_is_head(lif, &table->lif_head, next)) {
		/* existing record: refresh the mutable fields in place */
		lif->workStatus = lif_info->workStatus;
		lif->lsId = lif_info->lsId;
		lif->tenantId = lif_info->tenantId;
		lif->homeSiteWwn = lif_info->homeSiteWwn;
		write_unlock(&shard_ctrl->view_lock);
		return 0;
	}

	lif = kmalloc(sizeof(*lif), GFP_KERNEL);
	if (!lif) {
		write_unlock(&shard_ctrl->view_lock);
		return -ENOMEM;
	}
	strcpy(lif->ipAddr, ipaddr);
	lif->workStatus = lif_info->workStatus;
	lif->lsId = lif_info->lsId;
	lif->tenantId = lif_info->tenantId;
	lif->homeSiteWwn = lif_info->homeSiteWwn;
	list_add_tail(&lif->next, &table->lif_head);

	write_unlock(&shard_ctrl->view_lock);
	return 0;
}
+
/*
 * view_lock need write_lock
 *
 * Remove the fs_info record matching fsId from the table; as a side effect
 * any record with fsId == 0 (outside the valid storage range 1-65535, i.e.
 * a stale placeholder) is garbage-collected during the walk. Safe iteration
 * is required because entries are freed mid-walk.
 */
static void enfs_delete_fs_info(struct view_table *table, uint32_t fsId)
{
	struct fs_info *info;
	struct fs_info *next_ptr;

	list_for_each_entry_safe (info, next_ptr, &table->fs_head, next) {
		/* storage fsid range 1-65535 */
		if (info->fsId == 0) {
			list_del(&info->next);
			kfree(info);
			continue;
		}

		if (info->fsId == fsId) {
			list_del(&info->next);
			kfree(info);
			break;
		}
	}
	return;
}
+
+static void viewtable_delete_all_shard(struct view_table *table)
+{
+ struct shard_view *view;
+ struct shard_view *next_ptr;
+
+ list_for_each_entry_safe (view, next_ptr, &table->shard_head, next) {
+ list_del(&view->next);
+ kfree(view);
+ }
+ return;
+}
+
+int enfs_delete_shard(uint64_t devId, uint64_t clusterId,
+ uint32_t storagePoolId)
+{
+ struct view_table *table;
+ struct shard_view *view;
+
+ write_lock(&shard_ctrl->view_lock);
+ table = get_view_table(devId, false);
+ if (!table) {
+ write_unlock(&shard_ctrl->view_lock);
+ return 0;
+ }
+ list_for_each_entry (view, &table->shard_head, next) {
+ if (view->clusterId == clusterId &&
+ view->storagePoolId == storagePoolId) {
+ list_del(&view->next);
+ kfree(view);
+ }
+ }
+ write_unlock(&shard_ctrl->view_lock);
+ return 0;
+}
+
/*
 * parse_msg_* helpers: each extracts the primary nfs_fh from the rpc_argp
 * of one NFSv3 procedure's rpc_message. They are dispatched through
 * nfs3_parse_ops[] below, indexed by procedure number.
 */

// getattr,fsstat,fsinfo,pathconf: rpc_argp is the file handle itself
static const struct nfs_fh *parse_msg_fh(struct rpc_message *msg)
{
	struct nfs_fh *fh = msg->rpc_argp;
	return fh;
}

static const struct nfs_fh *parse_msg_setattr(struct rpc_message *msg)
{
	struct nfs3_sattrargs *args = msg->rpc_argp;
	return args->fh;
}

// lookup,rmdir
static const struct nfs_fh *parse_msg_dirop(struct rpc_message *msg)
{
	struct nfs3_diropargs *args = msg->rpc_argp;
	return args->fh;
}

static const struct nfs_fh *parse_msg_access(struct rpc_message *msg)
{
	struct nfs3_accessargs *args = msg->rpc_argp;
	return args->fh;
}

static const struct nfs_fh *parse_msg_readlink(struct rpc_message *msg)
{
	struct nfs3_readlinkargs *args = msg->rpc_argp;
	return args->fh;
}

// read,write
static const struct nfs_fh *parse_msg_io(struct rpc_message *msg)
{
	struct nfs_pgio_args *args = msg->rpc_argp;
	return args->fh;
}

static const struct nfs_fh *parse_msg_create(struct rpc_message *msg)
{
	struct nfs3_createargs *args = msg->rpc_argp;
	return args->fh;
}

static const struct nfs_fh *parse_msg_mkdir(struct rpc_message *msg)
{
	struct nfs3_mkdirargs *args = msg->rpc_argp;
	return args->fh;
}

// symlink routes by the source directory handle
static const struct nfs_fh *parse_msg_symlink(struct rpc_message *msg)
{
	struct nfs3_symlinkargs *args = msg->rpc_argp;
	return args->fromfh;
}

static const struct nfs_fh *parse_msg_mknode(struct rpc_message *msg)
{
	struct nfs3_mknodargs *args = msg->rpc_argp;
	return args->fh;
}

static const struct nfs_fh *parse_msg_remove(struct rpc_message *msg)
{
	struct nfs_removeargs *args = msg->rpc_argp;
	return args->fh;
}

// rename routes by the old (source) directory handle
static const struct nfs_fh *parse_msg_rename(struct rpc_message *msg)
{
	struct nfs_renameargs *args = msg->rpc_argp;
	return args->old_dir;
}

static const struct nfs_fh *parse_msg_link(struct rpc_message *msg)
{
	struct nfs3_linkargs *args = msg->rpc_argp;
	return args->fromfh;
}

// readdir,readdirplus
static const struct nfs_fh *parse_msg_readdir(struct rpc_message *msg)
{
	struct nfs3_readdirargs *args = msg->rpc_argp;
	return args->fh;
}

// commit
static const struct nfs_fh *parse_msg_commit(struct rpc_message *msg)
{
	struct nfs_commitargs *args = msg->rpc_argp;
	return args->fh;
}
+
/* One dispatch entry per NFSv3 procedure: procedure number, the matching
 * file-handle extractor (NULL when the procedure carries no handle), and a
 * name used for diagnostics. */
struct nfs3_cmd_ops {
	int cmd;
	const struct nfs_fh *(*parse_fh)(struct rpc_message *msg);
	const char *name;
};

/* Indexed by NFS3PROC_* number; order must match the procedure numbering. */
struct nfs3_cmd_ops nfs3_parse_ops[] = {
	{NFS3PROC_NULL, NULL, "NFS3PROC_NULL"},
	{NFS3PROC_GETATTR, parse_msg_fh, "NFS3PROC_GETATTR"},
	{NFS3PROC_SETATTR, parse_msg_setattr, "NFS3PROC_SETATTR"},
	{NFS3PROC_LOOKUP, parse_msg_dirop, "NFS3PROC_LOOKUP"},
	{NFS3PROC_ACCESS, parse_msg_access, "NFS3PROC_ACCESS"},
	{NFS3PROC_READLINK, parse_msg_readlink, "NFS3PROC_READLINK"},
	{NFS3PROC_READ, parse_msg_io, "NFS3PROC_READ"},
	{NFS3PROC_WRITE, parse_msg_io, "NFS3PROC_WRITE"},
	{NFS3PROC_CREATE, parse_msg_create, "NFS3PROC_CREATE"},
	{NFS3PROC_MKDIR, parse_msg_mkdir, "NFS3PROC_MKDIR"},
	{NFS3PROC_SYMLINK, parse_msg_symlink, "NFS3PROC_SYMLINK"},
	{NFS3PROC_MKNOD, parse_msg_mknode, "NFS3PROC_MKNOD"},
	{NFS3PROC_REMOVE, parse_msg_remove, "NFS3PROC_REMOVE"},
	{NFS3PROC_RMDIR, parse_msg_dirop, "NFS3PROC_RMDIR"},
	{NFS3PROC_RENAME, parse_msg_rename, "NFS3PROC_RENAME"},
	{NFS3PROC_LINK, parse_msg_link, "NFS3PROC_LINK"},
	{NFS3PROC_READDIR, parse_msg_readdir, "NFS3PROC_READDIR"},
	{NFS3PROC_READDIRPLUS, parse_msg_readdir, "NFS3PROC_READDIRPLUS"},
	{NFS3PROC_FSSTAT, parse_msg_fh, "NFS3PROC_FSSTAT"},
	{NFS3PROC_FSINFO, parse_msg_fh, "NFS3PROC_FSINFO"},
	{NFS3PROC_PATHCONF, parse_msg_fh, "NFS3PROC_PATHCONF"},
	{NFS3PROC_COMMIT, parse_msg_commit, "NFS3PROC_COMMIT"},
};
int nfs3_parse_ops_size = sizeof(nfs3_parse_ops) / sizeof(struct nfs3_cmd_ops);

/* Deferred-work context for shard refresh triggered from the rpc path. */
struct shard_work {
	struct work_struct work;
	struct clnt_uuid_info info;
	struct rpc_xprt_switch *xps;
	int isupdate;
};
+
/*
 * Render a sockaddr as a printable IP string into buf (len bytes).
 * Returns 0 on success, 1 for an unsupported address family.
 */
static int sockaddr_ip_to_str(struct sockaddr *addr, char *buf, int len)
{
	if (addr->sa_family == AF_INET) {
		snprintf(buf, len, "%pI4",
			 &((struct sockaddr_in *)addr)->sin_addr);
		return 0;
	}

	if (addr->sa_family == AF_INET6) {
		snprintf(buf, len, "%pI6c",
			 &((struct sockaddr_in6 *)addr)->sin6_addr);
		return 0;
	}

	return 1;
}
+
+static void update_server_enfs_capability(struct rpc_clnt *clnt, int flag)
+{
+ int ret = 0;
+ struct nfs_net *nn = NULL;
+ struct net *net;
+ struct nfs_server *pos = NULL;
+ char remoteip[64] = {"*"};
+ char serverip[64] = {"*"};
+
+ rcu_read_lock();
+ for_each_net_rcu (net) {
+ nn = net_generic(net, nfs_net_id);
+ if (nn == NULL) {
+ continue;
+ }
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry(pos, &nn->nfs_volume_list, master_link) {
+ if (!pos->nlm_host) {
+ continue;
+ }
+
+ if (!pos->client) {
+ continue;
+ }
+
+ if (pos->nlm_host == NULL) {
+ continue;
+ }
+
+ ret = sockaddr_ip_to_str((struct sockaddr *)&clnt->cl_xprt->addr, remoteip, sizeof(remoteip));
+ if (ret != 0) {
+ enfs_log_error("remoteip to str err:%d.\n", ret);
+ continue;
+ }
+
+ ret = sockaddr_ip_to_str((struct sockaddr *)&pos->client->cl_xprt->addr, serverip, sizeof(serverip));
+ if (ret != 0) {
+ enfs_log_error("remoteip to str err:%d.\n", ret);
+ continue;
+ }
+
+ if (!strcmp(remoteip, serverip)) {
+ pos->nlm_host->enfs_flag |= flag;
+ pos->nlm_host->enfs_flag &= flag;
+ }
+ }
+ spin_unlock(&nn->nfs_client_lock);
+ break;
+ }
+ rcu_read_unlock();
+
+}
+
/*
 * Query the storage array for the fs shard view and the LS-version view,
 * and fold both into the local caches. When either cached view changed
 * (flag set by the update helpers) and a work context was supplied, mark
 * the work item so the caller reschedules path selection.
 *
 * Returns 0 on success (including "lsId query unsupported by the array",
 * which just records the capability), otherwise a negative errno.
 *
 * NOTE(review): the return values of enfs_update_fsshard() and
 * enfs_update_lsinfo() (-ENOMEM possible) are ignored here — confirm that
 * is intentional best-effort behavior.
 */
static int query_and_update_shard(struct rpc_clnt *clnt, FILE_UUID *file_uuid, struct shard_work *work)
{
	int ret;
	int flag = 0;
	FS_SHARD_VIEW *fsshard_view = NULL;
	EXTEND_GET_LS_VERSION *ls_view = NULL;

	ret = dorado_query_fs_shard(clnt, file_uuid, &fsshard_view);
	if (ret) {
		enfs_log_error("update shard err:%d.\n", ret);
		return ret;
	}

	/* caller owns the buffer returned by the query; free after caching */
	enfs_update_fsshard(GET_DEVID_FROM_UUID(file_uuid), fsshard_view, &flag);
	kfree(fsshard_view);

	ret = dorado_query_lsId(clnt, &ls_view);
	if (ret == -EOPNOTSUPP || ret == -ENOTSUPP) {
		/* old arrays: remember the capability, not an error */
		update_server_enfs_capability(clnt, ENFS_CAPABILITY_LSID_NOTSUPPORT);
		return 0;
	}

	if (ret) {
		enfs_log_error("update lsId err:%d.\n", ret);
		return ret;
	}

	update_server_enfs_capability(clnt, ENFS_CAPABILITY_LSID_SUPPORT);
	enfs_update_lsinfo(GET_DEVID_FROM_UUID(file_uuid), ls_view, &flag);
	kfree(ls_view);

	if (flag && work) {
		work->isupdate = true;
	}
	return 0;
}
+
+static void insert_and_update_shard(struct rpc_clnt *clnt, FILE_UUID *file_uuid)
+{
+ FILE_UUID root_uuid;
+
+ if (enfs_find_clnt_root(clnt, &root_uuid) != 0) {
+ enfs_insert_clnt_root(clnt, file_uuid);
+ query_and_update_shard(clnt, file_uuid, NULL);
+ }
+}
+
+void enfs_print_uuid(FILE_UUID *file_uuid)
+{
+ char buf[80]; /* 80 uuid buf */
+ uint8_t *uuid = file_uuid->data;
+
+ if (enfs_uuid_debug == 0) {
+ return;
+ }
+
+ printk(KERN_INFO "dev:%llu fs:%u dtree:%u snap:%u pfid:%llu fid:%llu\n",
+ *(uint64_t *)(uuid + UUID_DEVID_OFFSET),
+ *(uint32_t *)(uuid + UUID_FSID_OFFSET),
+ *(uint32_t *)(uuid + UUID_DTREEID_OFFSET),
+ *(uint32_t *)(uuid + UUID_SNAPID_OFFSET),
+ *(uint64_t *)(uuid + UUID_PFID_OFFSET),
+ *(uint64_t *)(uuid + UUID_FID_OFFSET));
+
+ sprint_uuid(buf, 80, file_uuid);
+ printk(KERN_INFO "UUID:%s\n", buf);
+}
+
+static int get_uuid_from_task(struct rpc_clnt *clnt, struct rpc_task *task,
+ FILE_UUID *file_uuid)
+{
+ // task is one pointer to rpc_task
+ // nfs3_procedure is one pointer to struct rpc_procinfo array
+ // which presents all procedure of nfsv3
+ int i;
+ struct rpc_message *msg = &task->tk_msg;
+ const struct rpc_procinfo *proc = msg->rpc_proc;
+ const struct nfs_fh *fh;
+
+ // iterate through the nfs3_procedure array,
+ // find the same index of factor which is command word
+ int cmd = -1;
+ int nfs3proc_count = 22;
+ for (i = 0; i < nfs3proc_count; i++) {
+ if (proc == &nfs3_procedures[i]) {
+ cmd = i;
+ break;
+ }
+ }
+
+ if (cmd < 0 || cmd >= nfs3proc_count) {
+ return -1;
+ }
+ if (cmd >= nfs3_parse_ops_size || nfs3_parse_ops[cmd].parse_fh == NULL) {
+ return -1;
+ }
+
+ fh = nfs3_parse_ops[cmd].parse_fh(msg);
+ fh_file_uuid(fh, file_uuid);
+
+ /* debug print uuid */
+ enfs_print_uuid(file_uuid);
+
+ if (cmd == NFS3PROC_FSINFO) {
+ insert_and_update_shard(clnt, file_uuid);
+ }
+
+ return 0;
+}
+
+static struct rpc_xprt *choose_less_queue_len(struct rpc_xprt *xport1, struct rpc_xprt *xport2)
+{
+ struct enfs_xprt_context *context1;
+ struct enfs_xprt_context *context2;
+
+ if (xport1 == NULL) {
+ return xport2;
+ }
+ if (xport2 == NULL) {
+ return xport1;
+ }
+ context1 = (struct enfs_xprt_context *)xprt_get_reserve_context(xport1);
+ context2 = (struct enfs_xprt_context *)xprt_get_reserve_context(xport2);
+ if (atomic_long_read(&(context1->queuelen)) > atomic_long_read(&(context2->queuelen))) {
+ return xport2;
+ }
+ return xport1;
+}
+
/*
 * Find the wwn of the transport whose context carries the given lsid.
 * Returns 0 when no transport on the switch matches.
 *
 * NOTE(review): uses list_for_each_entry_rcu — assumes the caller holds
 * rcu_read_lock() (true for enfs_choose_shard_xport's caller chain);
 * confirm for any new call sites.
 */
static uint64_t get_wwn_from_xps(struct rpc_xprt_switch *xps, uint64_t lsid)
{
	struct rpc_xprt *pos;
	struct enfs_xprt_context *context;

	list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch)
	{
		context = (struct enfs_xprt_context *)xprt_get_reserve_context(pos);
		if (context->lsid == lsid) {
			return context->wwn;
		}
	}
	return 0;
}
+
/* One routing-priority slot: the best candidate seen so far (xprt), the
 * best candidate after the round-robin cursor (optimal_xprt), and the
 * predicate deciding whether a transport qualifies for this slot. */
struct route_rule {
	struct rpc_xprt *xprt;
	struct rpc_xprt *optimal_xprt;
	bool (*match)(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
		struct enfs_xprt_context *context);
};

/* A cpu id equal to INVALID_CPU_ID means cpu routing info is absent. */
bool check_cpuid_invalid(uint32_t cpuId)
{
	return cpuId == INVALID_CPU_ID;
}
+
+static bool match_wwn_cpuId_lsId(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ if (check_cpuid_invalid(cpuId) || check_cpuid_invalid(context->cpuId)) {
+ return false;
+ }
+ if (enfs_check_config_wwn(context->wwn) &&
+ lsid == context->lsid &&
+ cpuId == context->cpuId) {
+ return true;
+ }
+ return false;
+}
+
+static bool match_cpuId_lsId(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ if (check_cpuid_invalid(cpuId) || check_cpuid_invalid(context->cpuId)) {
+ return false;
+ }
+ if (lsid == context->lsid && cpuId == context->cpuId) {
+ return true;
+ }
+ return false;
+}
+
+static bool match_wwn_lsid(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ if (enfs_check_config_wwn(context->wwn) && lsid == context->lsid) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * In the hypermetro scenario,select the xport of the preferred array.
+ */
+static bool match_wwn(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ return enfs_check_config_wwn(context->wwn);
+}
+
+/*
+ * Select the xport with the same lsid
+ */
+static bool match_lsid(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ if (lsid == context->lsid) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * In the hypermetro scenario,select the xport of the array with
+ * the same lif port.
+ */
+static bool match_same_wwn(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ if (wwn != 0 && context->wwn == wwn) {
+ return true;
+ }
+ return false;
+}
+
+static bool match_default(uint64_t wwn, uint64_t lsid, uint32_t cpuId,
+ struct enfs_xprt_context *context)
+{
+ return true;
+}
+
/*
 * Select the best transport on the switch for a shard identified by
 * (lsid, cpuId).
 *
 * Rules are tried in strictness order (see the table below); for each rule
 * two candidates are tracked: the least-loaded match overall (xprt) and the
 * least-loaded match positioned after the round-robin cursor `cur`
 * (optimal_xprt, preferred so repeated calls rotate across equal matches).
 * Transports that are not PM_STATE_NORMAL are skipped, as are "main"
 * transports while native-link IO is disabled.
 *
 * Returns the chosen transport or NULL when nothing qualifies.
 *
 * NOTE(review): iterates with list_for_each_entry_rcu — the caller
 * (enfs_get_shard_xport) holds rcu_read_lock().
 */
struct rpc_xprt *enfs_choose_shard_xport(struct rpc_xprt_switch *xps,
	const struct rpc_xprt *cur, uint64_t lsid, struct rpc_clnt *clnt, uint32_t cpuId)
{
	uint64_t wwn = 0;
	struct rpc_xprt *pos = NULL;
	bool found = false;
	struct rpc_xprt *prev = NULL;
	struct enfs_xprt_context *context = NULL;
	struct rpc_xprt *choose_port = NULL;
	int i;
	int nativeLinkStatus = enfs_get_native_link_io_status();
	struct route_rule rule[] = {
		{NULL, NULL, match_wwn_cpuId_lsId}, // optimal site && cpu && shard view
		{NULL, NULL, match_wwn_lsid}, // optimal site && shard view
		{NULL, NULL, match_wwn}, // optimal site
		{NULL, NULL, match_cpuId_lsId}, // cpu and shard route
		{NULL, NULL, match_lsid}, // shard route
		{NULL, NULL, match_same_wwn}, // select ports at the same site
		{NULL, NULL, match_default}, // RR
	};
	int len = ARRAY_SIZE(rule);


	/* wwn of the array currently serving this lsid (0 if unknown) */
	wwn = get_wwn_from_xps(xps, lsid);
	list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch)
	{
		context = (struct enfs_xprt_context *)xprt_get_reserve_context(pos);
		if (context == NULL ||
			atomic_read(&context->path_state) != PM_STATE_NORMAL) {
			prev = pos;
			continue;
		}

		if (!nativeLinkStatus && context->main) {
			continue;
		}

		/* `found` flips once we have walked past the RR cursor */
		if (cur == prev) {
			found = true;
		}

		/* best match overall, per rule */
		for (i = 0; i < len; i++) {
			if (rule[i].match(wwn, lsid, cpuId, context)) {
				rule[i].xprt = choose_less_queue_len(rule[i].xprt, pos);
				break;
			}
		}

		if (found == false) {
			prev = pos;
			continue;
		}

		/* best match after the RR cursor, per rule */
		for (i = 0; i < len; i++) {
			if (rule[i].match(wwn, lsid, cpuId, context)) {
				rule[i].optimal_xprt =
					choose_less_queue_len(rule[i].optimal_xprt, pos);
				break;
			}
		}
		prev = pos;
	}

	/* first rule with any candidate wins; prefer the post-cursor match */
	for (i = 0; i < len; i++) {
		if (rule[i].xprt == NULL) {
			continue;
		}

		if (rule[i].optimal_xprt != NULL) {
			choose_port = rule[i].optimal_xprt;
		} else {
			choose_port = rule[i].xprt;
		}
		break;
	}

	return choose_port;
}
+
/*
 * Pick and pin a transport for a shard-routed task.
 *
 * Chooses via enfs_choose_shard_xport() under rcu, advances the client's
 * round-robin cursor to the chosen transport, releases any transport the
 * task already holds (resetting its retry counters), then returns a
 * referenced transport with its queue length bumped for load accounting.
 *
 * Returns NULL when no switch is attached or no transport qualifies.
 *
 * NOTE(review): the cursor store uses smp_store_release to pair with the
 * smp_load_acquire above — presumably mirroring the sunrpc iterator's own
 * cursor discipline; verify against net/sunrpc/xprtmultipath.c.
 */
struct rpc_xprt *enfs_get_shard_xport(struct rpc_clnt *clnt, struct rpc_task *task, uint64_t lsid, uint32_t cpuId)
{
	struct rpc_xprt *old;
	struct rpc_xprt *xprt;
	struct rpc_xprt_switch *xps;
	struct rpc_xprt_iter *xpi = &clnt->cl_xpi;
	struct enfs_xprt_context *context;

	rcu_read_lock();
	xps = rcu_dereference(xpi->xpi_xpswitch);
	if (xps == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	old = smp_load_acquire(&xpi->xpi_cursor);
	xprt = enfs_choose_shard_xport(xps, old, lsid, clnt, cpuId);
	smp_store_release(&xpi->xpi_cursor, xprt);

	/* drop the task's previous transport before handing out a new one */
	if (task->tk_xprt) {
		xprt_release(task);
		rpc_init_task_retry_counters(task);
		rpc_task_release_transport(task);
	}

	if (xprt == NULL) {
		rcu_read_unlock();
		return NULL;
	}

	/* take a reference for the task; queuelen feeds choose_less_queue_len */
	xprt = xprt_get(xprt);

	context = xprt_get_reserve_context(xprt);
	if (context)
		atomic_long_inc(&context->queuelen);
	rcu_read_unlock();

	return xprt;
}
+
+void shard_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+ uint64_t lsid = 0;
+ uint32_t cpuId = 0;
+ int ret;
+ FILE_UUID file_uuid;
+ struct rpc_xprt_switch *xps;
+
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ rcu_read_unlock();
+
+ if (clnt->cl_vers != 3 || xps->xps_iter_ops != enfs_xprt_rr_ops()) {
+ return;
+ }
+ memset(&file_uuid, 0, sizeof(FILE_UUID));
+ ret = get_uuid_from_task(clnt, task, &file_uuid);
+ if (ret != 0) {
+ enfs_log_debug("get uuid from task failed.\n");
+ return;
+ }
+
+ if (enfs_get_config_multipath_state() != ENFS_MULTIPATH_ENABLE ||
+ enfs_get_config_loadbalance_mode() != ENFS_LOADBALANCE_SHARDVIEW) {
+ return;
+ }
+
+ ret = get_uuid_from_task(clnt, task, &file_uuid);
+ if (ret != 0) {
+ enfs_log_debug("get uuid failed.\n");
+ return;
+ }
+
+ ret = enfs_query_lif_info(clnt, &file_uuid, &lsid, &cpuId);
+ if (ret != 0 || lsid == 0) {
+ // trigger query shard from storage
+ return;
+ }
+
+ task->tk_xprt = enfs_get_shard_xport(clnt, task, lsid, cpuId);
+ return;
+}
+
+static void debug_show_uuidinfo(int argc, char *argv[])
+{
+ int ret;
+ struct view_table *table;
+ struct fs_info *info;
+ struct shard_view *view;
+ FILE_UUID file_uuid;
+ uint64_t clusterId = 0xffff;
+ uint32_t storagePoolId = 0xffff;
+ uint32_t shardId;
+ file_uuid.dataLen = FILE_UUID_BUFF_LEN;
+
+
+ if (argc != 1) {
+ enfs_log_info("argc number is wrong.\n");
+ return;
+ }
+
+ ret = scan_uuid(argv[0], file_uuid.data, FILE_UUID_BUFF_LEN);
+ if (ret) {
+ enfs_log_info("uuid str is wrong, str:%s.\n", argv[1]);
+ return;
+ }
+
+ printk(KERN_INFO "fsidinfo devId:%llu", GET_DEVID_FROM_UUID(&file_uuid));
+ read_lock(&shard_ctrl->view_lock);
+ list_for_each_entry (table, &shard_ctrl->view_list, next) {
+ if (table->devId != GET_DEVID_FROM_UUID(&file_uuid)) {
+ continue;
+ ;
+ }
+ list_for_each_entry (info, &table->fs_head, next) {
+ if (info->fsId != GET_FSID_FROM_UUID(&file_uuid)) {
+ continue;
+ }
+ clusterId= info->clusterId;
+ storagePoolId = info->storagePoolId;
+ printk(KERN_INFO
+ "fsidinfo fsid:%u clusterId:%llu storagePoolId:%u "
+ "tenantId:%u.\n",
+ GET_FSID_FROM_UUID(&file_uuid), info->clusterId,
+ info->storagePoolId, info->tenantId);
+ break;
+ }
+
+ shardId = get_shardid_from_uuid(&file_uuid);
+ list_for_each_entry (view, &table->shard_head, next) {
+ if (view->clusterId != clusterId ||
+ view->storagePoolId != storagePoolId) {
+ continue;
+ }
+ if (shardId >= view->num) {
+ enfs_log_error("shardNum:%u shardId:%u", view->num, shardId);
+ }
+ printk(KERN_INFO
+ "fsidinfo clusterId:%llu storagePoolId:%u "
+ "shardNum:%u shard:%u lsid:%llu cpuId:%u.\n",
+ view->clusterId, view->storagePoolId, view->num, shardId,
+ view->entry[shardId].lsid, view->entry[shardId].cpuid);
+ break;
+ }
+ break;
+ }
+ read_unlock(&shard_ctrl->view_lock);
+ return;
+}
+
+static void debug_show_fsinfo(int argc, char *argv[])
+{
+ uint32_t fsid;
+ struct view_table *table;
+ struct fs_info *info;
+
+ if (argc != 1) {
+ enfs_log_info("argc number is wrong.\n");
+ return;
+ }
+
+ if (sscanf(argv[0], "%d", &fsid) <= 0) {
+ enfs_log_info("parse cluster id wrong.\n");
+ return;
+ }
+
+ read_lock(&shard_ctrl->view_lock);
+ list_for_each_entry (table, &shard_ctrl->view_list, next) {
+ list_for_each_entry (info, &table->fs_head, next) {
+ if (fsid == info->fsId) {
+ printk(KERN_INFO "fsid(%u) clusterId(%llu) storagePoolId(%u) tenantId(%u).\n",
+ fsid, info->clusterId, info->storagePoolId, info->tenantId);
+ }
+ }
+ }
+ read_unlock(&shard_ctrl->view_lock);
+}
+
+static void debug_show_shardinfo(int argc, char *argv[])
+{
+ struct view_table *table;
+ struct shard_view *view;
+ uint64_t clusterId;
+ uint32_t storagePoolId;
+ uint32_t startIndex;
+ uint32_t count;
+
+ if (argc != 3) {
+ enfs_log_info("argc number is wrong.\n");
+ return;
+ }
+
+ if (sscanf(argv[0], "%llu", &clusterId) <= 0) {
+ enfs_log_info("parse cluster id wrong, %s.\n", argv[0]);
+ return;
+ }
+ if (sscanf(argv[1], "%u", &storagePoolId) <= 0) {
+ enfs_log_info("parse storage pool id wrong, %s.\n", argv[1]);
+ return;
+ }
+ if (sscanf(argv[2], "%u", &startIndex) <= 0) {
+ enfs_log_info("parse shard start id wrong, %s.\n", argv[2]);
+ return;
+ }
+
+ printk(KERN_INFO "clusterId(%llu) storagePoolId(%u) startIndex(%u).\n",
+ clusterId, storagePoolId, startIndex);
+
+ read_lock(&shard_ctrl->view_lock);
+ list_for_each_entry (table, &shard_ctrl->view_list, next) {
+ list_for_each_entry (view, &table->shard_head, next) {
+ if (view->clusterId == clusterId &&
+ view->storagePoolId == storagePoolId) {
+ for (count = 0; count < 100; count++) {
+ if (count + startIndex < view->num) {
+ printk(KERN_INFO "shardid(%d) lsid(0x%llx) vnodeid(%u) cpuId(%u).\n",
+ count + startIndex, view->entry[count + startIndex].lsid,
+ view->entry[count + startIndex].vnodeid, view->entry[count + startIndex].cpuid);
+ }
+ }
+ }
+ }
+ }
+ read_unlock(&shard_ctrl->view_lock);
+}
+
/*
 * Render a sockaddr as a printable IP string into buf (len bytes).
 * Returns 0 on success, 1 for an unsupported address family.
 * (Local twin of sockaddr_ip_to_str used by the debug commands.)
 */
static int get_ip_to_str(struct sockaddr *addr, char *buf, int len)
{
	if (addr->sa_family == AF_INET) {
		snprintf(buf, len, "%pI4",
			 &((struct sockaddr_in *)addr)->sin_addr);
		return 0;
	}

	if (addr->sa_family == AF_INET6) {
		snprintf(buf, len, "%pI6c",
			 &((struct sockaddr_in6 *)addr)->sin6_addr);
		return 0;
	}

	return 1;
}
+
+static void debug_show_lifinfo(int argc, char *argv[])
+{
+ struct clnt_uuid_info *info;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xps;
+ struct rpc_xprt *pos;
+ struct enfs_xprt_context *context;
+ char buf[128];
+
+ if (argc > 1) {
+ enfs_log_info("argc number is wrong.\n");
+ return;
+ }
+
+ read_lock(&shard_ctrl->clnt_info_lock);
+ list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+ clnt = info->clnt;
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch)
+ {
+ get_ip_to_str((struct sockaddr *)&(pos->addr), buf, sizeof(buf));
+ if (argc == 0 || strcmp(buf, argv[0]) == 0) {
+ context = (struct enfs_xprt_context *)xprt_get_reserve_context(pos);
+ printk(KERN_INFO "ipaddr(%s) lsId(0x%llx) wwn(0x%llx) cpuId(%u).\n",
+ buf, context->lsid, context->wwn, context->cpuId);
+ }
+ }
+ rcu_read_unlock();
+ }
+ read_unlock(&shard_ctrl->clnt_info_lock);
+ return;
+}
+
+static void debug_show_shardview(int argc, char *argv[])
+{
+ struct view_table *table;
+ struct fs_info *info;
+ struct shard_view *view;
+
+ read_lock(&shard_ctrl->view_lock);
+ list_for_each_entry (table, &shard_ctrl->view_list, next) {
+ printk(KERN_INFO "shardivew devid:%llu.\n", table->devId);
+
+ list_for_each_entry (info, &table->fs_head, next) {
+ printk(KERN_INFO
+ "shardview fsid:%u clusterId:%llu storagePoolId:%u "
+ "tenantId:%u.\n",
+ info->fsId, info->clusterId, info->storagePoolId,
+ info->tenantId);
+ }
+
+ list_for_each_entry (view, &table->shard_head, next) {
+ printk(KERN_INFO
+ "shardview clusterId:%llu storagePoolId:%u "
+ "shardNum:%u.\n",
+ view->clusterId, view->storagePoolId, view->num);
+ }
+ }
+ read_unlock(&shard_ctrl->view_lock);
+ return;
+}
+
/* Debug command: print the DNS name cache (delegates entirely). */
static void debug_show_dns_cache(int argc, char *argv[])
{
	enfs_debug_print_name_list();
}
+
/*
 * Split a command string on spaces into at most 10 argv slots.
 *
 * Returns a kstrdup'd working copy (or NULL on allocation failure, with
 * *argc set to 0); the argv[] entries point INTO that copy, so the caller
 * must keep it alive while using argv and kfree() it afterwards (see
 * enfs_debug_match_cmd). Tokens beyond the tenth are silently dropped.
 *
 * NOTE(review): consecutive spaces yield empty-string tokens (strsep
 * semantics) — presumably acceptable for this debug interface; verify.
 */
static char *parse_cmd_args(char *str, int *argc, char *argv[10])
{
	char *token;
	int i = 0;
	char *copy = kstrdup(str, GFP_KERNEL);
	char *tmp = copy;

	if (!copy) {
		*argc = 0;
		return NULL;
	}

	while ((token = strsep(&tmp, " ")) != NULL) {
		/* cap at the caller's argv capacity */
		if (i == 10) {
			break;
		}
		argv[i] = token;
		i++;
	}
	*argc = i;

	return copy;
}
+
/* Debug command: print the current eNFS link and mount counters. */
static void debug_show_linkcount(int argc, char *argv[])
{
	printk(KERN_INFO "enfs link count:%d mount count:%d\n",
		enfs_link_count_num(), enfs_mount_count());
}
+
+int enfs_debug_match_cmd(char *str, size_t len)
+{
+ int i;
+ int argc;
+ char *argv[10];
+ const struct clnt_debug_cmd cmds[] = {
+ {"uuidinfo", debug_show_uuidinfo},
+ {"fsinfo", debug_show_fsinfo},
+ {"shardinfo", debug_show_shardinfo},
+ {"lifinfo", debug_show_lifinfo},
+ {"shardview", debug_show_shardview},
+ {"linkcount", debug_show_linkcount},
+ {"dnscache", debug_show_dns_cache},
+ };
+
+ char *buf = parse_cmd_args(str, &argc, argv);
+ if (!buf || argc == 0) {
+ enfs_log_info("parse failed.\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ if (strcmp(argv[0], cmds[i].name) == 0) {
+ cmds[i].fn(argc - 1, &(argv[1]));
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(cmds)) {
+ enfs_log_info("not found cmd:%s\n", argv[0]);
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+void enfs_query_xprt_shard(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+ int ret;
+ uint64_t lsid = 0;
+ uint64_t wwn = 0;
+ uint32_t cpuId = 0;
+ char buf[64];
+ struct enfs_xprt_context *ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+
+ if (clnt->cl_vers != 3) {
+ return;
+ }
+
+ ret = sockaddr_ip_to_str((struct sockaddr *)&xprt->addr, buf, 64);
+ if (ret != 0) {
+ enfs_log_error("ip to str err:%d.\n", ret);
+ return;
+ }
+
+ ret = enfs_query_lifview(clnt, xprt, buf, &lsid, &wwn, &cpuId);
+ if (ret) {
+ return;
+ }
+
+ ctx->lsid = lsid;
+ ctx->wwn = wwn;
+ ctx->cpuId = cpuId;
+ return;
+}
+
+static bool is_valid_ip_address(const char *ip_str)
+{
+ struct in_addr addr4;
+ struct in6_addr addr6;
+
+ if (in4_pton(ip_str, -1, (u8 *)&addr4, '\0', NULL) == 1) {
+ return true;
+ }
+
+ if (in6_pton(ip_str, -1, (u8 *)&addr6, '\0', NULL) == 1) {
+ return true;
+ }
+
+ return false;
+}
+
/*
 * rpc_clnt_iterate_for_each_xprt callback: retarget the NLM transport
 * (passed via data) at the first healthy NFS transport found.
 *
 * Copies the candidate's destination address/name onto the nlm xprt and
 * propagates a usable source address into the nlm context (preferring the
 * candidate's cached srcaddr, falling back to rpc_localalladdr()).
 *
 * Returns -1 to stop iteration once a transport was adopted, 0 to keep
 * scanning (including soft-failure paths, which leave the nlm xprt
 * partially updated — see note below).
 *
 * NOTE(review): strcpy into nlm_xprt->servername and
 * address_strings[RPC_DISPLAY_ADDR] assumes those buffers are at least as
 * large as the 64-byte remoteip string — they are typically allocated to
 * fit the ORIGINAL address, so verify this cannot overflow.
 * NOTE(review): the address/name copy happens before the ctx validity
 * checks; an early `return 0` therefore leaves the nlm xprt pointing at
 * the new address with a stale source — confirm intended.
 */
static int EnfsChooseNewNlmXprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
	void *data)
{
	int ret = 0;
	char remoteip[64] = {"*"};
	char localip[64] = {"*"};
	struct enfs_xprt_context *ctx = NULL;
	struct enfs_xprt_context *nlm_ctx = NULL;
	char local_name[INET6_ADDRSTRLEN];
	const char *local = local_name;
	struct sockaddr_storage srcaddr;
	struct rpc_xprt *nlm_xprt = (struct rpc_xprt *)data;
	/* only healthy paths are candidates */
	if (pm_get_path_state(xprt) != PM_STATE_NORMAL) {
		return 0;
	}

	sockaddr_ip_to_str((struct sockaddr *)&xprt->addr, remoteip, sizeof(remoteip));
	memcpy((struct sockaddr *)&nlm_xprt->addr, (struct sockaddr *)&xprt->addr, sizeof(xprt->addr));
	strcpy(nlm_xprt->servername, remoteip);
	strcpy(nlm_xprt->address_strings[RPC_DISPLAY_ADDR], remoteip);

	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
	if (ctx == NULL) {
		enfs_log_error("The xprt multipath ctx is not valid.\n");
		return 0;
	}

	nlm_ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(nlm_xprt);
	if (nlm_ctx == NULL) {
		enfs_log_error("The nlm xprt ctx is not valid.\n");
		return 0;
	}

	/* prefer the candidate's cached source address; when it is not a
	 * parseable IP, ask the socket layer for the local address */
	sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, local_name, sizeof(local_name));
	if (!is_valid_ip_address(local)) {
		ret = rpc_localalladdr(xprt, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
		if (ret != 0) {
			enfs_log_error("rpc_localalladdr localip err:%d.\n", ret);
			return 0;
		}
		memcpy((struct sockaddr *)&nlm_ctx->srcaddr, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
		sockaddr_ip_to_str((struct sockaddr *)&srcaddr, localip, sizeof(localip));
	} else {
		memcpy((struct sockaddr *)&nlm_ctx->srcaddr, (struct sockaddr *)&ctx->srcaddr, sizeof(ctx->srcaddr));
		sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, localip, sizeof(localip));
	}

	return -1;
}
+
+/*
+ * rpc_clnt_iterate_for_each_xprt() callback for the NLM client: when the
+ * current NLM path has failed, pick a healthy data path (via
+ * EnfsChooseNewNlmXprt) and rewrite the nlm_host's name/address to match.
+ * Returns -1 to stop iterating once the host has been repaired.
+ */
+static int TraverseNlmXprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+    void *data)
+{
+    char remoteip[64] = {"*"};
+    struct nfs_server *server = (struct nfs_server *)data;
+    struct enfs_xprt_context *ctx = NULL;
+
+    if (pm_get_path_state(xprt) != PM_STATE_FAULT) {
+        return 0;
+    }
+
+    /* Validate the context before mutating anything so a broken xprt
+     * cannot leave server->nlm_host half-rewritten (the original
+     * updated h_name/h_addrbuf before this check). */
+    ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+    if (ctx == NULL) {
+        enfs_log_error("The xprt multipath ctx is not valid.\n");
+        return 0;
+    }
+
+    /* Let the data client select a healthy path and clone it into this
+     * (faulted) NLM xprt; the copies below then read the new address. */
+    rpc_clnt_iterate_for_each_xprt(server->nfs_client->cl_rpcclient, EnfsChooseNewNlmXprt, (void *)xprt);
+
+    sockaddr_ip_to_str((struct sockaddr *)&xprt->addr, remoteip, sizeof(remoteip));
+    strcpy(server->nlm_host->h_name, remoteip);
+    strcpy(server->nlm_host->h_addrbuf, remoteip);
+
+    memcpy((struct sockaddr *)&server->nlm_host->h_addr, (struct sockaddr *)&xprt->addr, sizeof(xprt->addr));
+    memcpy((struct sockaddr *)&server->nlm_host->h_srcaddr, (struct sockaddr *)&ctx->srcaddr, sizeof(ctx->srcaddr));
+
+    return -1;
+}
+
+
+/*
+ * After a shard/path change, find every mounted volume whose server
+ * address matches @clnt's peer and re-assert its NLM locks over a
+ * healthy path.  Always returns 0.
+ *
+ * NOTE(review): TraverseNlmXprt and nlmclnt_recovery() run here under
+ * rcu_read_lock() and nn->nfs_client_lock; confirm neither can sleep in
+ * this configuration.
+ */
+static int enfs_recovery_nlm_lock(struct rpc_clnt *clnt)
+{
+    int ret = 0;
+    struct nfs_net *nn = NULL;
+    struct net *net;
+    struct nfs_server *pos = NULL;
+    char remoteip[64] = {"*"};
+    char serverip[64] = {"*"};
+
+    rcu_read_lock();
+    for_each_net_rcu (net) {
+        nn = net_generic(net, nfs_net_id);
+        if (nn == NULL) {
+            continue;
+        }
+
+        spin_lock(&nn->nfs_client_lock);
+        list_for_each_entry(pos, &nn->nfs_volume_list, master_link) {
+            /* Skip volumes without lock state or an rpc client.
+             * (The original tested pos->nlm_host twice.) */
+            if (!pos->nlm_host || !pos->client) {
+                continue;
+            }
+
+            ret = sockaddr_ip_to_str((struct sockaddr *)&clnt->cl_xprt->addr, remoteip, sizeof(remoteip));
+            if (ret != 0) {
+                enfs_log_error("remoteip to str err:%d.\n", ret);
+                continue;
+            }
+
+            ret = sockaddr_ip_to_str((struct sockaddr *)&pos->client->cl_xprt->addr, serverip, sizeof(serverip));
+            if (ret != 0) {
+                enfs_log_error("remoteip to str err:%d.\n", ret);
+                continue;
+            }
+
+            if (!strcmp(remoteip, serverip)) {
+                /* Point the volume's NLM transport at a healthy path,
+                 * then restart lock recovery. */
+                rpc_clnt_iterate_for_each_xprt(pos->nlm_host->h_rpcclnt, TraverseNlmXprt, (void *)pos);
+
+                nlmclnt_recovery(pos->nlm_host);
+            }
+        }
+        spin_unlock(&nn->nfs_client_lock);
+        break;
+    }
+    rcu_read_unlock();
+    return 0;
+}
+
+/* Per-xprt iterator callback: refresh shard info on healthy paths only. */
+static int each_xprt_update_shard(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+    void *data)
+{
+    if (pm_get_path_state(xprt) != PM_STATE_NORMAL)
+        return 0;
+
+    enfs_query_xprt_shard(clnt, xprt);
+    return 0;
+}
+
+/* Mark the shard refresh for @clnt as finished (clears its in-flight flag). */
+static void shard_update_done(struct rpc_clnt *clnt)
+{
+    struct clnt_uuid_info *cur;
+
+    write_lock(&shard_ctrl->clnt_info_lock);
+    list_for_each_entry (cur, &shard_ctrl->clnt_info_list, next) {
+        if (cur->clnt != clnt)
+            continue;
+        cur->updateing = false;
+        break;
+    }
+    write_unlock(&shard_ctrl->clnt_info_lock);
+}
+
+// Deferred work: refresh the shard view for one rpc_clnt and, when the
+// layout actually changed (shard_work->isupdate), re-assert NLM locks on
+// the new paths.  Always drops the clnt and xprt-switch references taken
+// in shard_update_work() and clears the per-clnt in-flight flag.
+static void do_shared_update(struct work_struct *work)
+{
+ int error;
+ struct shard_work *shard_work = container_of(work, struct shard_work, work);
+ struct clnt_uuid_info *info = &shard_work->info;
+
+ // Module is shutting down: skip the refresh but still release refs.
+ if (shard_should_stop) {
+ goto stop_work;
+ }
+
+ error = query_and_update_shard(info->clnt, &info->root_uuid, shard_work);
+ if (error) {
+ enfs_log_error("update shard err:%d.\n", error);
+ }
+
+ // Refresh sharding on every path that is currently usable.
+ rpc_clnt_iterate_for_each_xprt(info->clnt, each_xprt_update_shard, NULL);
+
+ if (shard_work->isupdate) {
+ // Actively reassert the nlm lock
+ enfs_recovery_nlm_lock(info->clnt);
+ }
+
+stop_work:
+ rpc_release_client(shard_work->info.clnt);
+ xprt_switch_put(shard_work->xps);
+ // Clear the "updateing" flag so the next refresh cycle can run.
+ shard_update_done(info->clnt);
+ kfree(shard_work);
+}
+
+/*
+ * Queue a shard-refresh work item for @info->clnt.  Called with
+ * shard_ctrl->clnt_info_lock held for writing (see
+ * query_update_all_clnt), hence the atomic allocations.  @head collects
+ * rpc clients whose reference must be dropped later (outside the lock)
+ * when the work could not be queued.
+ * Returns 0 when the work was queued, a negative value otherwise so the
+ * caller resets the in-flight flag.
+ */
+static int shard_update_work(struct clnt_uuid_info *info,
+    struct list_head *head)
+{
+    struct rpcclnt_release_item *item;
+    struct shard_work *shard_work;
+    bool ok;
+
+    /* GFP_ATOMIC: the caller holds a spinning rwlock, so sleeping
+     * allocations (the original used GFP_KERNEL) are not allowed. */
+    shard_work = kzalloc(sizeof(*shard_work), GFP_ATOMIC);
+    if (!shard_work) {
+        return -ENOMEM;
+    }
+
+    /* Pre-allocate the release item so the queue-failure path below
+     * never has to allocate. */
+    item = kzalloc(sizeof(*item), GFP_ATOMIC);
+    if (!item) {
+        enfs_log_error("alloc item failed.\n");
+        kfree(shard_work);
+        return -ENOMEM;
+    }
+
+    rcu_read_lock();
+    shard_work->xps =
+        xprt_switch_get(rcu_dereference(info->clnt->cl_xpi.xpi_xpswitch));
+    rcu_read_unlock();
+    if (!shard_work->xps) {
+        kfree(item);
+        kfree(shard_work);
+        return -EAGAIN;
+    }
+
+    INIT_WORK(&shard_work->work, do_shared_update);
+    shard_work->info = *info;
+#ifdef ENFS_OPENEULER_660
+    if (!refcount_inc_not_zero(&shard_work->info.clnt->cl_count)) {
+#else
+    if (!atomic_inc_not_zero(&shard_work->info.clnt->cl_count)) {
+#endif
+        /* Client is being destroyed; report failure so the caller
+         * clears the in-flight flag (the original returned 0 here and
+         * left info->updateing stuck at true forever). */
+        xprt_switch_put(shard_work->xps);
+        kfree(item);
+        kfree(shard_work);
+        return -ENODEV;
+    }
+
+    ok = queue_work(shard_workq, &shard_work->work);
+    if (!ok) {
+        /* Work already pending: park the clnt reference on @head for
+         * enfs_destroy_rpcclnt_list() to release outside the lock. */
+        item->clnt = shard_work->info.clnt;
+        list_add_tail(&item->node, head);
+        xprt_switch_put(shard_work->xps);
+        kfree(shard_work);
+        return -1;
+    }
+
+    kfree(item);
+    return 0;
+}
+
+// Kick off one shard-refresh work item per registered rpc_clnt that is
+// not already being refreshed.  Clients whose work could not be queued
+// are parked on free_list so their references are dropped after the lock
+// is released.
+static void query_update_all_clnt(void)
+{
+ int ret;
+ struct clnt_uuid_info *info;
+ LIST_HEAD(free_list);
+
+ write_lock(&shard_ctrl->clnt_info_lock);
+ list_for_each_entry (info, &shard_ctrl->clnt_info_list, next) {
+ if (info->updateing) {
+ continue;
+ }
+
+ info->updateing = true;
+ // NOTE(review): shard_update_work() allocates while this write_lock
+ // (a spinning rwlock) is held -- its allocations must not sleep.
+ ret = shard_update_work(info, &free_list);
+ if (ret) {
+ enfs_log_error("update all err:%d.\n", ret);
+ info->updateing = false;
+ }
+ }
+ write_unlock(&shard_ctrl->clnt_info_lock);
+
+ // Release parked rpc_clnt references outside the lock.
+ enfs_destroy_rpcclnt_list(&free_list);
+}
+
+/*
+ * Return true when any mounted volume currently has NLM lock owners, in
+ * which case the shard view must be refreshed on the short interval so
+ * lock recovery reacts quickly.  Only the first net namespace providing
+ * nfs_net data is examined, matching the other per-net walkers here.
+ */
+static bool enfs_need_quick_update_shard(void)
+{
+    struct nfs_server *srv;
+    struct nfs_net *nn;
+    struct net *net;
+    bool need = false;
+
+    rcu_read_lock();
+    for_each_net_rcu (net) {
+        nn = net_generic(net, nfs_net_id);
+        if (!nn)
+            continue;
+
+        spin_lock(&nn->nfs_client_lock);
+        list_for_each_entry (srv, &nn->nfs_volume_list, master_link) {
+            if (srv->nlm_host &&
+                !list_empty(&srv->nlm_host->h_lockowners)) {
+                need = true;
+                break;
+            }
+        }
+        spin_unlock(&nn->nfs_client_lock);
+        break;
+    }
+    rcu_read_unlock();
+
+    return need;
+}
+
+// Kernel thread: periodically refresh the shard view for all clients.
+// The interval shrinks to SHARD_VIEW_UPDATE_INTERVAL_UNDER_LOCK while
+// any volume holds NLM locks so lock recovery reacts quickly.
+// LVOS_TP_START/LVOS_TP_END are fault-injection test points that may
+// override interval_ms.
+static int shard_update_loop(void *data)
+{
+ int32_t interval_ms;
+ ktime_t start = ktime_get();
+
+ while (!kthread_should_stop()) {
+ LVOS_TP_START(QUICK_UPDATE_SHARD, &interval_ms);
+ interval_ms = enfs_need_quick_update_shard() ?
+ (SHARD_VIEW_UPDATE_INTERVAL_UNDER_LOCK * SECOND_TO_MILLISECOND) :
+ (enfs_get_config_shardview_update_interval() * SECOND_TO_MILLISECOND);
+ LVOS_TP_END;
+ // Refresh only when the interval elapsed and multipath is enabled.
+ if (enfs_timeout_ms(&start, interval_ms) && enfs_get_config_multipath_state() == ENFS_MULTIPATH_ENABLE) {
+ start = ktime_get();
+ query_update_all_clnt();
+ enfs_log_debug("update shard.\n");
+ }
+ enfs_msleep(1000); // 1000 ms.
+ }
+ return 0;
+}
+
+/*
+ * Obtain the global shard-view control block: reuse the instance already
+ * registered with the enfs adapter, or allocate, initialise and register
+ * a new one.  Returns NULL on allocation failure.
+ */
+struct shard_view_ctrl *enfs_shard_ctrl_init(void)
+{
+    struct shard_view_ctrl *ctrl = enfs_adapter_get_data();
+
+    if (ctrl != NULL) {
+        enfs_log_info("existing shard ctrl is obtained.\n");
+        return ctrl;
+    }
+
+    ctrl = kmalloc(sizeof(*ctrl), GFP_KERNEL);
+    if (ctrl == NULL) {
+        enfs_log_error("shard view cltr alloc failed.\n");
+        return NULL;
+    }
+
+    /* Every field is initialised explicitly below. */
+    INIT_LIST_HEAD(&ctrl->view_list);
+    rwlock_init(&ctrl->view_lock);
+    INIT_LIST_HEAD(&ctrl->clnt_info_list);
+    rwlock_init(&ctrl->clnt_info_lock);
+    enfs_adapter_set_data((void *)ctrl);
+    return ctrl;
+}
+
+/*
+ * Bring up the shard subsystem: control block, work queue and periodic
+ * update thread.  On kthread failure the work queue is destroyed so it
+ * does not leak (the original leaked it); the control block stays
+ * registered with the adapter because it may be shared with other users.
+ * Returns 0 on success or a negative errno.
+ */
+int enfs_shard_init(void)
+{
+    int ret;
+
+    shard_ctrl = enfs_shard_ctrl_init();
+    if (!shard_ctrl) {
+        enfs_log_error("create shard view cltr failed.\n");
+        return -ENOMEM;
+    }
+
+    shard_workq = create_workqueue("enfs_shard_workqueue");
+    if (!shard_workq) {
+        enfs_log_error("create workqueue failed.\n");
+        return -ENOMEM;
+    }
+
+    shard_should_stop = false;
+
+    shard_thread = kthread_run(shard_update_loop, NULL, "enfs_shard_update");
+    if (IS_ERR(shard_thread)) {
+        enfs_log_error("Failed to create thread shard update.\n");
+        ret = PTR_ERR(shard_thread);
+        /* Clear the handle so enfs_shard_exit() never calls
+         * kthread_stop() on an error pointer. */
+        shard_thread = NULL;
+        destroy_workqueue(shard_workq);
+        shard_workq = NULL;
+        return ret;
+    }
+    return 0;
+}
+
+/*
+ * Tear down the shard subsystem: stop the refresh thread, then drain and
+ * destroy the work queue.  The stop flag is raised first (the original
+ * raised it only after kthread_stop) so queued works observe it and bail
+ * out early instead of doing a full refresh during shutdown.
+ */
+void enfs_shard_exit(void)
+{
+    shard_should_stop = true;
+
+    if (shard_thread) {
+        kthread_stop(shard_thread);
+    }
+
+    if (shard_workq) {
+        flush_workqueue(shard_workq);
+        destroy_workqueue(shard_workq);
+    }
+}
diff --git a/fs/nfs/enfs/multipath/path_mgmt/dns_process.c b/fs/nfs/enfs/multipath/path_mgmt/dns_process.c
new file mode 100644
index 0000000..fa03f1b
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/dns_process.c
@@ -0,0 +1,955 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "../../../fs/nfs/nfs4_fs.h"
+#include "../../../fs/nfs/netns.h"
+#include "dns_internal.h"
+#include "enfs_log.h"
+#include "enfs_multipath.h"
+#include "enfs_multipath_client.h"
+#include "enfs_remount.h"
+#include "enfs_config.h"
+#include "exten_call.h"
+#include "enfs_tp_common.h"
+
+// Periodic DNS refresh thread and the work queue its re-queries run on.
+static struct task_struct *dns_thread;
+static struct workqueue_struct *dns_workq; // timer for test xprt workqueue
+
+// In-kernel cache of resolved domain names, guarded by dns_cache_lock.
+static LIST_HEAD(dns_cache_list);
+static spinlock_t dns_cache_lock;
+
+// NOTE(review): shared scratch buffer for enfs_quick_sort(); makes the
+// sort non-reentrant -- confirm all sorting is serialised.
+static char dns_sort_ip[IP_ADDRESS_LEN_MAX]; // Temporary character string used for sorting.
+
+// One cached domain name with its resolved addresses.  The *_bc lists
+// are filled in the background by re-queries and promoted to the active
+// lists by enfs_swap_name_cache().  @ref counts the NFS clients that
+// still use this name (rebuilt by enfs_collect_all_domain_name).
+struct name_list {
+ struct list_head next;
+ char name[MAX_DNS_NAME_LEN];
+ struct nfs_ip_list inet;
+ struct nfs_ip_list inet6;
+
+ /* Add to background list on update. */
+ struct nfs_ip_list inet_bc;
+ struct nfs_ip_list inet6_bc;
+ int ref;
+};
+
+/*
+ * Format @addr as an IPv4/IPv6 string into @buf (at most @len bytes).
+ * Returns 0 on success, 1 for an unsupported address family.
+ * On failure @buf is set to the empty string so callers that ignore the
+ * return value (e.g. enfs_debug_print_name_list) never print stack
+ * garbage -- the original left the buffer untouched.
+ */
+static int sockaddr_ip_to_str(struct sockaddr *addr, char *buf, int len)
+{
+    if (len <= 0) {
+        return 1;
+    }
+
+    switch (addr->sa_family) {
+    case AF_INET: {
+        struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+
+        snprintf(buf, len, "%pI4", &sin->sin_addr);
+        return 0;
+    }
+    case AF_INET6: {
+        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+        snprintf(buf, len, "%pI6", &sin6->sin6_addr);
+        return 0;
+    }
+    default:
+        break;
+    }
+
+    buf[0] = '\0';
+    return 1;
+}
+
+/* Debug aid: dump every cached domain name and its IPv4/IPv6 addresses
+ * to the kernel log. */
+void enfs_debug_print_name_list(void)
+{
+    struct name_list *entry;
+    char buf[128];
+    int idx;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry (entry, &dns_cache_list, next) {
+        printk(KERN_INFO "domain name:%s\n", entry->name);
+        for (idx = 0; idx < entry->inet.count; idx++) {
+            sockaddr_ip_to_str((struct sockaddr *)&entry->inet.address[idx],
+                buf, sizeof(buf));
+            printk(KERN_INFO " %s\n", buf);
+        }
+        for (idx = 0; idx < entry->inet6.count; idx++) {
+            sockaddr_ip_to_str((struct sockaddr *)&entry->inet6.address[idx],
+                buf, sizeof(buf));
+            printk(KERN_INFO " %s\n", buf);
+        }
+    }
+    spin_unlock(&dns_cache_lock);
+}
+
+/*
+ * Merge the addresses in @ip_list into the cache entry for domain @name,
+ * keeping IPv4 and IPv6 addresses in their separate per-family lists and
+ * honouring the per-mount link-count limit.  Unknown address families
+ * are skipped; an unknown @name is ignored.
+ */
+void enfs_update_domain_name(char *name, struct nfs_ip_list *ip_list)
+{
+    struct name_list *entry;
+    struct sockaddr *sa;
+    int idx;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry (entry, &dns_cache_list, next) {
+        if (strcmp(name, entry->name) != 0)
+            continue;
+
+        for (idx = 0; idx < ip_list->count; idx++) {
+            sa = (struct sockaddr *)&ip_list->address[idx];
+            switch (sa->sa_family) {
+            case AF_INET:
+                enfs_insert_ip_list(&entry->inet,
+                    enfs_get_config_link_count_per_mount(),
+                    &ip_list->address[idx]);
+                break;
+            case AF_INET6:
+                enfs_insert_ip_list(&entry->inet6,
+                    enfs_get_config_link_count_per_mount(),
+                    &ip_list->address[idx]);
+                break;
+            default:
+                break;
+            }
+        }
+        break;
+    }
+    spin_unlock(&dns_cache_lock);
+}
+
+/*
+ * Promote the background (freshly resolved) address lists of every cache
+ * entry to the active front-end lists; an empty background list leaves
+ * the corresponding active list untouched.
+ */
+void enfs_swap_name_cache(void)
+{
+    struct name_list *entry;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry (entry, &dns_cache_list, next) {
+        if (entry->inet_bc.count != 0) {
+            entry->inet = entry->inet_bc;
+            entry->inet_bc.count = 0;
+        }
+
+        if (entry->inet6_bc.count != 0) {
+            entry->inet6 = entry->inet6_bc;
+            entry->inet6_bc.count = 0;
+        }
+    }
+    spin_unlock(&dns_cache_lock);
+}
+
+/*
+ * Take a reference on the DNS-cache entry for @name, creating the entry
+ * on first use.  @name must already be length-checked by the caller
+ * (enfs_valid_dns at mount/remount time).
+ */
+void enfs_domain_inc(char *name)
+{
+    struct name_list *entry;
+    bool found = false;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry (entry, &dns_cache_list, next) {
+        if (strcmp(name, entry->name) == 0) {
+            entry->ref++;
+            found = true;
+            break;
+        }
+    }
+
+    if (!found) {
+        /* GFP_ATOMIC: dns_cache_lock is a spinlock and this path is also
+         * reached under nn->nfs_client_lock (via collect_clnt_name), so
+         * the original GFP_KERNEL could sleep in atomic context.  Using
+         * a found flag also avoids testing the loop cursor against the
+         * list head after iteration. */
+        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+        if (!entry) {
+            spin_unlock(&dns_cache_lock);
+            enfs_log_error("alloc failed.\n");
+            return;
+        }
+        /*
+         * The length must be verified in the mount|remount phase
+         * (enfs_valid_dns) .
+         */
+        strcpy(entry->name, name);
+        entry->ref = 1;
+        list_add_tail(&entry->next, &dns_cache_list);
+    }
+
+    spin_unlock(&dns_cache_lock);
+}
+
+/* Return true when @addr already appears in @ip_list. */
+bool enfs_ip_list_contain(struct nfs_ip_list *ip_list,
+    struct sockaddr_storage *addr)
+{
+    int idx;
+
+    for (idx = 0; idx < ip_list->count; idx++) {
+        if (rpc_cmp_addr((struct sockaddr *)&ip_list->address[idx],
+                (struct sockaddr *)addr))
+            return true;
+    }
+
+    return false;
+}
+
+/*
+ * Append @addr to @ip_list unless the list is full (@max) or the address
+ * is already present.  Returns true when the address was inserted.
+ */
+bool enfs_insert_ip_list(struct nfs_ip_list *ip_list, int max,
+    struct sockaddr_storage *addr)
+{
+    int i;
+
+    if (!ip_list || ip_list->count >= max) {
+        return false;
+    }
+
+    for (i = 0; i < ip_list->count; i++) {
+        if (rpc_cmp_addr((struct sockaddr *)&ip_list->address[i],
+                (struct sockaddr *)addr)) {
+            return false;
+        }
+    }
+
+    /* count < max was checked above, so the next slot is always valid;
+     * the original re-tested "i < max" here, a branch that can never be
+     * false. */
+    ip_list->address[ip_list->count] = *addr;
+    ip_list->count++;
+    return true;
+}
+
+/*
+ * Copy addresses from @src into @dst until the per-mount limit is hit or
+ * the slot budget *@tmp_slot runs out; every successful (non-duplicate)
+ * insert consumes one slot.
+ */
+void ip_list_append(struct nfs_ip_list *dst, struct nfs_ip_list *src,
+    int *tmp_slot)
+{
+    int idx;
+
+    for (idx = 0; idx < src->count; idx++) {
+        if (*tmp_slot == 0)
+            break;
+        if (enfs_insert_ip_list(dst,
+                enfs_get_config_link_count_per_mount(),
+                &src->address[idx]))
+            (*tmp_slot)--;
+    }
+}
+
+// Parse a comma-separated IP string returned by the DNS helper into
+// @ip_list.  Stops early (returning 0) once the per-mount link limit is
+// reached or the slot budget *@tmp_slot is exhausted; each successful
+// insert consumes one slot.  Returns -ESRCH on a malformed address.
+// Note: strsep() consumes @dns_result in place.
+static int dns_resolver_name_list(char *dns_result, int *tmp_slot,
+ struct nfs_ip_list *ip_list)
+{
+ int error = 0;
+ ssize_t ip_len;
+ char *ip_str;
+ struct sockaddr_storage sa;
+
+ enfs_log_debug(" resolver name list\n");
+ ip_str = strsep(&dns_result, ",");
+ while (ip_str) {
+ // Limit or budget reached: success with whatever was collected.
+ if (ip_list->count == enfs_get_config_link_count_per_mount() || *tmp_slot == 0) {
+ error = 0;
+ break;
+ }
+
+ ip_len = rpc_pton(NULL, ip_str, strlen(ip_str), (struct sockaddr *)&sa,
+ sizeof(sa));
+ if (ip_len <= 0) {
+ enfs_log_error("pton name:%s failed.\n", ip_str);
+ error = -ESRCH;
+ break;
+ }
+
+ // Duplicates are silently skipped and do not consume a slot.
+ if (enfs_insert_ip_list(ip_list, enfs_get_config_link_count_per_mount(), &sa)) {
+ (*tmp_slot)--;
+ }
+
+ ip_str = strsep(&dns_result, ",");
+ }
+
+ return error;
+}
+
+/**
+ * Query DNS for @name and fold the returned address list into @ip_list.
+ * A DNS server may return a single IP per query, so callers may invoke
+ * this several times to collect distinct addresses.
+ * @slot: unused here; budget distribution when multiple domain names.
+ * @tmp_slot: remaining insert budget, decremented per inserted address.
+ * @options: "ipv4 list" or "ipv6 list" record-type selector.
+ * Returns 0 on success, -ESRCH when the resolver returned nothing.
+ */
+static int multi_query_dns(struct nfs_ip_list *ip_list, char *name, int slot,
+ int *tmp_slot, const char *options)
+{
+ int error;
+ char *ip_addr = NULL;
+ int ip_len;
+
+// Kernel-version split: openEuler 5.10/6.6 resolver takes a net namespace.
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+ struct net *net;
+ net = current->nsproxy->net_ns;
+ enfs_log_debug("domain_name:%s option:%s\n", name, options);
+ ip_len = enfs_euler_dns_query(net, NULL, name, strlen(name), options, &ip_addr, NULL, true);
+#else
+ enfs_log_debug("domain_name:%s option:%s\n", name, options);
+ ip_len = enfs_dns_query(NULL, name, strlen(name), options, &ip_addr, NULL);
+#endif
+ if (ip_len <= 0) {
+ // NOTE(review): assumes the resolver allocates nothing on failure --
+ // confirm ip_addr cannot leak on this path.
+ enfs_log_info("dns query:%s error.\n", ip_addr);
+ return -ESRCH;
+ }
+
+ /*
+ * Note:
+ * Query domain name list, query only once.
+ * Now,the executable program in user space must return the IP list.
+ */
+ error = dns_resolver_name_list(ip_addr, tmp_slot, ip_list);
+ kfree(ip_addr);
+ return error;
+}
+
+// Query DNS according to the mount target's address family:
+// - AF_INET6 targets query AAAA records first, then A records, and
+//   always report success (IPv6 resolution is allowed to fail);
+// - any other family performs an A-record (IPv4) query only.
+static int query_dns_cross_protocol(struct nfs_ip_list *ip_list, char *name,
+    int slot, unsigned short family)
+{
+    int err;
+    int remaining = slot;
+
+    if (family != AF_INET6) {
+        err = multi_query_dns(ip_list, name, slot, &remaining, "ipv4 list");
+        if (err)
+            enfs_log_error("dns query name:%s type:A err:%d.\n", name, err);
+        return err;
+    }
+
+    err = multi_query_dns(ip_list, name, slot, &remaining, "ipv6 list");
+    if (err)
+        enfs_log_error("dns query name:%s type:AAAA err:%d.\n", name, err);
+
+    err = multi_query_dns(ip_list, name, slot, &remaining, "ipv4 list");
+    if (err)
+        enfs_log_error("dns query name:%s type:A err:%d.\n", name, err);
+
+    return 0;
+}
+
+/*
+ * Try to satisfy a query for @name from the in-kernel DNS cache.
+ * Slot budget: with two domain names each gets 16 slots, a single name
+ * gets 32.  AF_INET takes IPv4 addresses only; AF_INET6 prefers IPv6 and
+ * then adds IPv4.  Returns true when at least one cached address was
+ * copied into @ip_list.
+ */
+static bool query_domain_name_in_cache(struct nfs_ip_list *ip_list, char *name,
+    int slot, unsigned short family)
+{
+    struct name_list *entry;
+    int budget = slot;
+    bool hit = false;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry (entry, &dns_cache_list, next) {
+        if (strcmp(entry->name, name) != 0)
+            continue;
+
+        if (family == AF_INET6)
+            ip_list_append(ip_list, &entry->inet6, &budget);
+        ip_list_append(ip_list, &entry->inet, &budget);
+
+        /* Any consumed slot means the cache produced something. */
+        hit = (budget != slot);
+        break;
+    }
+    spin_unlock(&dns_cache_lock);
+    return hit;
+}
+
+// Recursive quicksort of dnsQueryIpInfo[low..high] by lsId, moving the
+// paired ipAddr strings along with the keys.  Always returns 0.
+// NOTE(review): the pivot's IP is parked in the file-scope buffer
+// dns_sort_ip, so this sort is NOT reentrant -- confirm all callers are
+// serialised (currently only the DNS work path).  Recursion depth is
+// bounded by the node count (<= 256 per the caller's comment) but still
+// consumes kernel stack per level.
+int enfs_quick_sort(int low, int high, DNS_QUERY_IP_INFO_SINGLE *dnsQueryIpInfo)
+{
+ int i = low;
+ int j = high;
+ uint64_t key = dnsQueryIpInfo[i].lsId;
+ strcpy(dns_sort_ip, dnsQueryIpInfo[i].ipAddr);
+
+ // Classic Hoare-style partition around the saved pivot (key/dns_sort_ip).
+ while (i < j) {
+ while (i < j && dnsQueryIpInfo[j].lsId >= key) {
+ j--;
+ }
+ dnsQueryIpInfo[i].lsId = dnsQueryIpInfo[j].lsId;
+ strcpy(dnsQueryIpInfo[i].ipAddr, dnsQueryIpInfo[j].ipAddr);
+
+ while (i < j && dnsQueryIpInfo[i].lsId <= key) {
+ i++;
+ }
+ dnsQueryIpInfo[j].lsId = dnsQueryIpInfo[i].lsId;
+ strcpy(dnsQueryIpInfo[j].ipAddr, dnsQueryIpInfo[i].ipAddr);
+ }
+ // Drop the pivot into its final slot, then recurse on both halves.
+ dnsQueryIpInfo[i].lsId = key;
+ strcpy(dnsQueryIpInfo[i].ipAddr, dns_sort_ip);
+ if (i - 1 > low) {
+ enfs_quick_sort(low, i - 1, dnsQueryIpInfo);
+ }
+
+ if (i + 1 < high) {
+ enfs_quick_sort(i + 1, high, dnsQueryIpInfo);
+ }
+ memset(dns_sort_ip, 0, sizeof(dns_sort_ip));
+
+ return 0;
+}
+
+/*
+ * Sort @dnsQueryIpInfo by controller (lsId) and build one
+ * DNS_QUERY_LSID_INFO descriptor per distinct lsId so the caller can
+ * pick addresses round-robin across controllers.  On success the caller
+ * owns *@dnsQueryLsidInfo and must kfree() it.
+ * Returns 0 on success, -EINVAL for an empty/NULL input (the original
+ * would have called enfs_quick_sort(0, -1, ...) and written element 0 of
+ * an empty array), -ENOMEM on allocation failure.
+ */
+int enfs_dns_process_ip(DNS_QUERY_IP_INFO_SINGLE *dnsQueryIpInfo, DNS_QUERY_LSID_INFO **dnsQueryLsidInfo,
+ int *lsidCount, int ipNumber)
+{
+    int i;
+    int index = 0;
+    int count = 1;
+    DNS_QUERY_LSID_INFO *lsIdInfo = NULL;
+
+    if (dnsQueryIpInfo == NULL || ipNumber <= 0) {
+        return -EINVAL;
+    }
+
+    // sort all ip by lsid
+    enfs_quick_sort(0, ipNumber - 1, dnsQueryIpInfo);
+    // Count distinct lsIds (entries are now grouped by the sort).
+    for (i = 1; i < ipNumber; i++) {
+        if (dnsQueryIpInfo[i - 1].lsId != dnsQueryIpInfo[i].lsId) {
+            count++;
+        }
+    }
+
+    *lsidCount = count;
+    // Combine IPs by lsid: consecutive entries sharing an lsId fold into
+    // one descriptor.  kcalloc() checks count * size for overflow and
+    // zero-initialises the array.
+    lsIdInfo = kcalloc(count, sizeof(DNS_QUERY_LSID_INFO), GFP_KERNEL);
+    if (lsIdInfo == NULL) {
+        printk(KERN_ERR "%s:kmalloc failed.\n", __FUNCTION__);
+        return -ENOMEM;
+    }
+
+    for (i = 0; i < ipNumber; i++) {
+        if (i != 0 && dnsQueryIpInfo[i - 1].lsId == dnsQueryIpInfo[i].lsId) {
+            lsIdInfo[index].count++;
+            continue;
+        }
+        if (i != 0) {
+            index++;
+        }
+        lsIdInfo[index].lsId = dnsQueryIpInfo[i].lsId;
+        lsIdInfo[index].offset = 0;
+        lsIdInfo[index].count = 1;
+    }
+
+    *dnsQueryLsidInfo = lsIdInfo;
+    return 0;
+}
+
+/*
+ * Resolve the mount's domain names through the storage array itself
+ * (dorado_query_dns) and distribute the returned addresses round-robin
+ * across controllers (lsIds) into @ipList.
+ * Fixes vs. the original: @dnsQueryIpInfo is no longer leaked when
+ * enfs_dns_process_ip() fails; the per-group base offset is now computed
+ * even when a group is exhausted (the old code mis-indexed uneven
+ * groups); and a full pass with no progress terminates the loop instead
+ * of spinning forever when only duplicate addresses remain.
+ */
+int enfs_server_query_dns(struct rpc_clnt *clnt, NFS_ROUTE_DNS_INFO_S *dns_info, struct nfs_ip_list *ipList,
+ int slot, uint32_t ip_type, uint32_t dnsNamecount, char *dnsName)
+{
+    int ret;
+    int i;
+    int offset;
+    int tmpSlot = slot;
+    int limit;
+    int progress;
+    DNS_QUERY_LSID_INFO *dnsQueryLsidInfo = NULL;
+    DNS_QUERY_IP_INFO_SINGLE *dnsQueryIpInfo = NULL;
+    // At most 256 nodes per the array-side contract.
+    int ipNumber;
+    int lsidCount;
+
+    ret = dorado_query_dns(clnt, &dnsQueryIpInfo, ip_type, dnsNamecount, dnsName, &ipNumber);
+    if (ret) {
+        return ret;
+    }
+
+    if (dnsQueryIpInfo == NULL) {
+        return ret;
+    }
+
+    ret = enfs_dns_process_ip(dnsQueryIpInfo, &dnsQueryLsidInfo, &lsidCount, ipNumber);
+    if (ret) {
+        goto out; /* the original leaked dnsQueryIpInfo here */
+    }
+
+    limit = enfs_get_config_link_count_per_mount();
+    if (limit > ipNumber) {
+        limit = ipNumber;
+    }
+
+    /* Round-robin: take one address from each lsId group per pass. */
+    i = 0;
+    offset = 0;   /* base index of group i within dnsQueryIpInfo */
+    progress = 0; /* did this pass consume any entry? */
+    while (ipList->count < limit) {
+        if (dnsQueryLsidInfo[i].offset < dnsQueryLsidInfo[i].count) {
+            ret = dns_resolver_name_list(dnsQueryIpInfo[offset + dnsQueryLsidInfo[i].offset].ipAddr,
+                &tmpSlot, ipList);
+            if (ret) {
+                goto out;
+            }
+            dnsQueryLsidInfo[i].offset++;
+            progress = 1;
+        }
+        /* Advance the base unconditionally so indexing stays correct
+         * even when a group is already exhausted. */
+        offset += dnsQueryLsidInfo[i].count;
+        i++;
+        if (i == lsidCount) {
+            if (!progress) {
+                break; /* every group exhausted: avoid an endless loop */
+            }
+            progress = 0;
+            offset = 0;
+            i = 0;
+        }
+    }
+
+out:
+    kfree(dnsQueryIpInfo);
+    kfree(dnsQueryLsidInfo); /* kfree(NULL) is a no-op */
+    return ret;
+}
+
+/*
+ * Resolve every domain name in @dns_info: prefer the local cache when
+ * @use_cache is set, otherwise (or on a cache miss) perform a live DNS
+ * query and fold fresh results back into the cache.
+ */
+void query_dns_each_name(NFS_ROUTE_DNS_INFO_S *dns_info, int slot,
+    struct nfs_ip_list *ipList, unsigned short family, bool use_cache)
+{
+    char *name;
+    int err;
+    int idx;
+
+    for (idx = 0; idx < dns_info->dnsNameCount; idx++) {
+        name = dns_info->routeRemoteDnsList[idx].dnsname;
+        enfs_log_info("query DNS:%s\n", name);
+
+        if (use_cache &&
+            query_domain_name_in_cache(ipList, name, slot, family)) {
+            enfs_log_debug("cache name:%s.\n", name);
+            continue;
+        }
+
+        err = query_dns_cross_protocol(ipList, name, slot, family);
+        if (err != 0) {
+            enfs_log_error("dns multi query dns failed.\n");
+            continue;
+        }
+        enfs_update_domain_name(name, ipList);
+    }
+}
+
+/*
+ * Resolve all configured domain names for a mount into
+ * @opt->remote_ip_list.  When an rpc client is supplied, the storage
+ * array's own DNS service is preferred (falling back to per-name kernel
+ * DNS queries on failure); otherwise the kernel resolver is used
+ * directly.
+ * Returns 0 on success, -EINVAL for bad options, -ENOMEM on allocation
+ * failure, -ESRCH when no address at all could be resolved.
+ */
+int multipath_query_dns(struct multipath_mount_options *opt,
+    unsigned short family, bool use_cache, struct rpc_clnt *clnt)
+{
+    int ret;
+    int i;
+    int slot = 0;
+    NFS_ROUTE_DNS_INFO_S *dns_info;
+    char *dnsName = NULL;
+    struct nfs_ip_list *ip_list;
+    uint32_t ip_type = 0;
+
+    if (!opt->pRemoteDnsInfo || opt->pRemoteDnsInfo->dnsNameCount <= 0 ||
+        opt->pRemoteDnsInfo->dnsNameCount > MAX_DNS_SUPPORTED) {
+        return -EINVAL;
+    }
+
+    ip_list = kmalloc(sizeof(*ip_list), GFP_KERNEL);
+    if (!ip_list) {
+        return -ENOMEM;
+    }
+    ip_list->count = 0;
+    dns_info = opt->pRemoteDnsInfo;
+    /* dnsNameCount <= MAX_DNS_SUPPORTED (checked above); kmalloc_array
+     * additionally guards the multiplication against overflow. */
+    dnsName = kmalloc_array(dns_info->dnsNameCount, EXTEND_MAX_DNS_NAME_LEN, GFP_KERNEL);
+    if (!dnsName) {
+        kfree(ip_list);
+        return -ENOMEM;
+    }
+
+    if (clnt) {
+        if (family == AF_INET6) {
+            ip_type = IP_TYPE_BOTH;
+        }
+        for (i = 0; i < dns_info->dnsNameCount; i++) {
+            /* Bounded copy: the original sprintf could overrun the
+             * EXTEND_MAX_DNS_NAME_LEN stride for an oversized name. */
+            snprintf(dnsName + i * EXTEND_MAX_DNS_NAME_LEN, EXTEND_MAX_DNS_NAME_LEN,
+                "%s", dns_info->routeRemoteDnsList[i].dnsname);
+        }
+
+        /* Split the per-mount link budget evenly across the names. */
+        slot = enfs_get_config_link_count_per_mount() / dns_info->dnsNameCount;
+        ret = enfs_server_query_dns(clnt, dns_info, ip_list, enfs_get_config_link_count_per_mount(),
+            ip_type, dns_info->dnsNameCount, dnsName);
+        if (ret != 0) {
+            /* Array-side resolution failed: fall back to the kernel
+             * DNS resolver. */
+            query_dns_each_name(dns_info, slot, ip_list, family, use_cache);
+        }
+    } else {
+        query_dns_each_name(dns_info, slot, ip_list, family, use_cache);
+    }
+
+    kfree(dnsName);
+    if (ip_list->count == 0) {
+        enfs_log_error("query dns failed, no IP is found.\n");
+        kfree(ip_list);
+        return -ESRCH;
+    }
+
+    memcpy(opt->remote_ip_list, ip_list, sizeof(struct nfs_ip_list));
+    kfree(ip_list);
+    return 0;
+}
+
+// Invoke @fn on every nfs_client that has multipath data, holding the
+// per-net nfs_client_lock; a non-zero return from @fn stops the walk and
+// is passed back to the caller.  @fn runs under a spinlock (and inside
+// rcu_read_lock) and therefore must not sleep.
+// Note: only the first net namespace with a non-empty client list is
+// visited -- the loop breaks after processing it, matching the other
+// per-net walkers in this module.
+int enfs_for_each_nfs_clnt(int (*fn)(struct nfs_client *clp, void *data),
+ void *data)
+{
+ struct net *net;
+ struct nfs_net *nn;
+ struct nfs_client *clp;
+ int ret = 0;
+
+ rcu_read_lock();
+ for_each_net_rcu (net) {
+ nn = net_generic(net, nfs_net_id);
+ if (nn == NULL) {
+ continue;
+ }
+
+ if (list_empty(&nn->nfs_client_list)) {
+ continue;
+ }
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry (clp, &nn->nfs_client_list, cl_share_link) {
+ // Clients without multipath data are of no interest here.
+ if (!clp->cl_multipath_data)
+ continue;
+
+ ret = fn(clp, data);
+ if (ret != 0) {
+ break;
+ }
+ }
+ spin_unlock(&nn->nfs_client_lock);
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Take a DNS-cache reference for every domain name configured in the
+ * mount options @opt (capped at MAX_DNS_SUPPORTED names). */
+void enfs_add_domain_name(struct multipath_mount_options *opt)
+{
+    int idx;
+    int limit;
+
+    if (opt->pRemoteDnsInfo == NULL || opt->pRemoteDnsInfo->dnsNameCount == 0) {
+        return;
+    }
+
+    limit = opt->pRemoteDnsInfo->dnsNameCount;
+    if (limit > MAX_DNS_SUPPORTED)
+        limit = MAX_DNS_SUPPORTED;
+
+    for (idx = 0; idx < limit; idx++)
+        enfs_domain_inc(opt->pRemoteDnsInfo->routeRemoteDnsList[idx].dnsname);
+}
+
+/*
+ * enfs_for_each_nfs_clnt() callback: take one DNS-cache reference for
+ * each domain name configured on @clp.  Runs under nn->nfs_client_lock.
+ * Always returns 0 so the walk continues.
+ */
+static int collect_clnt_name(struct nfs_client *clp, void *data)
+{
+    int i;
+    struct multipath_client_info *clp_info = clp->cl_multipath_data;
+
+    /* A client may carry multipath data but no DNS configuration (e.g.
+     * mounted with literal IPs); the original dereferenced
+     * pRemoteDnsInfo without this check. */
+    if (clp_info == NULL || clp_info->pRemoteDnsInfo == NULL) {
+        return 0;
+    }
+
+    for (i = 0;
+        i < MAX_DNS_SUPPORTED && i < clp_info->pRemoteDnsInfo->dnsNameCount;
+        i++) {
+        enfs_domain_inc(
+            clp_info->pRemoteDnsInfo->routeRemoteDnsList[i].dnsname);
+    }
+    return 0;
+}
+
+// Rebuild the DNS-cache reference counts from the current set of NFS
+// clients and free cache entries no client references any more.
+// NOTE(review): step 2 runs collect_clnt_name under nn->nfs_client_lock,
+// so any allocation reachable from enfs_domain_inc() must be atomic.
+static int enfs_collect_all_domain_name(void)
+{
+ struct name_list *ls;
+ struct name_list *ls_next;
+
+ /* 1. clear domain name ref */
+ spin_lock(&dns_cache_lock);
+ list_for_each_entry (ls, &dns_cache_list, next) {
+ ls->ref = 0;
+ }
+ spin_unlock(&dns_cache_lock);
+
+ /* 2. add refrence*/
+ enfs_for_each_nfs_clnt(collect_clnt_name, NULL);
+
+ /* 3. release 0 refrence name*/
+ spin_lock(&dns_cache_lock);
+ list_for_each_entry_safe (ls, ls_next, &dns_cache_list, next) {
+ if (ls->ref == 0) {
+ list_del(&ls->next);
+ kfree(ls);
+ }
+ }
+ spin_unlock(&dns_cache_lock);
+ return 0;
+}
+
+/*
+ * Walk the DNS cache under dns_cache_lock, invoking @func on each entry;
+ * a non-zero return stops the walk.  The _safe iteration allows @func to
+ * remove the current entry.  @func must not sleep (spinlock held).
+ */
+void enfs_domain_for_each(int (*func)(struct name_list *, void *), void *data)
+{
+    struct name_list *entry;
+    struct name_list *tmp;
+
+    spin_lock(&dns_cache_lock);
+    list_for_each_entry_safe (entry, tmp, &dns_cache_list, next) {
+        if (func(entry, data))
+            break;
+    }
+    spin_unlock(&dns_cache_lock);
+}
+
+/* One queued asynchronous DNS re-query: resolves @name and collects the
+ * result in @ip_list before it is merged back into the global cache. */
+struct query_name_work {
+ struct work_struct work;
+ char name[MAX_DNS_NAME_LEN];
+ struct nfs_ip_list ip_list;
+};
+
+// Work handler for domain_name_update(): re-resolve one cached domain
+// name for both address families and merge any results back into the DNS
+// cache.  A throwaway mount-options struct is built so the shared
+// multipath_query_dns() helper can be reused; only the fields it reads
+// (remote_ip_list, pRemoteDnsInfo) are initialised.
+static void do_dns_update_new(struct work_struct *work)
+{
+ int error;
+ struct multipath_mount_options opt;
+ NFS_ROUTE_DNS_INFO_S dns_info;
+ struct query_name_work *query_work =
+ container_of(work, struct query_name_work, work);
+
+ dns_info.dnsNameCount = 1;
+ strcpy(dns_info.routeRemoteDnsList[0].dnsname, query_work->name);
+ opt.remote_ip_list = &query_work->ip_list;
+ opt.pRemoteDnsInfo = &dns_info;
+
+ // IPv4 first; multipath_query_dns overwrites remote_ip_list each call.
+ error = multipath_query_dns(&opt, AF_INET, false, NULL);
+ if (error != 0) {
+ enfs_log_info("Scheduled update dns err:%d.\n", error);
+ } else {
+ enfs_update_domain_name(query_work->name, &query_work->ip_list);
+ }
+
+ // Then IPv6; each family's results are merged into the cache separately.
+ error = multipath_query_dns(&opt, AF_INET6, false, NULL);
+ if (error != 0) {
+ enfs_log_info("Scheduled update dns err:%d.\n", error);
+ } else {
+ enfs_update_domain_name(query_work->name, &query_work->ip_list);
+ }
+
+ kfree(query_work);
+}
+
+/*
+ * enfs_domain_for_each() callback: queue an asynchronous re-query for
+ * one cached domain name.  Always returns 0 so the cache walk continues
+ * (allocation or queueing failures are logged and skipped).
+ */
+static int domain_name_update(struct name_list *name_list, void *data)
+{
+    bool ok;
+    struct query_name_work *query_work;
+
+    /* GFP_ATOMIC: the caller (enfs_domain_for_each) invokes this
+     * callback while holding the dns_cache_lock spinlock, so the
+     * original GFP_KERNEL allocation could sleep in atomic context. */
+    query_work = kmalloc(sizeof(*query_work), GFP_ATOMIC);
+    if (!query_work) {
+        enfs_log_error("alloc failed.\n");
+        return 0;
+    }
+
+    INIT_WORK(&query_work->work, do_dns_update_new);
+    /* name length is validated at mount time (enfs_valid_dns). */
+    strcpy(query_work->name, name_list->name);
+    memset(&query_work->ip_list, 0, sizeof(struct nfs_ip_list));
+
+    ok = queue_work(dns_workq, &query_work->work);
+    if (!ok) {
+        kfree(query_work);
+        enfs_log_info("queue work failed\n");
+    }
+    return 0;
+}
+
+/* Context for one asynchronous per-client DNS refresh.  Owns one
+ * reference on @clp and one on @clRpcclient (taken in dns_update_work)
+ * plus a deep copy of the client's multipath config in @clp_info. */
+struct dns_work {
+ struct work_struct wk_work;
+ struct nfs_client *clp;
+ struct multipath_client_info *clp_info;
+ struct rpc_clnt *clRpcclient;
+ struct sockaddr_storage ss; // peer address; its family selects v4/v6 query
+ bool query_ok; // cleared when the DNS query failed; skips the remount
+};
+
+// Locate the nfs_client matching @work->clp and remount it with the
+// freshly resolved address list (skipped when the DNS query failed).
+// Always clears the live client's updating_domain flag and returns 1 to
+// stop the walk once the matching client has been handled; returns 0 to
+// continue past non-matching clients.
+static int find_and_remount(struct nfs_client *clp, void *data)
+{
+ int error;
+ struct dns_work *work = data;
+ struct multipath_mount_options opt;
+ struct multipath_client_info *clp_info = work->clp_info;
+ struct multipath_client_info *info = clp->cl_multipath_data;
+
+ if (clp != work->clp) {
+ return 0;
+ }
+
+ if (!work->query_ok) {
+ goto do_err;
+ }
+
+ // Remount using the snapshot taken in dns_update_work (clp_info), not
+ // the client's live configuration.
+ opt.remote_ip_list = clp_info->remote_ip_list;
+ opt.local_ip_list = clp_info->local_ip_list;
+ opt.pRemoteDnsInfo = clp_info->pRemoteDnsInfo;
+ error = enfs_remount_iplist(clp, &opt);
+ if (error != 0) {
+ enfs_log_info("Scheduled remount err:%d.\n", error);
+ }
+
+do_err:
+ /* set domain name update flag in nfs client list spin_lock */
+ info->updating_domain = 0;
+ return 1;
+}
+
+/*
+ * Work handler: re-resolve the domain names for one nfs_client and, on
+ * success, remount it with the refreshed address list.  Drops the
+ * nfs_client reference taken by dns_update_work().
+ * NOTE(review): the rpc_clnt reference taken alongside it is not
+ * visibly released on this path (see the retained commented-out call) --
+ * confirm it is dropped elsewhere or this leaks a cl_count reference.
+ */
+static void do_dns_update(struct work_struct *work)
+{
+    struct dns_work *dwork = container_of(work, struct dns_work, wk_work);
+    struct multipath_client_info *snapshot = dwork->clp_info;
+    struct multipath_mount_options opt;
+    int err;
+
+    opt.remote_ip_list = snapshot->remote_ip_list;
+    opt.local_ip_list = snapshot->local_ip_list;
+    opt.pRemoteDnsInfo = snapshot->pRemoteDnsInfo;
+
+    err = multipath_query_dns(&opt, dwork->ss.ss_family, true, dwork->clRpcclient);
+    if (err != 0) {
+        enfs_log_info("Scheduled update dns err:%d.\n", err);
+        dwork->query_ok = false;
+    }
+
+    find_and_remount(dwork->clp, dwork);
+
+    enfs_free_nfsclient_info(dwork->clp_info);
+    nfs_put_client(dwork->clp);
+    //rpc_release_client(wk->clRpcclient);
+    kfree(dwork);
+}
+
+/*
+ * Snapshot @clp's multipath configuration and queue a DNS re-query work
+ * item for it.  On success the work owns one nfs_client reference and
+ * one rpc_clnt reference, both released by do_dns_update().
+ * Returns 0 on success or when the rpc client is already dying, a
+ * negative errno / -1 otherwise.
+ * NOTE(review): called from requery_clnt_dns() while the caller holds
+ * nn->nfs_client_lock -- GFP_KERNEL here (and inside
+ * enfs_alloc_nfsclient_info) may sleep; confirm and make atomic.
+ */
+static int dns_update_work(struct nfs_client *clp, void *data)
+{
+    bool ok;
+    int error;
+    struct dns_work *wk;
+    struct multipath_client_info *clp_info = clp->cl_multipath_data;
+    struct list_head *list = (struct list_head *)data;
+    struct clnt_release_item *item;
+
+    wk = kzalloc(sizeof(*wk), GFP_KERNEL);
+    if (!wk) {
+        return -ENOMEM;
+    }
+
+    error = enfs_alloc_nfsclient_info(&wk->clp_info);
+    if (error) {
+        kfree(wk);
+        return -ENOMEM;
+    }
+
+    /* Pre-allocate the release item so the queue-failure path never has
+     * to allocate. */
+    item = kzalloc(sizeof(*item), GFP_KERNEL);
+    if (!item) {
+        enfs_free_nfsclient_info(wk->clp_info);
+        kfree(wk);
+        return -ENOMEM;
+    }
+    /* set domain name update flag */
+    clp_info->updating_domain = 1;
+
+    /* Deep-copy the client's multipath state so the work runs against a
+     * stable snapshot instead of the live configuration. */
+    *wk->clp_info->remote_ip_list = *clp_info->remote_ip_list;
+    *wk->clp_info->local_ip_list = *clp_info->local_ip_list;
+    *wk->clp_info->pRemoteDnsInfo = *clp_info->pRemoteDnsInfo;
+#ifdef ENFS_OPENEULER_660
+    if (!refcount_inc_not_zero(&clp->cl_rpcclient->cl_count)) {
+#else
+    if (!atomic_inc_not_zero(&clp->cl_rpcclient->cl_count)) {
+#endif
+        /* The rpc client is being torn down: undo and bail out.  The
+         * original fell through here after kfree(wk) and kept using wk
+         * -- a use-after-free -- and never cleared updating_domain. */
+        clp_info->updating_domain = 0;
+        enfs_free_nfsclient_info(wk->clp_info);
+        kfree(wk);
+        kfree(item);
+        return 0;
+    }
+    nfsclient_refinc(&clp->cl_count);
+    wk->clp = clp;
+    wk->clRpcclient = clp->cl_rpcclient;
+    INIT_WORK(&wk->wk_work, do_dns_update);
+    rpc_peeraddr(clp->cl_rpcclient, (struct sockaddr *)&wk->ss, sizeof(struct sockaddr_storage));
+    wk->query_ok = true;
+
+    ok = queue_work(dns_workq, &wk->wk_work);
+    if (!ok) {
+        /* Work already pending: hand both references to the caller's
+         * release list and give up on this cycle. */
+        clp_info->updating_domain = 0;
+        enfs_free_nfsclient_info(wk->clp_info);
+        item->clnt = wk->clRpcclient;
+        item->client = wk->clp;
+        list_add_tail(&item->node, list);
+        kfree(wk);
+        return -1;
+    }
+    kfree(item);
+    return 0;
+}
+
+/*
+ * Per-client callback run from the DNS refresh thread: queue a DNS
+ * re-query for @clp unless it is unsuitable (no multipath data, no rpc
+ * client, static local IPs, an update already in flight, not ready, or
+ * no domain names configured).  Returns 1 only to abort the walk when
+ * the refresh thread itself is being stopped.
+ */
+static int requery_clnt_dns(struct nfs_client *clp, void *data)
+{
+    struct multipath_client_info *cinfo = clp->cl_multipath_data;
+    int err;
+
+    if (kthread_should_stop())
+        return 1;
+
+    if (cinfo == NULL || clp->cl_rpcclient == NULL || cinfo->fill_local ||
+        cinfo->updating_domain)
+        return 0;
+
+    if (clp->cl_cons_state > NFS_CS_READY) {
+        enfs_log_info("client not ready.\n");
+        return 0;
+    }
+
+    if (cinfo->pRemoteDnsInfo == NULL ||
+        cinfo->pRemoteDnsInfo->dnsNameCount == 0)
+        return 0;
+
+    err = dns_update_work(clp, data);
+    if (err)
+        enfs_log_info("dns update queue err:%d\n", err);
+    return 0;
+}
+
+// Kernel thread driving periodic DNS refresh.  The configured update
+// interval (minutes) is divided into five sub-intervals: every elapsed
+// sub-interval re-queries the cached names in the background, and after
+// query_times of those ticks the background results are promoted and the
+// affected clients are remounted.
+static int dns_update_loop(void *data)
+{
+ int32_t interval_ms;
+ ktime_t start = ktime_get();
+ const int query_times = 5; /* Update after multiple queries */
+ int times = 0;
+ LIST_HEAD(free_list);
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure the domain name is queried more
+ * than 5 times before being remount.
+ */
+ interval_ms = enfs_get_config_dns_update_interval() * 60 * 1000 / 5;
+ // interval_ms == 0 disables refresh; multipath must be enabled.
+ if (interval_ms != 0 && enfs_timeout_ms(&start, interval_ms) && enfs_get_config_multipath_state() == ENFS_MULTIPATH_ENABLE) {
+ start = ktime_get();
+ // Drop stale names, then queue background re-queries for the rest.
+ enfs_collect_all_domain_name();
+ enfs_domain_for_each(domain_name_update, NULL);
+
+ if (times == query_times) {
+ // Promote background results and remount affected clients.
+ enfs_swap_name_cache();
+ enfs_for_each_nfs_clnt(requery_clnt_dns, &free_list);
+ enfs_destroy_clnt_list(&free_list);
+ enfs_log_debug("update DNS.");
+ times = 0;
+ }
+ times++;
+ }
+ enfs_msleep(1000);
+ }
+ return 0;
+}
+
+/*
+ * Bring up the DNS subsystem: resolver keyring, re-query work queue and
+ * the periodic refresh thread.  Partially initialised resources are torn
+ * down again on failure (the original leaked the resolver and the work
+ * queue on later failures and left dns_thread holding an error pointer).
+ * Returns 0 on success or a negative errno.
+ */
+int enfs_dns_init(void)
+{
+    int ret;
+
+    spin_lock_init(&dns_cache_lock);
+    ret = init_dns_resolver();
+    if (ret != 0) {
+        enfs_log_error("Init dns resolver err:%d.\n", ret);
+        return ret;
+    }
+
+    dns_workq = create_workqueue("enfs_dns_workqueue");
+    if (!dns_workq) {
+        enfs_log_error("create workqueue failed.\n");
+        ret = -ENOMEM;
+        goto out_resolver;
+    }
+
+    dns_thread = kthread_run(dns_update_loop, NULL, "enfs_dns_update");
+    if (IS_ERR(dns_thread)) {
+        enfs_log_error("Failed to create thread enfs_dns_update.\n");
+        ret = PTR_ERR(dns_thread);
+        /* Clear the handle so enfs_dns_exit() never stops an
+         * error-valued thread pointer. */
+        dns_thread = NULL;
+        goto out_workq;
+    }
+    return 0;
+
+out_workq:
+    destroy_workqueue(dns_workq);
+    dns_workq = NULL;
+out_resolver:
+    exit_dns_resolver();
+    return ret;
+}
+
+/* Stop the DNS refresh thread, drain and destroy the work queue, then
+ * release the kernel DNS resolver. */
+void enfs_dns_exit(void)
+{
+    if (dns_thread)
+        kthread_stop(dns_thread);
+
+    if (dns_workq) {
+        flush_workqueue(dns_workq);
+        destroy_workqueue(dns_workq);
+    }
+
+    exit_dns_resolver();
+}
diff --git a/fs/nfs/enfs/multipath/path_mgmt/enfs_multipath.c b/fs/nfs/enfs/multipath/path_mgmt/enfs_multipath.c
new file mode 100644
index 0000000..0f5a53d
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/enfs_multipath.c
@@ -0,0 +1,1080 @@
+#include "enfs_multipath.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs_config.h"
+#include "enfs_log.h"
+#include "enfs_multipath_parse.h"
+#include "enfs_path.h"
+#include "enfs_proc.h"
+#include "enfs_remount.h"
+#include "enfs_roundrobin.h"
+#include "enfs_tp_common.h"
+#include "failover_path.h"
+#include "failover_time.h"
+#include "pm_ping.h"
+#include "pm_state.h"
+#include "enfs_multipath.h"
+#include "shard.h"
+#include "exten_call.h"
+#include "enfs_rpc_proc.h"
+#include "dns_internal.h"
+
+/*
+ * Completion state shared by one batch of async path pings:
+ * @conditon counts outstanding pings, @waitq wakes the attach waiter.
+ */
+struct xprt_attach_callback_data {
+ atomic_t *conditon;
+ wait_queue_head_t *waitq;
+};
+
+/* One candidate local/remote address pair plus the xprt built for it. */
+struct xprt_attach_info {
+ struct sockaddr_storage *localAddress;
+ struct sockaddr_storage *remoteAddress;
+ struct rpc_xprt *xprt;
+ struct xprt_attach_callback_data *data;
+ int protocol;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(path_attach_wait_queue);
+/* global budget of transports across all enfs mounts */
+static spinlock_t link_count_lock;
+static int link_count = 0;
+/* number of enfs-managed mounts, bounded by ENFS_MAX_MOUNT_COUNT */
+static spinlock_t mount_count_lock;
+static int mount_count = 0;
+
+/*
+ * Adjust the global transport budget by @num. Releases (@num < 0)
+ * always succeed; reservations succeed only while the configured
+ * total would not be exceeded. Returns true when the count changed.
+ */
+bool enfs_link_count_add(int num)
+{
+ bool ok;
+
+ spin_lock(&link_count_lock);
+ if (num < 0)
+  ok = true;
+ else
+  ok = link_count <= enfs_get_config_link_count_total() - num;
+ if (ok)
+  link_count += num;
+ spin_unlock(&link_count_lock);
+
+ return ok;
+}
+
+/* Snapshot of the current global transport count. */
+int enfs_link_count_num(void)
+{
+ int snapshot;
+
+ spin_lock(&link_count_lock);
+ snapshot = link_count;
+ spin_unlock(&link_count_lock);
+
+ return snapshot;
+}
+
+/*
+ * Charge this client's existing transports against the global link
+ * budget and take one mount slot.
+ * NOTE(review): the return values of both *_count_add() calls are
+ * ignored here, so the budgets can be exceeded on this path — confirm
+ * that is intentional.
+ */
+void enfs_clnt_get_linkcap(struct rpc_clnt *clnt)
+{
+ unsigned int nxprts = 0;
+ struct rpc_xprt_switch *xps;
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ if (xps) {
+ nxprts = xps->xps_nxprts;
+ }
+ rcu_read_unlock();
+ enfs_link_count_add(nxprts);
+ enfs_mount_count_add(1);
+}
+
+/*
+ * Return this client's transports to the global budget and drop one
+ * mount slot. Counterpart of enfs_clnt_get_linkcap().
+ */
+void enfs_clnt_release_linkcap(struct rpc_clnt *clnt)
+{
+ unsigned int nxprts = 0;
+ struct rpc_xprt_switch *xps;
+
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ if (xps)
+  nxprts = xps->xps_nxprts;
+ rcu_read_unlock();
+ /* cast before negating: -nxprts would wrap as unsigned first */
+ enfs_link_count_add(-(int)nxprts);
+ enfs_mount_count_add(-1);
+}
+
+/*
+ * Reserve (@num > 0) or return (@num < 0) mount slots against
+ * ENFS_MAX_MOUNT_COUNT. Returns true when the count was adjusted.
+ * For negative @num the bound check always passes, so releases succeed.
+ */
+bool enfs_mount_count_add(int num)
+{
+ bool ret = false;
+ spin_lock(&mount_count_lock);
+ if (mount_count <= ENFS_MAX_MOUNT_COUNT - num) {
+ mount_count += num;
+ ret = true;
+ }
+ spin_unlock(&mount_count_lock);
+
+ return ret;
+}
+
+/* Snapshot of the current enfs mount count. */
+int enfs_mount_count(void)
+{
+ int snapshot;
+
+ spin_lock(&mount_count_lock);
+ snapshot = mount_count;
+ spin_unlock(&mount_count_lock);
+
+ return snapshot;
+}
+
+/* Drop the rpc_clnt reference held by each queued item and free it. */
+void enfs_destroy_rpcclnt_list(struct list_head *head)
+{
+ struct rpcclnt_release_item *item;
+ struct rpcclnt_release_item *next;
+
+ list_for_each_entry_safe(item, next, head, node) {
+  rpc_release_client(item->clnt);
+  list_del(&item->node);
+  kfree(item);
+ }
+}
+
+/* Drop the nfs_client reference held by each queued item and free it. */
+void enfs_destroy_clnt_list(struct list_head *head)
+{
+ struct clnt_release_item *item;
+ struct clnt_release_item *next;
+
+ list_for_each_entry_safe(item, next, head, node) {
+  nfs_put_client(item->client);
+  list_del(&item->node);
+  kfree(item);
+ }
+}
+
+/**
+ * set socket port.
+ * @ns: need transform to network byte order
+ */
+/*
+ * Store @port into @addr. When @ns is true @port is converted with
+ * htons() first; otherwise it is stored as given. Address families
+ * other than IPv4/IPv6 are ignored.
+ */
+static void sockaddr_set_port(struct sockaddr *addr, __be16 port, bool ns)
+{
+ __be16 value = ns ? htons(port) : port;
+
+ if (addr->sa_family == AF_INET)
+  ((struct sockaddr_in *)addr)->sin_port = value;
+ else if (addr->sa_family == AF_INET6)
+  ((struct sockaddr_in6 *)addr)->sin6_port = value;
+}
+
+/*
+ * Return the client's destination port in network byte order, or
+ * (__be16)-1 for an unsupported family and 0 when the peer address
+ * cannot be read at all.
+ */
+static __be16 get_rpc_clnt_port(struct rpc_clnt *clnt)
+{
+ /* zero-init: the old code read sa_family from uninitialized stack
+  * memory whenever rpc_peeraddr() failed (it returns 0 on failure) */
+ struct sockaddr_storage ss = {0};
+ struct sockaddr *addr = (struct sockaddr *)&ss;
+
+ if (rpc_peeraddr(clnt, addr, sizeof(ss)) == 0) {
+  enfs_log_error("get peer addr failed.\n");
+  return 0;
+ }
+
+ switch (addr->sa_family) {
+ case AF_INET:
+  return ((struct sockaddr_in *)addr)->sin_port;
+
+ case AF_INET6:
+  return ((struct sockaddr_in6 *)addr)->sin6_port;
+
+ default:
+  enfs_log_error("not support family:%d.\n", addr->sa_family);
+  return -1;
+ }
+}
+
+/*
+ * Render @addr into @buf ("*" for NULL). Returns 0 when the family
+ * was recognized, 1 otherwise.
+ */
+static int sockaddr_ip_to_str(struct sockaddr *addr, char *buf, int len)
+{
+ if (!addr) {
+  snprintf(buf, len, "*");
+  return 0;
+ }
+
+ if (addr->sa_family == AF_INET) {
+  struct sockaddr_in *v4 = (struct sockaddr_in *)addr;
+
+  snprintf(buf, len, "%pI4", &v4->sin_addr);
+  return 0;
+ }
+ if (addr->sa_family == AF_INET6) {
+  struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)addr;
+
+  snprintf(buf, len, "%pI6", &v6->sin6_addr);
+  return 0;
+ }
+ return 1;
+}
+
+/* Log a local/remote address pair at info level. */
+void print_enfs_multipath_addr(struct sockaddr *local, struct sockaddr *remote)
+{
+ char local_str[128];
+ char remote_str[128];
+
+ sockaddr_ip_to_str(local, local_str, sizeof(local_str));
+ sockaddr_ip_to_str(remote, remote_str, sizeof(remote_str));
+ pr_info("local:%s remote:%s\n", local_str, remote_str);
+}
+
+/*
+ * Render a server name for @args->address into @servername: the socket
+ * path for AF_LOCAL, or the textual IPv4/IPv6 address. Returns 0, or
+ * -EINVAL for an unsupported address family.
+ */
+static int enfs_servername(char *servername, unsigned long long len,
+ struct rpc_create_args *args)
+{
+ struct sockaddr_un *sun = (struct sockaddr_un *)args->address;
+ struct sockaddr_in *sin = (struct sockaddr_in *)args->address;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)args->address;
+
+ servername[0] = '\0';
+ switch (args->address->sa_family) {
+ case AF_LOCAL:
+ snprintf(servername, len, "%s", sun->sun_path);
+ break;
+ case AF_INET:
+ snprintf(servername, len, "%pI4", &sin->sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ snprintf(servername, len, "%pI6", &sin6->sin6_addr);
+ break;
+ default:
+ printk(KERN_INFO "invalid family:%d\n", args->address->sa_family);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Async ping completion: count the batch down and wake the waiter. */
+static void pm_xprt_ping_callback(void *data)
+{
+ struct xprt_attach_callback_data *cb = data;
+
+ atomic_dec(cb->conditon);
+ wake_up(cb->waitq);
+}
+
+/*
+ * rpc_clnt_add_xprt() setup callback for candidate paths: initialize
+ * the xprt's multipath context, record the xprt (taking an extra ref
+ * that enfs_add_xprts_to_clnt() drops) and start an async ping.
+ * Returns nonzero so rpc_clnt_add_xprt() does NOT attach the xprt
+ * itself — attachment is deferred until the ping result is known.
+ * NOTE(review): if scheduling the ping fails (ret != 1) the batch
+ * counter is never decremented for this slot — confirm the attach
+ * wait_event cannot stall in that case.
+ */
+static int enfs_add_xprt_setup(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps,
+ struct rpc_xprt *xprt, void *data)
+{
+ int ret;
+ struct enfs_xprt_context *ctx;
+ struct xprt_attach_info *attach_info = data;
+ struct sockaddr_storage *srcaddr = attach_info->localAddress;
+
+ ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+ memset(ctx, 0, sizeof(struct enfs_xprt_context));
+ ctx->stats = rpc_alloc_iostats(clnt);
+ ctx->main = false;
+ ctx->protocol = attach_info->protocol;
+ if (srcaddr != NULL) {
+ ctx->srcaddr = *srcaddr;
+ }
+ pm_set_path_state(xprt, PM_STATE_INIT);
+ pm_ping_set_path_check_state(xprt, PM_CHECK_INIT);
+
+ attach_info->xprt = xprt;
+ xprt_get(xprt);
+
+ LVOS_TP_START(PING_TEST_FAILED, &ret);
+ ret = pm_ping_rpc_test_xprt_with_callback(clnt, xprt, pm_xprt_ping_callback, attach_info->data);
+ LVOS_TP_END;
+ if (ret != 1) {
+ enfs_log_error("Failed to add ping task.\n");
+ }
+
+ LVOS_TP_START(ADD_XPRT_FAILED, &ret);
+ ret = 1;
+ LVOS_TP_END;
+
+ return ret; // so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt
+}
+
+/*
+ * Build one candidate transport: put the pair's addresses into
+ * @xprtargs, reuse the client's destination port for the remote, and
+ * hand off to rpc_clnt_add_xprt() with enfs_add_xprt_setup().
+ * Returns 0 when the setup callback ran (it reports 1), otherwise the
+ * rpc error.
+ */
+int enfs_configure_xprt_to_clnt(struct xprt_create *xprtargs, struct rpc_clnt *clnt,
+ struct xprt_attach_info *attach_info)
+{
+ int err = 0;
+ __be16 port;
+ xprtargs->srcaddr = (struct sockaddr *)attach_info->localAddress;
+ xprtargs->dstaddr = (struct sockaddr *)attach_info->remoteAddress;
+
+ port = get_rpc_clnt_port(clnt);
+ /* port is already network order, hence ns == false */
+ sockaddr_set_port((struct sockaddr *)attach_info->remoteAddress, port,
+ false);
+ print_enfs_multipath_addr((struct sockaddr *)attach_info->localAddress, (struct sockaddr *)attach_info->remoteAddress);
+
+ LVOS_TP_START(CREATE_XPRT_FAILED, &err);
+ err = rpc_clnt_add_xprt(clnt, xprtargs, enfs_add_xprt_setup, attach_info);
+ LVOS_TP_END;
+ if (err != 1) {
+ enfs_log_error("clnt add xprt err:%d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+// Greatest common divisor of two numbers (Euclid's algorithm, iterative).
+static int enfs_cal_gcd(int num1, int num2)
+{
+ while (num2 != 0) {
+  int rem = num1 % num2;
+
+  num1 = num2;
+  num2 = rem;
+ }
+ return num1;
+}
+
+/*
+ * Address-pair equality: the destination must match, and the source
+ * must match too unless @localAddress is NULL (wildcard local side).
+ */
+bool enfs_cmp_addrs(struct sockaddr_storage *srcaddr, struct sockaddr_storage *dstaddr,
+ struct sockaddr_storage *localAddress, struct sockaddr_storage *remoteAddress)
+{
+ bool local_matches = localAddress == NULL ||
+  rpc_cmp_addr((struct sockaddr *)srcaddr, (struct sockaddr *)localAddress);
+
+ return local_matches &&
+  rpc_cmp_addr((struct sockaddr *)dstaddr, (struct sockaddr *)remoteAddress);
+}
+
+/*
+ * Compare @xprt's source/destination addresses with the given pair.
+ * A NULL @xprt compares equal (callers treat it as "already present").
+ * Fixed a stray double semicolon after the context lookup.
+ */
+bool enfs_xprt_addrs_is_same(struct rpc_xprt *xprt, struct sockaddr_storage *localAddress,
+ struct sockaddr_storage *remoteAddress)
+{
+ struct enfs_xprt_context *ctx;
+
+ if (xprt == NULL)
+  return true;
+
+ ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+ return enfs_cmp_addrs(&ctx->srcaddr, &xprt->addr, localAddress, remoteAddress);
+}
+
+/*
+ * Scan the client's xprt switch for a transport whose address pair
+ * matches; @localAddress may be NULL to match any source address.
+ * The walk holds the RCU read lock plus a switch reference.
+ */
+bool enfs_already_have_xprt(struct rpc_clnt *clnt, struct sockaddr_storage *localAddress,
+ struct sockaddr_storage *remoteAddress)
+{
+ struct rpc_xprt *pos = NULL;
+ struct rpc_xprt_switch *xps = NULL;
+ rcu_read_lock();
+ xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
+ if (xps == NULL) {
+ rcu_read_unlock();
+ return false;
+ }
+ list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch) {
+ if (enfs_xprt_addrs_is_same(pos, localAddress, remoteAddress)) {
+ xprt_switch_put(xps);
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ xprt_switch_put(xps);
+ rcu_read_unlock();
+ return false;
+}
+
+/*
+ * Attach @xprt to the client's switch under xps_lock, but only when
+ * the switch has no net namespace yet or @xprt belongs to the same one.
+ */
+static void enfs_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+ struct rpc_xprt_switch *xps;
+ rcu_read_lock();
+ xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+ spin_lock(&xps->xps_lock);
+ if (xps->xps_net == xprt->xprt_net || xps->xps_net == NULL) {
+ xprt_switch_add_xprt_locked(xps, xprt);
+ }
+ spin_unlock(&xps->xps_lock);
+ rcu_read_unlock();
+}
+
+/* Maximum number of localaddrs in the scenario where IP addresses are
+ * automatically obtained.
+ */
+/*
+ * Accept the xprt's source address if it still fits in @ip_list
+ * (capacity MAX_IP_PAIR_PER_MOUNT) or is already recorded there.
+ * RDMA transports are exempt from the cap. Fixed a stray double
+ * semicolon after the context lookup.
+ */
+static bool limit_local_addr(struct nfs_ip_list *ip_list, struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx = xprt_get_reserve_context(xprt);
+
+ // if protocol is rdma, the number of localaddrs does not need to be verified
+ if (ctx->protocol == XPRT_TRANSPORT_RDMA)
+  return true;
+
+ return enfs_insert_ip_list(ip_list, MAX_IP_PAIR_PER_MOUNT, &ctx->srcaddr) ||
+  enfs_ip_list_contain(ip_list, &ctx->srcaddr);
+}
+
+/*
+ * Second phase of path attach: walk the ping results and add every
+ * usable transport to the client's xprt switch, bounded by both the
+ * per-mount limit and the global link budget. Drops the per-slot xprt
+ * reference taken in enfs_add_xprt_setup().
+ */
+static void enfs_add_xprts_to_clnt(struct rpc_clnt *clnt,
+ struct xprt_attach_info *attach_infos,
+ int total_combinations,
+ int remoteTotal,
+ int localTotal)
+{
+ struct rpc_xprt *xprt;
+ pm_path_state state;
+ int i;
+ int added = 0; /* was "link_count", shadowing the file-scope counter */
+ struct nfs_ip_list *ip_list;
+ int count = GetEnfsConfigIpFiltersCount();
+ int maxCountPerMount = enfs_get_config_link_count_per_mount() - 1;
+
+ /* no filters and auto-detected locals: also cap by the remote count */
+ if (count == 0 && localTotal == 0) {
+  maxCountPerMount = remoteTotal < enfs_get_config_link_count_per_mount() - 1 ?
+   remoteTotal : enfs_get_config_link_count_per_mount() - 1;
+ }
+
+ ip_list = kzalloc(sizeof(struct nfs_ip_list), GFP_KERNEL);
+ if (!ip_list) {
+  enfs_log_error("alloc memory failed.\n");
+  return;
+ }
+
+ for (i = 0; i < total_combinations; i++) {
+  xprt = attach_infos[i].xprt;
+
+  if (xprt == NULL) {
+   continue;
+  }
+
+  state = pm_get_path_state(xprt);
+
+  if (added < maxCountPerMount && (state == PM_STATE_NORMAL || enfs_get_create_path_no_route()) &&
+   limit_local_addr(ip_list, xprt) && enfs_link_count_add(1)) {
+   enfs_xprt_switch_add_xprt(clnt, xprt);
+   enfs_query_xprt_shard(clnt, xprt);
+   added++;
+  } else {
+   enfs_log_error("Add xprt to clnt ERR! path state = %d, link_count = %d\n", state, added);
+  }
+
+  xprt_put(xprt); /* balance the ref from enfs_add_xprt_setup() */
+ }
+ kfree(ip_list);
+}
+
+/*
+ * Build candidate transports for the cross product of the local and
+ * remote lists. The remote index is rotated using the lcm of the two
+ * list sizes so repeated rounds pair every local with every remote.
+ * Candidates are pinged asynchronously; we wait until every slot has
+ * decremented the batch counter (via callback or early skip), then
+ * attach the surviving transports.
+ */
+static void enfs_combine_addr(struct xprt_create *xprtargs, struct rpc_clnt *clnt,
+ struct nfs_ip_list *local, struct nfs_ip_list *remote)
+{
+ int i;
+ int err;
+ int local_index;
+ int remote_index;
+ int link_count = 0;
+ int local_total = local->count;
+ int remote_total = remote->count;
+ int local_remote_total_lcm;
+ int total_combinations = local_total * remote_total;
+ struct xprt_attach_info *attach_infos;
+ atomic_t wait_queue_condition;
+ struct xprt_attach_callback_data attach_callback_data = {&wait_queue_condition, &path_attach_wait_queue};
+
+ atomic_set(&wait_queue_condition, total_combinations);
+
+ pr_info("local count:%d remote count:%d\n", local_total, remote_total);
+ if (local_total == 0 || remote_total == 0) {
+ pr_err("no ip list is present.\n");
+ return;
+ }
+
+ attach_infos = (struct xprt_attach_info *)kzalloc(
+ (total_combinations) * sizeof(struct xprt_attach_info),
+ GFP_KERNEL);
+ if (attach_infos == NULL) {
+ enfs_log_error("Failed to kzalloc memory\n");
+ return;
+ }
+ // Calculate the least common multiple of local_total and remote_total
+ local_remote_total_lcm = total_combinations / enfs_cal_gcd(local_total, remote_total);
+
+ // It needs to be offset one for lcm times of cycle so that all possible link setup method would be traversed
+ for (i = 0; i < total_combinations; i++) {
+ local_index = i % local_total;
+ remote_index = (i + link_count / local_remote_total_lcm) % remote_total;
+
+ /* skipped slots must still count the batch down */
+ if (local->address[local_index].ss_family !=
+ remote->address[remote_index].ss_family) {
+ atomic_dec(&wait_queue_condition);
+ link_count++;
+ continue;
+ }
+
+ if (enfs_already_have_xprt(clnt, &local->address[local_index], &remote->address[remote_index])) {
+ atomic_dec(&wait_queue_condition);
+ link_count++;
+ continue;
+ }
+
+ attach_infos[i].localAddress = &local->address[local_index];
+ attach_infos[i].remoteAddress = &remote->address[remote_index];
+ attach_infos[i].data = &attach_callback_data;
+ attach_infos[i].protocol = xprtargs->ident;
+
+ err = enfs_configure_xprt_to_clnt(xprtargs, clnt, &attach_infos[i]);
+ if (err) {
+ pr_err("add xprt ippair err:%d\n", err);
+ atomic_dec(&wait_queue_condition);
+ }
+ link_count++;
+ }
+
+ /* all pings done (or skipped) before the attach phase */
+ wait_event(path_attach_wait_queue, atomic_read(&wait_queue_condition) == 0);
+
+ enfs_add_xprts_to_clnt(clnt, attach_infos, total_combinations, remote_total, local_total);
+
+ kfree(attach_infos);
+
+ return;
+}
+
+
+/*
+ * Variant of enfs_combine_addr() for RDMA mounts or when no local
+ * addresses are configured: one candidate per remote address, with
+ * the source address left to the network stack (localAddress == NULL).
+ */
+static void enfs_combine_addr_with_no_local(struct xprt_create *xprtargs, struct rpc_clnt *clnt,
+ struct nfs_ip_list *local, struct nfs_ip_list *remote)
+{
+ int i;
+ int err;
+ int link_count = 0;
+ int local_total = local->count;
+ int remote_total = remote->count;
+ int total_combinations = remote_total;
+ struct xprt_attach_info *attach_infos;
+ atomic_t wait_queue_condition;
+ struct xprt_attach_callback_data attach_callback_data = {&wait_queue_condition, &path_attach_wait_queue};
+
+ atomic_set(&wait_queue_condition, total_combinations);
+
+ pr_info("local count:%d remote count:%d\n", local_total, remote_total);
+ if (remote_total == 0) {
+ pr_err("no ip list is present.\n");
+ return;
+ }
+
+ attach_infos = (struct xprt_attach_info *)kzalloc(
+ (total_combinations) * sizeof(struct xprt_attach_info),
+ GFP_KERNEL);
+ if (attach_infos == NULL) {
+ enfs_log_error("Failed to kzalloc memory\n");
+ return;
+ }
+
+ for (i = 0; i < total_combinations; i++) {
+ /* skipped slots must still count the batch down */
+ if (enfs_already_have_xprt(clnt, NULL, &remote->address[i])) {
+ atomic_dec(&wait_queue_condition);
+ link_count++;
+ continue;
+ }
+
+ attach_infos[i].localAddress = NULL;
+ attach_infos[i].remoteAddress = &remote->address[i];
+ attach_infos[i].data = &attach_callback_data;
+ attach_infos[i].protocol = xprtargs->ident;
+
+ err = enfs_configure_xprt_to_clnt(xprtargs, clnt, &attach_infos[i]);
+ if (err) {
+ pr_err("add xprt ippair err:%d\n", err);
+ atomic_dec(&wait_queue_condition);
+ }
+ link_count++;
+ }
+
+ /* all pings done (or skipped) before the attach phase */
+ wait_event(path_attach_wait_queue, atomic_read(&wait_queue_condition) == 0);
+
+ enfs_add_xprts_to_clnt(clnt, attach_infos, total_combinations, remote_total, local_total);
+
+ kfree(attach_infos);
+
+ return;
+}
+
+/*
+ * Build a mount's extra transports from its multipath options, then
+ * apply the load-balancing policy. RDMA mounts and mounts without a
+ * local IP list skip local-address binding.
+ */
+void enfs_xprt_ippair_create(struct xprt_create *xprtargs, struct rpc_clnt *clnt, void *data)
+{
+ struct multipath_mount_options *mopt = (struct multipath_mount_options *)data;
+ if (mopt == NULL) {
+ pr_err("ip list is NULL.\n");
+ return;
+ }
+ if (xprtargs->ident == XPRT_TRANSPORT_RDMA || mopt->local_ip_list->count == 0) {
+ enfs_combine_addr_with_no_local(xprtargs, clnt, mopt->local_ip_list, mopt->remote_ip_list);
+ } else {
+ enfs_combine_addr(xprtargs, clnt, mopt->local_ip_list, mopt->remote_ip_list);
+ }
+ enfs_lb_set_policy(clnt, NULL);
+
+ return;
+}
+
+/* Argument bundle handed to enfs_multipath_create_thread(). */
+struct xprts_options_and_clnt {
+ struct rpc_create_args *args;
+ struct rpc_clnt *clnt;
+ void *data;
+};
+
+/* Mark this rpc_clnt as managed by enfs multipath. */
+static void set_clnt_enfs_flag(struct rpc_clnt *clnt)
+{
+ ((struct rpc_clnt_reserve *)clnt)->cl_enfs = 1;
+}
+
+/*
+ * Translate rpc_create_args into xprt_create arguments for extra
+ * paths. When the caller supplied no server name, one is generated
+ * into @servername (which must outlive @xprtargs). Returns 0 or a
+ * negative errno. Renamed the local "errno" — that identifier
+ * collides with the C library's errno macro name.
+ */
+int enfs_config_xprt_create_args(struct xprt_create *xprtargs, struct rpc_create_args *args, char *servername, size_t length)
+{
+ int err = 0;
+
+ xprtargs->ident = args->protocol;
+ xprtargs->net = args->net;
+ xprtargs->addrlen = args->addrsize;
+ xprtargs->servername = args->servername;
+
+ if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+  xprtargs->flags |= XPRT_CREATE_INFINITE_SLOTS;
+ if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
+  xprtargs->flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
+
+ if (xprtargs->servername == NULL) {
+  err = enfs_servername(servername, length, args);
+  if (err)
+   return err;
+  xprtargs->servername = servername;
+ }
+
+ return 0;
+}
+
+/* Fill @ss with an IPv4 socket address holding @ifa_address. */
+static void set_in_addr(struct sockaddr_storage *ss, __be32 ifa_address)
+{
+ struct sockaddr_in *v4 = (struct sockaddr_in *)ss;
+
+ v4->sin_family = AF_INET;
+ v4->sin_addr.s_addr = ifa_address;
+}
+
+/* Fill @ss with an IPv6 socket address copied from @in6. */
+static void set_in6_addr(struct sockaddr_storage *ss,
+ const struct inet6_ifaddr *in6)
+{
+ struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;
+
+ memset(ss, 0, sizeof(*ss));
+ v6->sin6_family = AF_INET6;
+ v6->sin6_addr = in6->addr;
+}
+
+/*
+ * Append the IPv4 addresses of @dvice that pass the whitelist filter
+ * to @list, up to the per-mount link limit. The #if branches select
+ * the in_ifaddr iteration API of the target kernel (newer kernels use
+ * in_dev_for_each_ifa_rcu, older ones the for_ifa/endfor_ifa macros).
+ */
+static void enfs_auto_fill_local_inet(struct nfs_ip_list *list,
+ struct in_device *dvice)
+{
+ char buf[INET6_ADDRSTRLEN];
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+ const struct in_ifaddr *ifa;
+#else
+#endif
+
+ if (!dvice) {
+ return;
+ }
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+ in_dev_for_each_ifa_rcu (ifa, dvice) {
+#else
+ for_ifa (dvice) {
+#endif
+ if (list->count >= enfs_get_config_link_count_per_mount()) {
+ break;
+ }
+
+ snprintf(buf, INET6_ADDRSTRLEN, "%pI4", &ifa->ifa_address);
+ if (!enfs_whitelist_filte(buf)) {
+ continue;
+ }
+
+ set_in_addr(&list->address[list->count], ifa->ifa_address);
+ list->count++;
+ enfs_log_debug("IPv4: %s\n", buf);
+ }
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+#else
+ /* endfor_ifa's argument is unused by the macro; it only closes the loop */
+ endfor_ifa(in_dev);
+#endif
+}
+
+/*
+ * Append whitelisted IPv6 addresses of @idev to @list, up to the
+ * per-mount link limit. Both the full and the abbreviated textual
+ * forms are checked against the whitelist.
+ */
+static void enfs_auto_fill_local_inet6(struct nfs_ip_list *list,
+ struct inet6_dev *idev)
+{
+ struct inet6_ifaddr *ifp;
+ char buf[INET6_ADDRSTRLEN];
+ char buf_c[INET6_ADDRSTRLEN]; /* Abbreviations */
+
+ if (!idev) {
+ return;
+ }
+
+ list_for_each_entry (ifp, &idev->addr_list, if_list) {
+ if (list->count >= enfs_get_config_link_count_per_mount()) {
+ break;
+ }
+
+ snprintf(buf, INET6_ADDRSTRLEN, "%pI6", &ifp->addr);
+ snprintf(buf_c, INET6_ADDRSTRLEN, "%pI6c", &ifp->addr);
+ if (!enfs_whitelist_filte(buf) && !enfs_whitelist_filte(buf_c)) {
+ continue;
+ }
+
+ set_in6_addr(&list->address[list->count], ifp);
+ list->count++;
+ enfs_log_debug("IPv6: %s/%d\n", buf_c, ifp->prefix_len);
+ }
+}
+
+/*
+ * Walk every netdev under rtnl_lock and collect candidate local
+ * addresses (IPv4 always; IPv6 only when the mount itself is IPv6).
+ * NOTE(review): only init_net is scanned — confirm mounts from other
+ * network namespaces are out of scope.
+ */
+static void find_fill_local_addr(struct nfs_ip_list *list,
+ unsigned short family)
+{
+ struct net_device *dev;
+
+ rtnl_lock();
+ for_each_netdev (&init_net, dev) {
+ if (list->count >= enfs_get_config_link_count_per_mount()) {
+ break;
+ }
+ enfs_log_debug("device: %s\n", dev->name);
+
+ // IPv4
+ enfs_auto_fill_local_inet(list, dev->ip_ptr);
+ // IPv6
+ if (family == AF_INET6) {
+ enfs_auto_fill_local_inet6(list, dev->ip6_ptr);
+ }
+ }
+ rtnl_unlock();
+
+ return;
+}
+
+/*
+ * Auto-detect local addresses when the mount requested it and IP
+ * filters are configured; otherwise leave @local untouched. The
+ * client's peer address is read only to learn which address family
+ * to scan. Always returns 0.
+ */
+static int fill_local_iplist(struct xprts_options_and_clnt *args,
+ struct multipath_mount_options *options,
+ struct nfs_ip_list *local)
+{
+ struct sockaddr_storage ss;
+ int count = GetEnfsConfigIpFiltersCount();
+
+ rpc_peeraddr(args->clnt, (struct sockaddr *)&ss, sizeof(ss));
+ if (options->fill_local && count != 0) {
+ /* Do not need to fill in the IP address again, if not found */
+ find_fill_local_addr(local, ss.ss_family);
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Ensure the mount options carry usable address lists: an empty local
+ * list is flagged for auto-fill, and an empty remote list with no DNS
+ * names configured falls back to the client's peer address.
+ * Returns 0 on success or a negative errno.
+ */
+static int enfs_fill_empty_iplist(struct xprts_options_and_clnt *args,
+ struct multipath_mount_options *options)
+{
+ int ret;
+ struct nfs_ip_list *local = options->local_ip_list;
+ struct nfs_ip_list *remote = options->remote_ip_list;
+
+ if (local->count == 0)
+  options->fill_local = 1;
+
+ ret = fill_local_iplist(args, options, local);
+ if (ret)
+  return ret;
+
+ // If a domain name is configured but the query fails, do not handle.
+ if (remote->count == 0 && options->pRemoteDnsInfo->dnsNameCount == 0) {
+  ret = rpc_peeraddr(args->clnt, (struct sockaddr *)&remote->address[0],
+   sizeof(struct sockaddr_storage));
+  if (ret == 0) {
+   /* rpc_peeraddr() returns the copied size, 0 on failure. The
+    * old code returned ret here — i.e. reported success while
+    * leaving the remote list empty. Return a real error. */
+   enfs_log_error("enfs: get clnt dstaddr errno:%d\n", ret);
+   return -ENOTCONN;
+  }
+  remote->count = 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Run the whole extra-path setup for a freshly created client:
+ * optional DNS registration/query, xprt_create argument setup, ip-list
+ * completion, procfs entry, then candidate creation. Despite the name
+ * it runs synchronously in the caller's context.
+ * Ownership: on success this function frees the rpc_create_args copy
+ * and the bundle; on a nonzero return the caller frees them.
+ */
+int enfs_multipath_create_thread(void *data)
+{
+ int errno;
+ struct sockaddr_storage ss;
+ char servername[48];
+ struct xprts_options_and_clnt *create_args = data;
+ struct multipath_mount_options *mount_options = create_args->args->multipath_option;
+ struct xprt_create xprtargs;
+ memset(&xprtargs, 0, sizeof(struct xprt_create));
+
+ if (mount_options == NULL) {
+ enfs_log_error("enfs: mount localaddrs and remoteaddrs are empty !\n");
+ return -EINVAL;
+ }
+
+ if (mount_options->pRemoteDnsInfo->dnsNameCount != 0) {
+ enfs_add_domain_name(mount_options);
+ rpc_peeraddr(create_args->clnt, (struct sockaddr *)&ss, sizeof(ss));
+ errno = multipath_query_dns(mount_options, ss.ss_family, true, create_args->clnt);
+ if (errno != 0) {
+ /* non-fatal: the periodic DNS update thread retries later */
+ enfs_log_error("dns query failed,waiting for the next update.\n");
+ }
+ }
+
+ errno = enfs_config_xprt_create_args(&xprtargs, create_args->args, servername, sizeof(servername));
+ if (errno) {
+ enfs_log_error("enfs: config_xprt_create failed! errno:%d\n", errno);
+ return errno;
+ }
+
+ errno = enfs_fill_empty_iplist(create_args, mount_options);
+ if (errno) {
+ enfs_log_error("fill empty ip list err:%d\n", errno);
+ return errno;
+ }
+
+ errno = enfs_proc_create_clnt(create_args->clnt);
+ if (errno != 0) {
+ pr_err("create clnt proc failed.\n");
+ }
+
+ set_clnt_enfs_flag(create_args->clnt);
+ enfs_xprt_ippair_create(&xprtargs, create_args->clnt, mount_options);
+
+ kfree(create_args->args);
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Initialize the multipath context of the client's primary transport:
+ * flag it as the main path, attach iostats, record source address and
+ * protocol, and seed the path state machine as NORMAL/CHECK_INIT.
+ * Returns 0, or -1 when the xprt has no multipath context.
+ */
+static int set_main_xprt_ctx(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ struct sockaddr_storage *srcaddr, int protocol)
+{
+ struct enfs_xprt_context *ctx = xprt_get_reserve_context(xprt);
+
+ if (!ctx) {
+ enfs_log_error("main xprt not multipath ctx.\n");
+ return -1;
+ }
+
+ ctx->main = true;
+ ctx->stats = rpc_alloc_iostats(clnt);
+ ctx->srcaddr = *srcaddr;
+ ctx->protocol = protocol;
+ pm_set_path_state(xprt, PM_STATE_NORMAL);
+ pm_ping_set_path_check_state(xprt, PM_CHECK_INIT);
+
+ return 0;
+}
+
+/*
+ * Populate the primary transport's multipath context using the
+ * client's bound local address. Returns 0 or a negative errno.
+ */
+static int alloc_main_xprt_multicontext(struct rpc_create_args *args,
+ struct rpc_clnt *clnt)
+{
+ struct sockaddr_storage srcaddr;
+ int err;
+
+ // avoid main xprt multicontex local addr is empty.
+ err = rpc_localaddr(clnt, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
+ if (err) {
+  enfs_log_error("get clnt localaddr err:%d\n", err);
+  return err;
+ }
+
+ err = set_main_xprt_ctx(clnt, clnt->cl_xprt, &srcaddr, args->protocol);
+ if (err)
+  enfs_log_error("alloc main xprt failed.\n");
+
+ return err;
+}
+
+/*
+ * multipath_ops create_clnt hook. Reserves mount and link budget,
+ * clones the create args (they must outlive the caller's stack) and
+ * runs the path setup; the goto chain undoes everything acquired so
+ * far on each failure.
+ * NOTE(review): the result of alloc_main_xprt_multicontext() is
+ * ignored — confirm a failed main-path context is tolerated later.
+ */
+void enfs_create_multi_xprt(struct rpc_create_args *args, struct rpc_clnt *clnt)
+{
+ struct xprts_options_and_clnt *thargs = NULL;
+ struct rpc_create_args *cargs = NULL;
+ int err = 0;
+
+ enfs_log_info("enfs_create_multi_xprt %p\n", clnt);
+
+ if (!enfs_mount_count_add(1)) {
+ enfs_log_error("number of mount exceeds the limit.\n");
+ return;
+ }
+
+ if (!enfs_link_count_add(1)) {
+ enfs_log_error("number of link count exceeds the limit.\n");
+ goto cleanup_mount;
+ }
+
+ LVOS_TP_START(ALLOC_CREATE_ARGS_FAILED, &cargs);
+ cargs = kmalloc(sizeof(struct rpc_create_args), GFP_KERNEL);
+ LVOS_TP_END;
+ if (cargs == NULL) {
+ pr_info("kmalloc failed!\n");
+ goto cleanup_link;
+ }
+ *cargs = *args;
+
+ thargs = kmalloc(sizeof(struct xprts_options_and_clnt), GFP_KERNEL);
+ if (thargs == NULL) {
+ pr_err("kmalloc failed!\n");
+ goto cleanup_cargs;
+ }
+
+ alloc_main_xprt_multicontext(args, clnt);
+
+ thargs->args = cargs;
+ thargs->clnt = clnt;
+ thargs->data = args->multipath_option;
+
+ LVOS_TP_START(CREATE_MULTIPATH_THREAD_FAILD, &err);
+ err = enfs_multipath_create_thread(thargs);
+ LVOS_TP_END;
+ if (err != 0) {
+ /* on success the callee freed thargs/cargs; on error we do */
+ goto cleanup_thargs;
+ }
+
+ return;
+
+cleanup_thargs:
+ kfree(thargs);
+cleanup_cargs:
+ kfree(cargs);
+cleanup_link:
+ enfs_link_count_add(-1);
+cleanup_mount:
+ enfs_mount_count_add(-1);
+}
+
+/*
+ * multipath_ops release hook: remove the client's procfs entry and
+ * its shard-cache entries.
+ */
+void enfs_release_rpc_clnt(struct rpc_clnt *clnt)
+{
+ enfs_proc_delete_clnt(clnt);
+ // The sending client is inserted, not the main client.
+ enfs_delete_clnt_shard_cache(clnt);
+}
+
+/* multipath_ops create_xprt hook: attach a fresh context to @xprt. */
+static void enfs_create_xprt_ctx(struct rpc_xprt *xprt)
+{
+ if (enfs_alloc_xprt_ctx(xprt))
+  enfs_log_error("alloc xprt failed.\n");
+}
+
+/*
+ * multipath_ops set_transport hook: NFSv4 clients under the
+ * singular-route policy use failover reselection; NFSv3 clients under
+ * round-robin use shard-based selection. Any other combination keeps
+ * the transport sunrpc already picked.
+ */
+static void enfs_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+ if (enfs_is_singularr_route(clnt) && clnt->cl_vers == 4) { /* NFSv4 */
+ failover_reselect_transport(task, clnt);
+ return;
+ }
+
+ if (enfs_is_rr_route(clnt) && clnt->cl_vers == 3) { /* NFSv3 */
+ shard_set_transport(task, clnt);
+ return;
+ }
+}
+
+/* Bump the per-transport queued-task counter; NULL-tolerant. */
+static void enfs_inc_queuelen(struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx;
+
+ if (!xprt)
+  return;
+ ctx = xprt_get_reserve_context(xprt);
+ if (ctx)
+  atomic_long_inc(&ctx->queuelen);
+}
+
+/* Drop the per-transport queued-task counter, clamping it at zero. */
+static void enfs_dec_queuelen(struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx;
+ long remaining;
+
+ if (!xprt)
+  return;
+ ctx = xprt_get_reserve_context(xprt);
+ if (!ctx)
+  return;
+
+ remaining = atomic_long_dec_return(&ctx->queuelen);
+ if (remaining < 0) {
+  /* Prevent the reduction to negative */
+  enfs_log_error("the value of queue length is negative, value(%ld).\n", remaining);
+  atomic_long_inc(&ctx->queuelen);
+ }
+}
+
+/* Report the private enfs RPC program number and version. */
+static void enfs_get_rpc_program(struct rpc_task *task, u32 *program, u32 *version)
+{
+ *program = ENFS_RPC_PROG_NUM;
+ *version = ENFS_RPC_PROG_VERSION;
+}
+
+/* Hook table registered with sunrpc. The field spelling "releas_clnt"
+ * is fixed by the rpc_multipath_ops interface and cannot be changed here. */
+static struct rpc_multipath_ops ops = {
+ .owner = THIS_MODULE,
+ .create_clnt = enfs_create_multi_xprt,
+ .releas_clnt = enfs_release_rpc_clnt,
+ .create_xprt = enfs_create_xprt_ctx,
+ .destroy_xprt = enfs_free_xprt_ctx,
+ .xprt_iostat = enfs_count_iostat,
+ .failover_handle = failover_handle,
+ .adjust_task_timeout = failover_adjust_task_timeout,
+ .init_task_req = failover_init_task_req,
+ .prepare_transmit = failover_prepare_transmit,
+ .set_transport = enfs_set_transport,
+ .inc_queuelen = enfs_inc_queuelen,
+ .dec_queuelen = enfs_dec_queuelen,
+ .get_rpc_program = enfs_get_rpc_program,
+ .task_need_call_start_again = failover_task_need_call_start_again,
+};
+
+/*
+ * Bring up the multipath subsystem: load balancing, path ping and
+ * procfs, then register the hook table with sunrpc. On failure the
+ * steps already completed are unwound in reverse order.
+ */
+int enfs_multipath_init(void)
+{
+ int err;
+
+ enfs_log_info("multipath init.\n");
+
+ spin_lock_init(&link_count_lock);
+ spin_lock_init(&mount_count_lock);
+
+ err = enfs_lb_init();
+ if (err != 0) {
+  enfs_log_error("enfs loadbalance init err:%d\n", err);
+  return err;
+ }
+
+ err = pm_ping_init();
+ if (err != 0) {
+  enfs_log_error("pm ping init err:%d\n", err);
+  goto out_lb;
+ }
+
+ err = enfs_proc_init();
+ if (err != 0) {
+  enfs_log_error("enfs proc init err:%d\n", err);
+  goto out_ping;
+ }
+
+ rpc_multipath_ops_register(&ops);
+ return 0;
+
+out_ping:
+ pm_ping_fini();
+out_lb:
+ enfs_lb_exit();
+ return err;
+}
+
+/* Unregister from sunrpc and tear down subsystems in reverse init order. */
+void enfs_multipath_exit(void)
+{
+ enfs_log_info("multipath exit.\n");
+ rpc_multipath_ops_unregister(&ops);
+ enfs_proc_exit();
+ pm_ping_fini();
+ enfs_lb_exit();
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/path_mgmt/enfs_path.c b/fs/nfs/enfs/multipath/path_mgmt/enfs_path.c
new file mode 100644
index 0000000..dd095a9
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/enfs_path.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+
+#include
+#include
+
+#include "enfs.h"
+#include "enfs_log.h"
+#include "enfs_path.h"
+
+// only create ctx in this function
+// alloc iostat memory in create_clnt
+/*
+ * Allocate and attach a zeroed enfs_xprt_context to @xprt.
+ * Returns 0, -EINVAL for a NULL xprt, or -ENOMEM.
+ */
+int enfs_alloc_xprt_ctx(struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx;
+
+ if (!xprt) {
+  enfs_log_error("invalid xprt pointer.\n");
+  return -EINVAL;
+ }
+
+ ctx = kzalloc(sizeof(struct enfs_xprt_context), GFP_KERNEL);
+ if (!ctx) {
+  /* old message "add xprt test faild." was misleading and misspelled */
+  enfs_log_error("alloc xprt context failed.\n");
+  return -ENOMEM;
+ }
+
+ xprt_set_reserve_context(xprt, (void *)ctx);
+ return 0;
+}
+
+// free multi_context and iostat memory
+void enfs_free_xprt_ctx(struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx = xprt_get_reserve_context(xprt);
+
+ if (!ctx)
+  return;
+
+ if (ctx->stats) {
+  rpc_free_iostats(ctx->stats);
+  ctx->stats = NULL;
+ }
+ kfree(ctx);
+ xprt_set_reserve_context(xprt, NULL);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/path_mgmt/enfs_path.h b/fs/nfs/enfs/multipath/path_mgmt/enfs_path.h
new file mode 100644
index 0000000..fcb93b9
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/enfs_path.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ */
+
+#ifndef ENFS_PATH_H
+#define ENFS_PATH_H
+
+/* Attach a zeroed multipath context to @xprt (0 / -EINVAL / -ENOMEM). */
+int enfs_alloc_xprt_ctx(struct rpc_xprt *xprt);
+/* Release @xprt's multipath context and its iostats, if any. */
+void enfs_free_xprt_ctx(struct rpc_xprt *xprt);
+
+#endif // ENFS_PATH_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/path_mgmt/enfs_proc.c b/fs/nfs/enfs/multipath/path_mgmt/enfs_proc.c
new file mode 100644
index 0000000..fa29add
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/enfs_proc.c
@@ -0,0 +1,663 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../../../net/sunrpc/netns.h"
+
+#include "enfs.h"
+#include "enfs_log.h"
+#include "enfs_proc.h"
+#include "enfs_multipath.h"
+#include "pm_state.h"
+#include "enfs_tp_common.h"
+#include "exten_call.h"
+#include "shard.h"
+
+#define ENFS_PROC_DIR "enfs"
+#define ENFS_PROC_PATH_STATUS_LEN 256
+
+static struct proc_dir_entry *enfs_proc_parent;
+
+/* Return the /proc/enfs parent directory entry (NULL before init). */
+struct proc_dir_entry *enfs_get_proc_parent(void)
+{
+	return enfs_proc_parent;
+}
+
+/*
+ * Format the IP address in @addr into @buf (at most @len bytes).
+ * A NULL @addr prints as "*" (wildcard).  Returns 0 on success,
+ * 1 on an unsupported address family; in that case "Unknown" is
+ * written so callers never print a stale stack buffer.
+ */
+static int sockaddr_ip_to_str(struct sockaddr *addr, char *buf, int len)
+{
+	if (addr == NULL) {
+		snprintf(buf, len, "*");
+		return 0;
+	}
+	switch (addr->sa_family) {
+	case AF_INET: {
+		struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+
+		snprintf(buf, len, "%pI4", &sin->sin_addr);
+		return 0;
+	}
+	case AF_INET6: {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+		snprintf(buf, len, "%pI6", &sin6->sin6_addr);
+		return 0;
+	}
+	default:
+		break;
+	}
+	/* several callers pass uninitialized stack buffers */
+	snprintf(buf, len, "Unknown");
+	return 1;
+}
+
+/* Only the READ and WRITE per-op stats are shown in the proc output. */
+static bool should_print(const char *name)
+{
+	static const char * const interesting[] = {
+		"READ",
+		"WRITE",
+	};
+	size_t idx;
+
+	if (!name)
+		return false;
+
+	for (idx = 0; idx < ARRAY_SIZE(interesting); idx++)
+		if (strcmp(name, interesting[idx]) == 0)
+			return true;
+
+	return false;
+}
+
+/* Cursor passed through rpc_clnt_iterate_for_each_xprt() callbacks. */
+struct enfs_xprt_iter {
+	unsigned int id;		/* running row index */
+	struct seq_file *seq;		/* output target */
+	unsigned int max_addrs_length;	/* widest address string seen */
+};
+
+void enfs_for_each_rpc_clnt(int (*fn)(struct rpc_clnt *clnt, void *data), void *data);
+/* Debug aid: dump one transport's enfs context to the kernel log. */
+static int debug_show_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+	void *data)
+{
+	uint64_t lsid;
+	struct enfs_xprt_context *ctx = xprt_get_reserve_context(xprt);
+	/* ctx may legitimately be NULL; every read below guards on it */
+	lsid = ctx ? ctx->lsid : 0;
+
+	pr_info(" xprt:%p ctx:%p main:%d queue_len:%lu lsid:%llu.\n", xprt, ctx,
+		ctx ? ctx->main : false, ctx ? atomic_long_read(&ctx->queuelen) : 0, lsid);
+	return 0;
+}
+
+/* Debug aid: dump one rpc_clnt and all of its transports. */
+static int debug_show_clnt(struct rpc_clnt *clnt, void *data)
+{
+	struct rpc_clnt_reserve *clnt_reserve = (struct rpc_clnt_reserve *)clnt;
+	pr_info(" clnt %d addr:%p enfs:%d\n", clnt->cl_clid, clnt, clnt_reserve->cl_enfs);
+	rpc_clnt_iterate_for_each_xprt(clnt, debug_show_xprt, NULL);
+	return 0;
+}
+
+/* When the enfs_debug flag is set, log every client/transport pair. */
+static void debug_print_all_xprt(void)
+{
+	if (!enfs_debug)
+		return;
+	enfs_for_each_rpc_clnt(debug_show_clnt, NULL);
+}
+
+/* True when @ip_str parses as a literal IPv4 or IPv6 address. */
+static bool is_valid_ip_address(const char *ip_str)
+{
+	struct in_addr v4;
+	struct in6_addr v6;
+
+	return in4_pton(ip_str, -1, (u8 *)&v4, '\0', NULL) == 1 ||
+	       in6_pton(ip_str, -1, (u8 *)&v6, '\0', NULL) == 1;
+}
+
+/*
+ * Fill @local_name_buf / @remote_name_buf with printable addresses for
+ * @xprt.  The remote side always comes from xprt->addr.  The local
+ * side: for the main transport it is rpc_localaddr() (or "Unknown" on
+ * failure); for extra transports it is the source address stored in
+ * the enfs context, falling back to rpc_localalladdr() when that is
+ * not a valid literal.  RDMA transports print "*" as the local side.
+ * NOTE(review): ctx is dereferenced without a NULL check — all current
+ * callers verify the context first; keep it that way.
+ */
+static void enfs_proc_format_xprt_addr_display(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+	char *local_name_buf, int local_name_buf_len, char *remote_name_buf, int remote_name_buf_len)
+{
+	int err;
+	struct sockaddr_storage srcaddr;
+	struct enfs_xprt_context *ctx;
+	char local_name[INET6_ADDRSTRLEN];
+	const char *local = local_name;
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+
+	sockaddr_ip_to_str((struct sockaddr *)&xprt->addr, remote_name_buf,
+		remote_name_buf_len);
+
+	// get local address depending on main or not
+	if (enfs_is_main_xprt(xprt)) {
+		err = rpc_localaddr(clnt, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
+		if (err != 0)
+			(void)snprintf(local_name_buf, local_name_buf_len, "Unknown");
+		else {
+			if (ctx->protocol != XPRT_TRANSPORT_RDMA) {
+				sockaddr_ip_to_str((struct sockaddr *)&srcaddr, local_name_buf,
+					local_name_buf_len);
+			} else {
+				sockaddr_ip_to_str(NULL, local_name_buf, local_name_buf_len);
+			}
+		}
+	} else {
+		if (ctx->protocol != XPRT_TRANSPORT_RDMA) {
+			sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, local_name, sizeof(local_name));
+			/* stored srcaddr may be unset: fall back to the socket's address */
+			if (!is_valid_ip_address(local)) {
+				rpc_localalladdr(xprt, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
+				sockaddr_ip_to_str((struct sockaddr *)&srcaddr, local_name_buf, local_name_buf_len);
+				return;
+			}
+			sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, local_name_buf, local_name_buf_len);
+		} else {
+			sockaddr_ip_to_str(NULL, local_name_buf, local_name_buf_len);
+		}
+	}
+}
+
+/*
+ * seq_file callback: print one iostat row for @xprt — addresses,
+ * READ/WRITE op count, mean RTT and mean execute time (ms), queuelen.
+ * The stats buffer is allocated separately from the context, so both
+ * must be checked before indexing (the original only checked ctx).
+ */
+static int enfs_show_xprt_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data)
+{
+	unsigned int op;
+	unsigned int maxproc = clnt->cl_maxproc;
+	struct enfs_xprt_iter *iter = (struct enfs_xprt_iter *)data;
+	struct enfs_xprt_context *ctx;
+	char local_name[INET6_ADDRSTRLEN];
+	char remote_name[INET6_ADDRSTRLEN];
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (ctx == NULL || ctx->stats == NULL) {
+		enfs_log_debug("multipath_context is null.\n");
+		return 0;
+	}
+	enfs_proc_format_xprt_addr_display(clnt, xprt, local_name, sizeof(local_name),
+		remote_name, sizeof(remote_name));
+
+	seq_printf(iter->seq, "%-6u%-*s%-*s", iter->id, iter->max_addrs_length + 4,
+		local_name, iter->max_addrs_length + 4, remote_name);
+
+	iter->id++;
+
+	for (op = 0; op < maxproc; op++) {
+		if (!should_print(clnt->cl_procinfo[op].p_name))
+			continue;
+		/* guard the mean computations against zero ops */
+		seq_printf(iter->seq, "%-22lu%-22Lu%-22Lu", ctx->stats[op].om_ops,
+			ctx->stats[op].om_ops == 0 ? 0 : ktime_to_ms(ctx->stats[op].om_rtt) / ctx->stats[op].om_ops,
+			ctx->stats[op].om_ops == 0 ? 0 : ktime_to_ms(ctx->stats[op].om_execute) / ctx->stats[op].om_ops);
+	}
+	seq_printf(iter->seq, "%-22lu", atomic_long_read(&(ctx->queuelen)));
+	seq_printf(iter->seq, "\n");
+	return 0;
+}
+
+/*
+ * seq_file callback: print one "path" row — local/remote address plus
+ * the multipath path state and raw xprt state descriptions.
+ * (Also fixes a stray double semicolon and the "multiapth" typo.)
+ */
+static int rpc_proc_show_path_status(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+	void *data)
+{
+	struct enfs_xprt_iter *iter = (struct enfs_xprt_iter *)data;
+	struct enfs_xprt_context *ctx = NULL;
+	char local_name[INET6_ADDRSTRLEN] = {0};
+	char remote_name[INET6_ADDRSTRLEN] = {0};
+	char multipath_status[ENFS_PROC_PATH_STATUS_LEN] = {0};
+	char xprt_status[ENFS_PROC_PATH_STATUS_LEN] = {0};
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (ctx == NULL) {
+		enfs_log_debug("multipath_context is null.\n");
+		return 0;
+	}
+
+	enfs_proc_format_xprt_addr_display(clnt, xprt, local_name, sizeof(local_name),
+		remote_name, sizeof(remote_name));
+
+	pm_get_path_state_desc(xprt, multipath_status, ENFS_PROC_PATH_STATUS_LEN);
+	pm_get_xprt_state_desc(xprt, xprt_status, ENFS_PROC_PATH_STATUS_LEN);
+
+	seq_printf(iter->seq, "%-6u%-*s%-*s%-12s%-12s\n", iter->id, iter->max_addrs_length + 4,
+		local_name, iter->max_addrs_length + 4, remote_name, multipath_status,
+		xprt_status);
+	iter->id++;
+	return 0;
+}
+
+/* Track the widest local/remote address string for column alignment. */
+static int enfs_get_max_addrs_length(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+	void *data)
+{
+	struct enfs_xprt_iter *iter = (struct enfs_xprt_iter *)data;
+	char local[INET6_ADDRSTRLEN];
+	char remote[INET6_ADDRSTRLEN];
+	struct enfs_xprt_context *ctx;
+	unsigned int width;
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (ctx == NULL) {
+		enfs_log_debug("multipath_context is null.\n");
+		return 0;
+	}
+
+	enfs_proc_format_xprt_addr_display(clnt, xprt, local, sizeof(local),
+		remote, sizeof(remote));
+
+	width = strlen(local);
+	if (width > iter->max_addrs_length)
+		iter->max_addrs_length = width;
+
+	width = strlen(remote);
+	if (width > iter->max_addrs_length)
+		iter->max_addrs_length = width;
+
+	return 0;
+}
+
+/* seq_file show for the "path" proc file: header then one row per xprt. */
+static int rpc_proc_clnt_showpath(struct seq_file *seq, void *v)
+{
+	struct rpc_clnt *clnt = seq->private;
+	struct enfs_xprt_iter iter = {
+		.id = 0,
+		.seq = seq,
+		.max_addrs_length = 0,
+	};
+
+	rpc_clnt_iterate_for_each_xprt(clnt, enfs_get_max_addrs_length, (void *)&iter);
+
+	seq_printf(seq, "%-6s%-*s%-*s%-12s%-12s\n", "id",
+		iter.max_addrs_length + 4, "local_addr", iter.max_addrs_length + 4,
+		"remote_addr", "path_state", "xprt_state");
+
+	rpc_clnt_iterate_for_each_xprt(clnt, rpc_proc_show_path_status, (void *)&iter);
+	return 0;
+}
+
+/* seq_file show for the "stat" proc file: header then one row per xprt. */
+static int enfs_rpc_proc_show(struct seq_file *seq, void *v)
+{
+	struct rpc_clnt *clnt = seq->private;
+	struct enfs_xprt_iter iter = {
+		.id = 0,
+		.seq = seq,
+		.max_addrs_length = 0,
+	};
+
+	debug_print_all_xprt();
+	enfs_log_debug("enfs proc clnt:%p\n", clnt);
+
+	rpc_clnt_iterate_for_each_xprt(clnt, enfs_get_max_addrs_length, (void *)&iter);
+
+	seq_printf(seq, "%-6s%-*s%-*s%-22s%-22s%-22s%-22s%-22s%-22s%-22s\n", "id",
+		iter.max_addrs_length + 4, "local_addr", iter.max_addrs_length + 4,
+		"remote_addr", "r_count", "r_rtt", "r_exec", "w_count", "w_rtt", "w_exec", "queuelen");
+
+	rpc_clnt_iterate_for_each_xprt(clnt, enfs_show_xprt_stats, (void *)&iter);
+	return 0;
+}
+
+/* proc open for the "stat" file; pde_data()/PDE_DATA() differs by kernel. */
+static int rpc_proc_open(struct inode *inode, struct file *file)
+{
+#ifdef ENFS_OPENEULER_660
+	struct rpc_clnt *clnt = pde_data(inode);
+#else
+	struct rpc_clnt *clnt = PDE_DATA(inode);
+#endif
+	enfs_log_debug("rpc_proc_open %p\n", clnt);
+	return single_open(file, enfs_rpc_proc_show, clnt);
+}
+
+/*
+ * Zero every per-op iostat bucket of @xprt.  Each bucket embeds its
+ * own om_lock spinlock; assigning a zeroed struct over a *held* lock
+ * would corrupt it, so the lock word is carried over before the
+ * assignment.  Also checks ctx->stats, which is allocated separately.
+ */
+static int enfs_reset_xprt_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data)
+{
+	unsigned int op;
+	struct enfs_xprt_context *ctx;
+	unsigned int maxproc = clnt->cl_maxproc;
+	struct rpc_iostats stats;
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (!ctx || !ctx->stats)
+		return 0;
+	for (op = 0; op < maxproc; op++) {
+		spin_lock(&ctx->stats[op].om_lock);
+		memset(&stats, 0, sizeof(struct rpc_iostats));
+		stats.om_lock = ctx->stats[op].om_lock;	/* keep the held lock intact */
+		ctx->stats[op] = stats;
+		spin_unlock(&ctx->stats[op].om_lock);
+	}
+	return 0;
+}
+
+/*
+ * Replace every '\n' in @str with '\0', scanning at most @len bytes.
+ * The bounds test comes before the dereference so the terminator
+ * check can never read one byte past the buffer.
+ */
+static void trim_newline_ch(char *str, int len)
+{
+	int i;
+
+	for (i = 0; i < len && str[i] != '\0'; i++) {
+		if (str[i] == '\n')
+			str[i] = '\0';
+	}
+}
+
+/*
+ * /proc write handler for the "stat" file.  The only accepted command
+ * is "reset", which clears the per-transport iostat counters of the
+ * client bound to this file.
+ */
+static ssize_t enfs_proc_write(struct file *file, const char __user *user_buf, size_t len,
+	loff_t *offset)
+{
+	char cmd[128];
+	struct rpc_clnt *clnt = ((struct seq_file *)file->private_data)->private;
+
+	if (len >= sizeof(cmd))
+		return -E2BIG;
+
+	if (copy_from_user(cmd, user_buf, len) != 0)
+		return -EFAULT;
+
+	cmd[len] = '\0';
+	trim_newline_ch(cmd, len);
+
+	if (strcmp(cmd, "reset") != 0)
+		return -EINVAL;
+
+	rpc_clnt_iterate_for_each_xprt(clnt, enfs_reset_xprt_stats, NULL);
+	return len;
+}
+
+/* proc open for the "path" file; pde_data()/PDE_DATA() differs by kernel. */
+static int rpc_proc_show_path(struct inode *inode, struct file *file)
+{
+#ifdef ENFS_OPENEULER_660
+	struct rpc_clnt *clnt = pde_data(inode);
+#else
+	struct rpc_clnt *clnt = PDE_DATA(inode);
+#endif
+	return single_open(file, rpc_proc_clnt_showpath, clnt);
+}
+
+/* "stat" file ops: proc_ops on newer kernels, file_operations otherwise. */
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+static const struct proc_ops rpc_proc_fops = {
+	.proc_flags = PROC_ENTRY_PERMANENT,
+	.proc_open = rpc_proc_open,
+	.proc_read = seq_read,
+	.proc_lseek = seq_lseek,
+	.proc_release = single_release,
+	.proc_write = enfs_proc_write,
+};
+#else
+static const struct file_operations rpc_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = rpc_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = enfs_proc_write,
+};
+#endif
+
+/* "path" file ops (read-only): same kernel-version split as above. */
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+static const struct proc_ops rpc_show_path_fops = {
+	.proc_flags = PROC_ENTRY_PERMANENT,
+	.proc_open = rpc_proc_show_path,
+	.proc_read = seq_read,
+	.proc_lseek = seq_lseek,
+	.proc_release = single_release,
+};
+#else
+static const struct file_operations rpc_show_path_fops = {
+	.owner = THIS_MODULE,
+	.open = rpc_proc_show_path,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif
+
+/*
+ * Build the per-client proc directory name: "<peer-ip>_<clid>".
+ * snprintf() reports truncation with a return value >= the buffer
+ * size, so the truncation test is ">= len" (the original used "> len",
+ * missing the exactly-full case) and a negative return is an error.
+ * Returns 0 on success, -E2BIG on truncation or format error.
+ */
+static int clnt_proc_name(struct rpc_clnt *clnt, char *buf, int len)
+{
+	int ret;
+
+	ret = snprintf(buf, len, "%s_%u", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR),
+		clnt->cl_clid);
+	if (ret < 0 || ret >= len)
+		return -E2BIG;
+	return 0;
+}
+
+/*
+ * Create /proc/enfs/<peer-ip>_<clid>/ with its "stat" and "path"
+ * entries.  On a mid-creation failure the partially created directory
+ * is removed again so repeated mounts do not leak proc entries (the
+ * original left it behind).  LVOS_TP_* are fault-injection points.
+ */
+static int enfs_proc_create_file(struct rpc_clnt *clnt)
+{
+	int err;
+	char buf[128];
+
+	struct proc_dir_entry *clnt_entry;
+	struct proc_dir_entry *stat_entry;
+
+	err = clnt_proc_name(clnt, buf, sizeof(buf));
+	if (err)
+		return err;
+
+	LVOS_TP_START(PROC_CREATE_FILE_FAILED, &clnt_entry);
+	clnt_entry = proc_mkdir(buf, enfs_proc_parent);
+	LVOS_TP_END;
+	if (clnt_entry == NULL)
+		return -EINVAL;
+
+	LVOS_TP_START(PROC_CREATE_FILE_STAT_FAILED, &stat_entry);
+	stat_entry = proc_create_data("stat", 0, clnt_entry, &rpc_proc_fops, clnt);
+	LVOS_TP_END;
+	if (stat_entry == NULL)
+		goto err_remove_dir;
+
+	LVOS_TP_START(PROC_CREATE_FILE_PATH_FAILED, &stat_entry);
+	stat_entry = proc_create_data("path", 0, clnt_entry, &rpc_show_path_fops, clnt);
+	LVOS_TP_END;
+	if (stat_entry == NULL)
+		goto err_remove_dir;
+
+	return 0;
+
+err_remove_dir:
+	/* don't leak the half-built directory */
+	remove_proc_subtree(buf, enfs_proc_parent);
+	return -EINVAL;
+}
+
+/*
+ * Account a completed RPC task into its transport's iostat buckets
+ * and stamp the context with the completion time in milliseconds.
+ */
+void enfs_count_iostat(struct rpc_task *task)
+{
+	struct enfs_xprt_context *ctx = xprt_get_reserve_context(task->tk_xprt);
+
+	if (!ctx || !ctx->stats)
+		return;
+
+	rpc_count_iostats(task, ctx->stats);
+	ctx->lastTime = ktime_to_ms(ktime_get());
+}
+
+/* Remove /proc/enfs/<peer-ip>_<clid> and everything underneath it. */
+static void enfs_proc_delete_file(struct rpc_clnt *clnt)
+{
+	char name[128];
+
+	if (clnt_proc_name(clnt, name, sizeof(name)) != 0) {
+		pr_err("gen clnt name failed.\n");
+		return;
+	}
+	remove_proc_subtree(name, enfs_proc_parent);
+}
+
+// create proc file "/proc/enfs/[mount_ip]_[id]/stat"
+int enfs_proc_create_clnt(struct rpc_clnt *clnt)
+{
+	int err;
+
+	err = enfs_proc_create_file(clnt);
+	if (err) {
+		/* was a bare printk() without a log level or context */
+		pr_err("enfs: create client proc files failed %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Drop the proc files and link-capability reference of an
+ * enfs-enabled client; no-op for ordinary clients. */
+void enfs_proc_delete_clnt(struct rpc_clnt *clnt)
+{
+	struct rpc_clnt_reserve *clnt_reserve = (struct rpc_clnt_reserve *)clnt;
+	if (clnt_reserve->cl_enfs == 1) {
+		enfs_proc_delete_file(clnt);
+		enfs_clnt_release_linkcap(clnt);
+	}
+}
+
+/* Usage text returned when reading the "shardview" debug file. */
+static int shardview_proc_help(struct seq_file *seq, void *v)
+{
+	seq_printf(
+		seq, "%s\n%s\n%s\n%s\n", "usage: uuidinfo [uuid]",
+		"usage: fsinfo [fsid]",
+		"usage: shardinfo [cluster id] [storage pool id] [start shard index]",
+		"usage: lifinfo [ipaddr]");
+	return 0;
+}
+
+/* proc open for "shardview"; pde_data()/PDE_DATA() differs by kernel. */
+static int shardview_proc_open(struct inode *inode, struct file *file)
+{
+#ifdef ENFS_OPENEULER_660
+	void *data = pde_data(inode);
+#else
+	void *data = PDE_DATA(inode);
+#endif
+	return single_open(file, shardview_proc_help, data);
+}
+
+/*
+ * /proc write handler for the "shardview" debug file: copy the
+ * command from userspace, strip newlines (via the shared helper that
+ * was previously open-coded here) and dispatch it.
+ * An unrecognized command now returns -EINVAL; -EFAULT is reserved
+ * for a genuinely bad user pointer.
+ */
+static ssize_t shardview_proc_write(struct file *file,
+	const char __user *user_buf, size_t len,
+	loff_t *offset)
+{
+	int ret;
+	char buffer[128];
+
+	if (len >= sizeof(buffer))
+		return -E2BIG;
+
+	if (copy_from_user(buffer, user_buf, len) != 0)
+		return -EFAULT;
+
+	buffer[len] = '\0';
+	trim_newline_ch(buffer, len);
+	enfs_log_info("command:%s.\n", buffer);
+	ret = enfs_debug_match_cmd(buffer, len);
+	if (ret != 0)
+		return -EINVAL;
+	return len;
+}
+
+/* "shardview" file ops: proc_ops on newer kernels, file_operations otherwise. */
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+static const struct proc_ops shardview_proc_fops = {
+	.proc_flags = PROC_ENTRY_PERMANENT,
+	.proc_open = shardview_proc_open,
+	.proc_read = seq_read,
+	.proc_lseek = seq_lseek,
+	.proc_release = single_release,
+	.proc_write = shardview_proc_write,
+};
+#else
+static const struct file_operations shardview_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = shardview_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = shardview_proc_write,
+};
+#endif
+
+/* Create /proc/enfs (plus the "shardview" entry in debug builds).
+ * LVOS_TP_* are fault-injection test points. */
+static int enfs_proc_create_parent(void)
+{
+	struct proc_dir_entry *stat_entry;
+
+	LVOS_TP_START(PROC_CREATE_ENFS_FAILED, &enfs_proc_parent);
+	enfs_proc_parent = proc_mkdir(ENFS_PROC_DIR, NULL);
+	LVOS_TP_END;
+	if (enfs_proc_parent == NULL) {
+		printk(KERN_ERR "Enfs create proc dir err\n");
+		return -ENOMEM;
+	}
+#ifdef NFS_CLIENT_DEBUG
+	stat_entry = proc_create_data("shardview", 0, enfs_proc_parent, &shardview_proc_fops, NULL);
+	if (stat_entry == NULL) {
+		/* undo the directory so init can be retried cleanly */
+		proc_remove(enfs_proc_parent);
+		enfs_proc_parent = NULL;
+		return -EINVAL;
+	}
+#endif // NFS_CLIENT_DEBUG
+	return 0;
+}
+
+/* Remove /proc/enfs (and "shardview" in debug builds). */
+static void enfs_proc_delete_parent(void)
+{
+#ifdef NFS_CLIENT_DEBUG
+	remove_proc_entry("shardview", enfs_proc_parent);
+#endif // NFS_CLIENT_DEBUG
+	remove_proc_entry(ENFS_PROC_DIR, NULL);
+}
+
+/*
+ * Iterator callback used at module init: (re)create proc files for
+ * every already-registered enfs-enabled client and take its link
+ * capability reference.  A creation failure is logged instead of
+ * being silently dropped, but does not abort the walk — one bad
+ * client must not hide the others.
+ */
+static int enfs_proc_init_create_clnt(struct rpc_clnt *clnt, void *data)
+{
+	struct rpc_clnt_reserve *clnt_reserve = (struct rpc_clnt_reserve *)clnt;
+	int err;
+
+	if (clnt_reserve->cl_enfs == 1) {
+		err = enfs_proc_create_file(clnt);
+		if (err)
+			pr_err("enfs: create proc for clnt %d failed %d\n",
+				clnt->cl_clid, err);
+		enfs_clnt_get_linkcap(clnt);
+	}
+	return 0;
+}
+
+/* Iterator callback at module exit: drop the proc files of every
+ * enfs-enabled client. */
+static int enfs_proc_destroy_clnt(struct rpc_clnt *clnt, void *data)
+{
+	struct rpc_clnt_reserve *reserve = (struct rpc_clnt_reserve *)clnt;
+
+	if (reserve->cl_enfs == 1)
+		enfs_proc_delete_file(clnt);
+	return 0;
+}
+
+/*
+ * Walk every rpc_clnt in every network namespace and call @fn on it.
+ * @fn runs under rcu_read_lock and the per-net rpc_client_lock
+ * spinlock, so it must not sleep.  @fn's return value is ignored;
+ * the walk always visits all clients.
+ */
+void enfs_for_each_rpc_clnt(int (*fn)(struct rpc_clnt *clnt, void *data), void *data)
+{
+	struct net *net;
+	struct sunrpc_net *sn;
+	struct rpc_clnt *clnt;
+
+	rcu_read_lock();
+	for_each_net_rcu(net)
+	{
+		sn = net_generic(net, sunrpc_net_id);
+		if (sn == NULL) {
+			continue;
+		}
+		spin_lock(&sn->rpc_client_lock);
+		list_for_each_entry (clnt, &sn->all_clients, cl_clients) {
+			fn(clnt, data);
+		}
+		spin_unlock(&sn->rpc_client_lock);
+	}
+	rcu_read_unlock();
+}
+
+/* Set up /proc/enfs and populate it for already-registered clients. */
+int enfs_proc_init(void)
+{
+	int err = enfs_proc_create_parent();
+
+	if (err)
+		return err;
+
+	enfs_for_each_rpc_clnt(enfs_proc_init_create_clnt, NULL);
+	return 0;
+}
+
+/* Tear down every per-client proc entry and the /proc/enfs directory. */
+void enfs_proc_exit(void)
+{
+	enfs_for_each_rpc_clnt(enfs_proc_destroy_clnt, NULL);
+	enfs_proc_delete_parent();
+}
diff --git a/fs/nfs/enfs/multipath/path_mgmt/enfs_remount.c b/fs/nfs/enfs/multipath/path_mgmt/enfs_remount.c
new file mode 100644
index 0000000..32694f9
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/enfs_remount.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: remount ip source file
+ * Author: y00583252
+ * Create: 2023-08-12
+ */
+#include "enfs_remount.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs.h"
+#include "enfs_log.h"
+#include "enfs_multipath.h"
+#include "enfs_multipath_parse.h"
+#include "enfs_path.h"
+#include "enfs_proc.h"
+#include "enfs_multipath_client.h"
+#include "enfs_tp_common.h"
+#include "enfs_config.h"
+
+/*
+ * Return true when a transport's (src, dst) address pair is no longer
+ * present in the remounted ip lists and the transport should be
+ * removed.  For RDMA the local side is not compared (treated as a
+ * match).  If either list is empty nothing is deletable (false).
+ */
+static bool enfs_rpc_xprt_switch_need_delete_addr(struct multipath_mount_options *enfs_option, struct sockaddr *dstaddr, struct sockaddr *srcaddr, u32 protocol)
+{
+	int i;
+	bool find_same_ip = false;
+	int32_t local_total;
+	int32_t remote_total;
+	local_total = enfs_option->local_ip_list->count;
+	remote_total = enfs_option->remote_ip_list->count;
+	if (local_total == 0 || remote_total == 0) {
+		pr_err("no ip list is present.\n");
+		return false;
+	}
+
+	/* first pass: is the source address still configured? */
+	for (i = 0; i < local_total; i++) {
+		if (protocol == XPRT_TRANSPORT_RDMA) {
+			find_same_ip = true;
+			break;
+		}
+		find_same_ip = rpc_cmp_addr((struct sockaddr *)&enfs_option->local_ip_list->address[i], srcaddr);
+		if (find_same_ip) {
+			break;
+		}
+	}
+
+	if (find_same_ip == false) {
+		return true;
+	}
+
+	/* second pass: is the destination address still configured? */
+	find_same_ip = false;
+	for (i = 0; i < remote_total; i++) {
+		find_same_ip = rpc_cmp_addr((struct sockaddr *)&enfs_option->remote_ip_list->address[i], dstaddr);
+		if (find_same_ip) {
+			break;
+		}
+	}
+
+	if (find_same_ip == false) {
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Called under rcu_read_lock from the remount path.  Remove @xprt
+ * from @xps when its address pair is no longer in the remounted ip
+ * lists.  Returns true when the xprt was removed (or is the main
+ * xprt, which never counts as a survivor slot), false when it stays.
+ * A transport without an enfs context cannot be address-compared, so
+ * it is kept (the original dereferenced ctx unchecked).
+ */
+static bool enfs_delete_xprt_from_switch(struct rpc_xprt *xprt, void *enfs_option, struct rpc_xprt_switch *xps)
+{
+	struct enfs_xprt_context *ctx = NULL;
+	struct multipath_mount_options *mopt =
+		(struct multipath_mount_options *)enfs_option;
+
+	if (enfs_is_main_xprt(xprt))
+		return true;
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (ctx == NULL) {
+		enfs_log_debug("multipath_context is null.\n");
+		return false;
+	}
+	if (enfs_rpc_xprt_switch_need_delete_addr(
+		mopt, (struct sockaddr *)&xprt->addr,
+		(struct sockaddr *)&ctx->srcaddr, ctx->protocol)) {
+		print_enfs_multipath_addr((struct sockaddr *)&ctx->srcaddr,
+			(struct sockaddr *)&xprt->addr);
+		LVOS_TP_START(REMOUNT_REMOVE_XPRT_FAILED,
+			&ctx); // just matches the parameter; ctx is not used.
+#ifdef ENFS_OPENEULER_660
+		rpc_xprt_switch_remove_xprt(xps, xprt, false);
+#else
+		rpc_xprt_switch_remove_xprt(xps, xprt);
+#endif
+		enfs_link_count_add(-1);
+		LVOS_TP_END;
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Prune the client's transport list after a remount: keep at most
+ * (configured per-mount link count - 1) extra transports whose
+ * addresses survive in the new ip lists; delete everything else.
+ * NOTE(review): transports are removed while iterating the RCU list
+ * with list_for_each_entry_rcu under rcu_read_lock only — removal is
+ * assumed safe under the switch's own locking; confirm against
+ * rpc_xprt_switch_remove_xprt().
+ */
+void enfs_clnt_delete_obsolete_xprts(struct nfs_client *nfs_client, void *enfs_option)
+{
+	int xprt_count = 0;
+	struct rpc_xprt *pos = NULL;
+	struct rpc_xprt_switch *xps = NULL;
+	rcu_read_lock();
+	xps = xprt_switch_get(rcu_dereference(nfs_client->cl_rpcclient->cl_xpi.xpi_xpswitch));
+	if (xps == NULL) {
+		rcu_read_unlock();
+		/* xprt_switch_put() tolerates NULL */
+		xprt_switch_put(xps);
+		return;
+	}
+	list_for_each_entry_rcu(pos, &xps->xps_xprt_list, xprt_switch) {
+		if (xprt_count < (enfs_get_config_link_count_per_mount() - 1)) {
+			/* survivors below the cap occupy a slot */
+			if (enfs_delete_xprt_from_switch(pos, enfs_option, xps) == false) {
+				xprt_count++;
+			}
+		} else {
+			/* everything past the cap is removed unconditionally */
+#ifdef ENFS_OPENEULER_660
+			rpc_xprt_switch_remove_xprt(xps, pos, false);
+#else
+			rpc_xprt_switch_remove_xprt(xps, pos);
+#endif
+			enfs_link_count_add(-1);
+		}
+	}
+	rcu_read_unlock();
+	xprt_switch_put(xps);
+}
+
+/*
+ * Entry point for "mount -o remount": decide whether the ip lists
+ * actually changed, resolve configured DNS names, then rebuild the
+ * transport list via enfs_remount_iplist().  Returns 0 when nothing
+ * needs doing or when DNS resolution failed (retried on the next DNS
+ * update), negative errno on invalid state.
+ */
+int enfs_remount(struct nfs_client *nfs_client, void *enfs_option)
+{
+	int ret;
+	struct sockaddr_storage ss;
+	struct multipath_mount_options *remount_opt = enfs_option;
+	struct multipath_client_info *client_info = nfs_client->cl_multipath_data;
+
+	/* mount is not use multipath */
+	if (client_info == NULL || enfs_option == NULL) {
+		enfs_log_error("mount information or remount information is empty.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Do not need remount if not choose multitpath when auto fill local addr.
+	 */
+	if (remount_opt->local_ip_list->count == 0 &&
+		remount_opt->remote_ip_list->count == 0 &&
+		nfs_multipath_dns_list_info_match(remount_opt->pRemoteDnsInfo,
+			client_info->pRemoteDnsInfo) &&
+		client_info->fill_local == 1) {
+		return 0;
+	}
+	client_info->fill_local = 0;
+
+	if (remount_opt->pRemoteDnsInfo->dnsNameCount != 0) {
+		/* resolve against the family of the current peer address */
+		rpc_peeraddr(nfs_client->cl_rpcclient, (struct sockaddr *)&ss,
+			sizeof(ss));
+		ret = multipath_query_dns(remount_opt, ss.ss_family, false, nfs_client->cl_rpcclient);
+		if (ret != 0) {
+			enfs_log_error("remount err:%d, waiting for the next dns update.\n",
+				ret);
+			return 0;
+		}
+	}
+
+	return enfs_remount_iplist(nfs_client, enfs_option);
+}
+
+/*
+ * (Re)build the transport list for @nfs_client from the remount ip
+ * lists: create missing links, prune obsolete ones and remember the
+ * resulting lists in the client info.  Returns 0 on success or a
+ * negative errno.
+ * Bug fix: rpc_peeraddr() returns the number of bytes copied, so a
+ * return of 0 means failure — the original logged the failure but
+ * then returned that 0 (i.e. success) to the caller.
+ */
+int enfs_remount_iplist(struct nfs_client *nfs_client, void *enfs_option)
+{
+	int err = 0;
+	char servername[48];
+	struct multipath_mount_options *remount_opt = enfs_option;
+	struct multipath_client_info *client_info = nfs_client->cl_multipath_data;
+	struct xprt_create xprtargs;
+	struct rpc_create_args args = {
+		.protocol = nfs_client->cl_proto,
+		.net = nfs_client->cl_net,
+		.addrsize = nfs_client->cl_addrlen,
+		.servername = nfs_client->cl_hostname,
+	};
+	memset(&xprtargs, 0, sizeof(struct xprt_create));
+
+	//mount is not use multipath
+	if (client_info == NULL || enfs_option == NULL) {
+		enfs_log_error("mount information or remount information is empty.\n");
+		return -EINVAL;
+	}
+
+	//remount : localaddrs and remoteaddrs are empty
+	if (remount_opt->local_ip_list->count == 0 && remount_opt->remote_ip_list->count == 0) {
+		enfs_log_info("remount local_ip_list and remote_ip_list are NULL\n");
+		return 0;
+	}
+
+	err = enfs_config_xprt_create_args(&xprtargs, &args, servername, sizeof(servername));
+	if (err) {
+		enfs_log_error("config_xprt_create failed! errno:%d\n", err);
+		return err;
+	}
+
+	/* inherit previously-known addresses where the remount left gaps */
+	if (remount_opt->local_ip_list->count == 0 && client_info->local_ip_list->count != 0) {
+		memcpy(remount_opt->local_ip_list, client_info->local_ip_list, sizeof(struct nfs_ip_list));
+	}
+
+	if (remount_opt->remote_ip_list->count == 0) {
+		if (client_info->remote_ip_list->count == 0) {
+			/* rpc_peeraddr() returns bytes copied; 0 == failure */
+			err = rpc_peeraddr(nfs_client->cl_rpcclient, (struct sockaddr *)&remount_opt->remote_ip_list->address[0], sizeof(struct sockaddr_storage));
+			if (err == 0) {
+				enfs_log_error("get clnt dstaddr failed\n");
+				return -EINVAL;
+			}
+			remount_opt->remote_ip_list->count = 1;
+		} else {
+			memcpy(remount_opt->remote_ip_list, client_info->remote_ip_list, sizeof(struct nfs_ip_list));
+		}
+	}
+
+	enfs_log_info("Remount creating new links...\n");
+	enfs_xprt_ippair_create(&xprtargs, nfs_client->cl_rpcclient, remount_opt);
+
+	enfs_log_info("Remount deleting obsolete links...\n");
+	enfs_clnt_delete_obsolete_xprts(nfs_client, remount_opt);
+
+	memcpy(client_info->local_ip_list, remount_opt->local_ip_list, sizeof(struct nfs_ip_list));
+	memcpy(client_info->remote_ip_list, remount_opt->remote_ip_list, sizeof(struct nfs_ip_list));
+	memcpy(client_info->pRemoteDnsInfo, remount_opt->pRemoteDnsInfo, sizeof(NFS_ROUTE_DNS_INFO_S));
+
+	return 0;
+}
diff --git a/fs/nfs/enfs/multipath/path_mgmt/exten_call.c b/fs/nfs/enfs/multipath/path_mgmt/exten_call.c
new file mode 100644
index 0000000..c076eb6
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/exten_call.c
@@ -0,0 +1,1018 @@
+
+#include
+#include
+#include
+#include
+#include
+
+#include "enfs_log.h"
+#include "exten_call.h"
+#include "enfs_tp_common.h"
+#include "enfs_config.h"
+#include "enfs.h"
+
+// TODO:Adjust the code structure based on the open-source format.
+
+
+// TODO:use linux header
+#define MAX_IPV6_ADDR_LEN (64)
+#define EOPNOTSUPP 95
+#define ENFS_DNS_MAX_NAME_LEN 256
+#define NUM0 0
+#define NUM1 1
+
+/* Version-specific response decoders (see g_decodeFuncByVersion). */
+int NfsExtendDecodeFsShardV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream);
+int NfsExtendDecodeLifInfoV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream);
+int NfsExtendDecodeDnsInfoV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream);
+int NfsExtendDecodeLsId(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream);
+
+/* Per-opcode entry points that select a version-specific decoder. */
+int EnfsGetFsInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream);
+int EnfsGetLifInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream);
+int EnfsQueryDnsInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream);
+int EnfsGetLsIdDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream);
+
+typedef int(*ENfsDecodeFunc)(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream);
+typedef int(*ENfsExtendOpDecode)(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream);
+
+typedef struct {
+	ENfsExtendOpDecode decodeFunc;
+} EnfsExtendOpDecodeProc;
+
+typedef struct {
+	ENfsDecodeFunc fsShardInfoFunc;
+	ENfsDecodeFunc lifInfoFunc;
+	ENfsDecodeFunc dnsInfoFunc;
+	ENfsDecodeFunc lsVersionFunc;
+} EnfsExtendProcVersionFunc;
+
+/* Dispatch table indexed by extend opcode. */
+static EnfsExtendOpDecodeProc g_decodeFuncByOpCode[] = {
+	[NFS3_GET_FSINFO_OP] = {
+		.decodeFunc = EnfsGetFsInfoDecode
+	},
+	[NFS3_GET_LIF_VIEW_OP] = {
+		.decodeFunc = EnfsGetLifInfoDecode
+	},
+	[NFS_ENFS_QUERY_DNS_OP] = {
+		.decodeFunc = EnfsQueryDnsInfoDecode
+	},
+	[NFS3_GET_LS_VERSION_OP] = {
+		.decodeFunc = EnfsGetLsIdDecode
+	}
+};
+
+/* Dispatch table indexed by protocol version.  Version 0 has no
+ * lsVersionFunc (it stays NULL) — callers must check before calling. */
+static EnfsExtendProcVersionFunc g_decodeFuncByVersion[] = {
+	[NUM0] = {
+		.fsShardInfoFunc = NfsExtendDecodeFsShardV1,
+		.lifInfoFunc = NfsExtendDecodeLifInfoV1,
+		.dnsInfoFunc = NfsExtendDecodeDnsInfoV1,
+	},
+	[NUM1] = {
+		.fsShardInfoFunc = NfsExtendDecodeFsShardV1,
+		.lifInfoFunc = NfsExtendDecodeLifInfoV1,
+		.dnsInfoFunc = NfsExtendDecodeDnsInfoV1,
+		.lsVersionFunc = NfsExtendDecodeLsId,
+	},
+};
+
+/*
+ * Encode an EXTEND3args request into @pbuf (length @buflen).
+ * Returns false (0) on success and true (non-zero) when the XDR
+ * stream runs out of space, matching this file's caller convention.
+ * Bug fix: the three per-opcode xdr_reserve_space() results were
+ * dereferenced unchecked and could write through NULL on a short
+ * buffer; every reservation is now verified.
+ */
+int NfsExtendProcInfoExtendEncode(char *pbuf, int buflen, EXTEND3args *pObj)
+{
+	__be32 *start;
+	struct xdr_buf xdrBuf;
+	struct xdr_stream xdrStream;
+	uint32_t opcode = 0;
+	unsigned int quadlen;
+	unsigned int padding;
+	uint32_t len;
+
+	xdr_buf_init(&xdrBuf, pbuf, buflen);
+	xdrBuf.head[0].iov_len = 0;
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+	xdr_init_encode(&xdrStream, &xdrBuf, NULL, NULL);
+#else
+	xdr_init_encode(&xdrStream, &xdrBuf, NULL);
+#endif
+
+	/* 8 bytes: opcode + version */
+	start = xdr_reserve_space(&xdrStream, 8);
+	if (unlikely(!start)) {
+		return true;
+	}
+	*start++ = cpu_to_be32(pObj->opcode);
+	*start++ = cpu_to_be32(pObj->version);
+
+	opcode = pObj->opcode;
+
+	if (opcode == NFS3_GET_FSINFO_OP) {
+		start = xdr_reserve_space(&xdrStream, sizeof(FILE_UUID));
+		if (unlikely(!start)) {
+			return true;
+		}
+		len = sizeof(FILE_UUID) - sizeof(uint32_t);
+		quadlen = XDR_QUADLEN(len);
+		padding = (quadlen << 2) - len;
+		memcpy(start, pObj->extend_args_u.Uuid.data, len);
+		if (padding != 0) {
+			memset((char *)start + len, 0, padding);
+		}
+		start += quadlen;
+		*start++ = cpu_to_be32(pObj->extend_args_u.Uuid.dataLen);
+		enfs_print_uuid(&pObj->extend_args_u.Uuid);
+	}
+
+	if (opcode == NFS3_GET_LIF_VIEW_OP) {
+		start = xdr_reserve_space(&xdrStream, 8 + pObj->extend_args_u.lifArgs.ipNumber * MAX_IPV6_ADDR_LEN);
+		if (unlikely(!start)) {
+			return true;
+		}
+		*start++ = cpu_to_be32(pObj->extend_args_u.lifArgs.tenantId);
+		*start++ = cpu_to_be32(pObj->extend_args_u.lifArgs.ipNumber);
+		len = pObj->extend_args_u.lifArgs.ipNumber * MAX_IPV6_ADDR_LEN;
+		quadlen = XDR_QUADLEN(len);
+		padding = (quadlen << 2) - len;
+		memcpy(start, pObj->extend_args_u.lifArgs.ipAddr, len);
+		if (padding != 0) {
+			memset((char *)start + len, 0, padding);
+		}
+		start += quadlen;
+	}
+
+	if (opcode == NFS_ENFS_QUERY_DNS_OP) {
+		// 8 means dnsargs's type and count
+		start = xdr_reserve_space(&xdrStream, 8 + pObj->extend_args_u.dnsArgs.dnsNameCount * ENFS_DNS_MAX_NAME_LEN);
+		if (unlikely(!start)) {
+			return true;
+		}
+		*start++ = cpu_to_be32(pObj->extend_args_u.dnsArgs.ipType);
+		*start++ = cpu_to_be32(pObj->extend_args_u.dnsArgs.dnsNameCount);
+		len = pObj->extend_args_u.dnsArgs.dnsNameCount * ENFS_DNS_MAX_NAME_LEN;
+		quadlen = XDR_QUADLEN(len);
+		// << 2 converts XDR quad words back to bytes
+		padding = (quadlen << 2) - len;
+		memcpy(start, pObj->extend_args_u.dnsArgs.dnsName, len);
+		if (padding != 0) {
+			memset((char *)start + len, 0, padding);
+		}
+		start += quadlen;
+	}
+
+	return false;
+}
+
+/*
+ * Decode a V1 GET_FSINFO response: a 24-byte fixed header (clusterId
+ * u64, storagePoolId/fsId/tenantId/shardNumber u32) followed by
+ * shardNumber (lsid u64, cpuId u32) records.  Allocates *extend3ResOut;
+ * the caller owns it and must kfree it.  Returns 0 on success,
+ * true (1) on a short XDR buffer, -ENOMEM on allocation failure.
+ * shardNumber is server-controlled; oversized values fail the second
+ * xdr_inline_decode() length check before the allocation is sized.
+ */
+int NfsExtendDecodeFsShardV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+	uint32_t i;
+	uint32_t extend3ResLen = 0;
+	uint64_t clusterId;
+	uint32_t storagePoolId;
+	uint32_t fsId;
+	uint32_t tenantId;
+	uint32_t shardNumber;
+	EXTEND3res *extend3Res = NULL;
+	p = xdr_inline_decode(xdrStream, 24);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	p = xdr_decode_hyper(p, &clusterId);
+	storagePoolId = be32_to_cpup(p++);
+	fsId = be32_to_cpup(p++);
+	tenantId = be32_to_cpup(p++);
+	shardNumber = be32_to_cpup(p++);
+
+	/* 12 wire bytes per shard record: u64 lsid + u32 cpuId */
+	p = xdr_inline_decode(xdrStream, shardNumber * (sizeof(uint64_t) + sizeof(uint32_t)));
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	extend3ResLen = sizeof(EXTEND3res) + sizeof(FS_SHARD_VIEW_SINGLE) * shardNumber;
+	extend3Res = (EXTEND3res *)kmalloc(extend3ResLen, GFP_KERNEL);
+	if (extend3Res == NULL) {
+		return -ENOMEM;
+	}
+	extend3Res->extend_res_u.fsInfo.clusterId = clusterId;
+	extend3Res->extend_res_u.fsInfo.storagePoolId = storagePoolId;
+	extend3Res->extend_res_u.fsInfo.fsId = fsId;
+	extend3Res->extend_res_u.fsInfo.tenantId = tenantId;
+	extend3Res->extend_res_u.fsInfo.num = shardNumber;
+	for (i = 0;i < extend3Res->extend_res_u.fsInfo.num; i++) {
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.fsInfo.shardView[i].lsid);
+		extend3Res->extend_res_u.fsInfo.shardView[i].cpuId = be32_to_cpup(p++);
+	}
+	*extend3ResOut = extend3Res;
+	return 0;
+}
+
+/*
+ * Decode a legacy (pre-V1) GET_FSINFO response.  Same 24-byte header
+ * as the V1 decoder, but each shard record carries only an 8-byte
+ * lsid on the wire; cpuId is filled with INVALID_CPU_ID.  Allocates
+ * *extend3ResOut (caller kfrees).  Returns 0 on success, true (1) on
+ * a short XDR buffer, -ENOMEM on allocation failure.
+ */
+int NfsExtendDecodeFsShard(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+	uint32_t i;
+	uint32_t extend3ResLen = 0;
+	uint64_t clusterId;
+	uint32_t storagePoolId;
+	uint32_t fsId;
+	uint32_t tenantId;
+	uint32_t shardNumber;
+	EXTEND3res *extend3Res = NULL;
+	p = xdr_inline_decode(xdrStream, 24);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	p = xdr_decode_hyper(p, &clusterId);
+	storagePoolId = be32_to_cpup(p++);
+	fsId = be32_to_cpup(p++);
+	tenantId = be32_to_cpup(p++);
+	shardNumber = be32_to_cpup(p++);
+
+	/* 8 wire bytes per shard record: u64 lsid only */
+	p = xdr_inline_decode(xdrStream, shardNumber * 8);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	extend3ResLen = sizeof(EXTEND3res) + sizeof(FS_SHARD_VIEW_SINGLE) * shardNumber;
+	extend3Res = (EXTEND3res *)kmalloc(extend3ResLen, GFP_KERNEL);
+	if (extend3Res == NULL) {
+		return -ENOMEM;
+	}
+	extend3Res->extend_res_u.fsInfo.clusterId = clusterId;
+	extend3Res->extend_res_u.fsInfo.storagePoolId = storagePoolId;
+	extend3Res->extend_res_u.fsInfo.fsId = fsId;
+	extend3Res->extend_res_u.fsInfo.tenantId = tenantId;
+	extend3Res->extend_res_u.fsInfo.num = shardNumber;
+	for (i = 0;i < extend3Res->extend_res_u.fsInfo.num; i++) {
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.fsInfo.shardView[i].lsid);
+		extend3Res->extend_res_u.fsInfo.shardView[i].cpuId = INVALID_CPU_ID;
+	}
+	*extend3ResOut = extend3Res;
+	return 0;
+}
+
+/*
+ * Decode a V1 GET_LIF_VIEW reply (includes a per-port cpuId on the wire).
+ * Returns 0 on success, true (1) on a short reply, -ENOMEM on allocation
+ * failure; on success the caller owns and must kfree() *extend3ResOut.
+ */
+int NfsExtendDecodeLifInfoV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+ uint32_t i;
+ uint32_t extend3ResLen = 0;
+ uint32_t lifNum;
+ EXTEND3res *extend3Res = NULL;
+ // 4 bytes: lif port count
+ p = xdr_inline_decode(xdrStream, 4);
+ if (unlikely(p == NULL)) {
+ return true;
+ }
+ lifNum = be32_to_cpup(p++);
+
+ // NOTE(review): lifNum is wire-supplied and unvalidated, so both this
+ // reservation and extend3ResLen below can overflow for a hostile reply
+ // -- TODO add a bound check. Also, the loop below consumes 32 bytes per
+ // entry (4+4+8+4+8+4) while this reserves sizeof(LIF_PORT_INFO_SINGLE)
+ // per entry -- confirm the struct size matches the wire layout.
+ p = xdr_inline_decode(xdrStream, sizeof(LIF_PORT_INFO_SINGLE) * lifNum);
+ if (unlikely(p == NULL)) {
+ return true;
+ }
+ extend3ResLen = sizeof(EXTEND3res) + sizeof(LIF_PORT_INFO_SINGLE) * lifNum;
+ extend3Res = (EXTEND3res *)kmalloc(extend3ResLen, GFP_KERNEL);
+ if (extend3Res == NULL) {
+ return -ENOMEM;
+ }
+ extend3Res->extend_res_u.lifInfo.lifNumber = lifNum;
+ for (i = 0; i < extend3Res->extend_res_u.lifInfo.lifNumber; i++) {
+ extend3Res->extend_res_u.lifInfo.lifport[i].isfound = be32_to_cpup(p++);
+ extend3Res->extend_res_u.lifInfo.lifport[i].workStatus = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &extend3Res->extend_res_u.lifInfo.lifport[i].lsId);
+ extend3Res->extend_res_u.lifInfo.lifport[i].tenantId = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &extend3Res->extend_res_u.lifInfo.lifport[i].homeSiteWwn);
+ extend3Res->extend_res_u.lifInfo.lifport[i].cpuId = be32_to_cpup(p++);
+ }
+ *extend3ResOut = extend3Res;
+ return 0;
+}
+
+/*
+ * Decode a legacy GET_LIF_VIEW reply (no per-port cpuId on the wire; cpuId
+ * is filled with INVALID_CPU_ID).  Returns 0 on success, true (1) on a short
+ * reply, -ENOMEM on allocation failure; caller kfree()s *extend3ResOut.
+ */
+int NfsExtendDecodeLifInfo(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+	uint32_t i;
+	uint32_t extend3ResLen = 0;
+	uint32_t lifNum;
+	EXTEND3res *extend3Res = NULL;
+
+	/* 4 bytes: lif port count */
+	p = xdr_inline_decode(xdrStream, 4);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	lifNum = be32_to_cpup(p++);
+
+	/*
+	 * lifNum comes straight off the wire: bound it so the allocation size
+	 * computation below cannot overflow (previously unchecked).
+	 */
+	if (lifNum > (UINT_MAX - sizeof(EXTEND3res)) / sizeof(LIF_PORT_INFO_SINGLE)) {
+		return true;
+	}
+
+	/*
+	 * NOTE(review): 32 bytes are reserved per entry, but the loop below
+	 * consumes 28 (4+4+8+4+8) -- presumably the wire entry carries a
+	 * trailing cpuId this legacy decoder ignores; confirm against the
+	 * server-side encoder.
+	 */
+	p = xdr_inline_decode(xdrStream, 32 * lifNum);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	extend3ResLen = sizeof(EXTEND3res) + sizeof(LIF_PORT_INFO_SINGLE) * lifNum;
+	extend3Res = kmalloc(extend3ResLen, GFP_KERNEL);
+	if (extend3Res == NULL) {
+		return -ENOMEM;
+	}
+	extend3Res->extend_res_u.lifInfo.lifNumber = lifNum;
+	for (i = 0; i < lifNum; i++) {
+		extend3Res->extend_res_u.lifInfo.lifport[i].isfound = be32_to_cpup(p++);
+		extend3Res->extend_res_u.lifInfo.lifport[i].workStatus = be32_to_cpup(p++);
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.lifInfo.lifport[i].lsId);
+		extend3Res->extend_res_u.lifInfo.lifport[i].tenantId = be32_to_cpup(p++);
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.lifInfo.lifport[i].homeSiteWwn);
+		/* Legacy servers do not report a per-port CPU id. */
+		extend3Res->extend_res_u.lifInfo.lifport[i].cpuId = INVALID_CPU_ID;
+	}
+	*extend3ResOut = extend3Res;
+	return 0;
+}
+
+/*
+ * Decode a V1 DNS-query reply into a freshly allocated EXTEND3res.
+ * Returns 0 on success, true (1) on an empty/short reply, -ENOMEM on
+ * allocation failure; on success the caller owns and kfree()s the result.
+ */
+int NfsExtendDecodeDnsInfoV1(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+	uint32_t i;
+	uint32_t extend3ResLen = 0;
+	uint32_t ipNumber;
+	EXTEND3res *extend3Res = NULL;
+
+	// 4 means dnsres's ipNumber
+	p = xdr_inline_decode(xdrStream, 4);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	ipNumber = be32_to_cpup(p++);
+
+	/* Reject empty replies before allocating (the old code allocated first). */
+	if (ipNumber == 0) {
+		return true;
+	}
+	/* Bound the wire-supplied count so the allocation size cannot overflow. */
+	if (ipNumber > (UINT_MAX - sizeof(uint32_t) * 3) / sizeof(DNS_QUERY_IP_INFO_SINGLE)) {
+		return true;
+	}
+
+	/*
+	 * 3 uint32s: dnsres's op, vers and ipNumber.
+	 * NOTE(review): this sizes only the dns member of the result, unlike
+	 * the sibling decoders which use sizeof(EXTEND3res) -- confirm layout.
+	 */
+	extend3ResLen = sizeof(uint32_t) * 3 + sizeof(DNS_QUERY_IP_INFO_SINGLE) * ipNumber;
+	extend3Res = kmalloc(extend3ResLen, GFP_KERNEL);
+	if (extend3Res == NULL) {
+		return -ENOMEM;
+	}
+
+	extend3Res->extend_res_u.dnsQueryIpInfo.ipNumber = ipNumber;
+	for (i = 0; i < ipNumber; i++) {
+		// 4 means dnsres's cpuid
+		p = xdr_inline_decode(xdrStream, 4);
+		if (unlikely(p == NULL)) {
+			goto err_free;
+		}
+		extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[i].cpuId = be32_to_cpup(p++);
+
+		// 8 means dnsres's lsid
+		p = xdr_inline_decode(xdrStream, 8);
+		if (unlikely(p == NULL)) {
+			goto err_free;
+		}
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[i].lsId);
+
+		p = xdr_inline_decode(xdrStream, MAX_IPV6_ADDR_LEN);
+		if (unlikely(p == NULL)) {
+			goto err_free;
+		}
+		memcpy(extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[i].ipAddr, p, MAX_IPV6_ADDR_LEN);
+	}
+	*extend3ResOut = extend3Res;
+	return 0;
+
+err_free:
+	kfree(extend3Res);
+	return true;
+}
+
+/*
+ * Decode a GET_LS_VERSION reply.  Returns 0 on success, true (1) on a
+ * short/invalid reply (previously -ENOMEM, which mislabelled a decode error
+ * as an allocation failure), -ENOMEM on real allocation failure.
+ */
+int NfsExtendDecodeLsId(EXTEND3res **extend3ResOut, __be32 *p, struct xdr_stream *xdrStream)
+{
+	uint32_t i;
+	uint32_t extend3ResLen = 0;
+	uint32_t lsNum;
+	uint64_t clusterId;
+	EXTEND3res *extend3Res = NULL;
+
+	p = xdr_inline_decode(xdrStream, sizeof(uint32_t));
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	lsNum = be32_to_cpup(p++);
+
+	p = xdr_inline_decode(xdrStream, 8);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	p = xdr_decode_hyper(p, &clusterId);
+
+	/* Bound the wire-supplied count so the allocation size cannot overflow. */
+	if (lsNum > (UINT_MAX - sizeof(uint32_t) * 3 - sizeof(uint64_t)) / sizeof(EXTEND_GET_LS_VERSION_SINGLE)) {
+		return true;
+	}
+
+	p = xdr_inline_decode(xdrStream, (sizeof(EXTEND_GET_LS_VERSION_SINGLE)) * lsNum);
+	if (unlikely(p == NULL)) {
+		return true;
+	}
+	// clusterId and op and vers and lsNum
+	extend3ResLen = sizeof(uint32_t) * 3 + sizeof(uint64_t) + sizeof(EXTEND_GET_LS_VERSION_SINGLE) * lsNum;
+	extend3Res = kmalloc(extend3ResLen, GFP_KERNEL);
+	if (extend3Res == NULL) {
+		return -ENOMEM;
+	}
+	extend3Res->extend_res_u.lsView.num = lsNum;
+	extend3Res->extend_res_u.lsView.clusterId = clusterId;
+	for (i = 0; i < lsNum; i++) {
+		/*
+		 * NOTE(review): 12 bytes are consumed per entry (4-byte lsId +
+		 * 8-byte lsVersion) but sizeof(EXTEND_GET_LS_VERSION_SINGLE) is
+		 * reserved above -- confirm the struct matches the wire layout.
+		 */
+		extend3Res->extend_res_u.lsView.lsInfo[i].lsId = be32_to_cpup(p++);
+		p = xdr_decode_hyper(p, &extend3Res->extend_res_u.lsView.lsInfo[i].lsVersion);
+	}
+	*extend3ResOut = extend3Res;
+	return 0;
+}
+
+/* Dispatch GET_FSINFO reply decoding by negotiated server version. */
+int EnfsGetFsInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream)
+{
+	ENfsDecodeFunc decode = NULL;
+	uint32_t table_size = sizeof(g_decodeFuncByVersion) / sizeof(EnfsExtendProcVersionFunc);
+	uint32_t idx;
+
+	/* Pre-versioned servers use the legacy fs-shard layout. */
+	if (version < ENFS_SERVER_VERSION_BASE)
+		return NfsExtendDecodeFsShard(extend3ResOut, p, xdrStream);
+
+	/* Clamp newer-than-known versions to the last decoder in the table. */
+	idx = version - ENFS_SERVER_VERSION_BASE;
+	if (idx >= table_size)
+		idx = table_size - 1;
+
+	decode = g_decodeFuncByVersion[idx].fsShardInfoFunc;
+	if (!decode) {
+		enfs_log_error("Enfs getFsShard deocde func is null, resp version:%u", version);
+		return true;
+	}
+	return decode(extend3ResOut, p, xdrStream);
+}
+
+/* Dispatch GET_LIF_VIEW reply decoding by negotiated server version. */
+int EnfsGetLifInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream)
+{
+	ENfsDecodeFunc decode = NULL;
+	uint32_t table_size = sizeof(g_decodeFuncByVersion) / sizeof(EnfsExtendProcVersionFunc);
+	uint32_t idx;
+
+	/* Pre-versioned servers use the legacy lif-info layout. */
+	if (version < ENFS_SERVER_VERSION_BASE)
+		return NfsExtendDecodeLifInfo(extend3ResOut, p, xdrStream);
+
+	/* Clamp newer-than-known versions to the last decoder in the table. */
+	idx = version - ENFS_SERVER_VERSION_BASE;
+	if (idx >= table_size)
+		idx = table_size - 1;
+
+	decode = g_decodeFuncByVersion[idx].lifInfoFunc;
+	if (!decode) {
+		enfs_log_error("Enfs getLifInfo deocde func is null, resp version:%u", version);
+		return true;
+	}
+	return decode(extend3ResOut, p, xdrStream);
+}
+
+/* Dispatch DNS-query reply decoding; unsupported on pre-versioned servers. */
+int EnfsQueryDnsInfoDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream)
+{
+	ENfsDecodeFunc decode = NULL;
+	uint32_t table_size = sizeof(g_decodeFuncByVersion) / sizeof(EnfsExtendProcVersionFunc);
+	uint32_t idx;
+
+	if (version < ENFS_SERVER_VERSION_BASE) {
+		enfs_log_error("Enfs decode dnsInfo failed, server version(%u) unspported", version);
+		return true;
+	}
+
+	/* Clamp newer-than-known versions to the last decoder in the table. */
+	idx = version - ENFS_SERVER_VERSION_BASE;
+	if (idx >= table_size)
+		idx = table_size - 1;
+
+	decode = g_decodeFuncByVersion[idx].dnsInfoFunc;
+	if (!decode) {
+		enfs_log_error("Enfs getDnsInfo deocde func is null, resp version:%u", version);
+		return true;
+	}
+	return decode(extend3ResOut, p, xdrStream);
+}
+
+/* Dispatch GET_LS_VERSION reply decoding; unsupported on pre-versioned servers. */
+int EnfsGetLsIdDecode(EXTEND3res **extend3ResOut, uint32_t version, __be32 *p, struct xdr_stream *xdrStream)
+{
+	ENfsDecodeFunc decode = NULL;
+	uint32_t table_size = sizeof(g_decodeFuncByVersion) / sizeof(EnfsExtendProcVersionFunc);
+	uint32_t idx;
+
+	if (version < ENFS_SERVER_VERSION_BASE) {
+		/* Log messages previously said "dnsInfo" -- copy-paste from the dns path. */
+		enfs_log_error("Enfs decode lsId failed, server version(%u) unsupported", version);
+		return true;
+	}
+
+	/* Clamp newer-than-known versions to the last decoder in the table. */
+	idx = version - ENFS_SERVER_VERSION_BASE;
+	if (idx >= table_size)
+		idx = table_size - 1;
+
+	decode = g_decodeFuncByVersion[idx].lsVersionFunc;
+	if (!decode) {
+		enfs_log_error("Enfs getLsId decode func is null, resp version:%u", version);
+		return true;
+	}
+	return decode(extend3ResOut, p, xdrStream);
+}
+
+/*
+ * Validate the opcode of an extend reply before dispatching its decoder.
+ * version is currently unused but kept for interface stability.
+ * The old "(opCode < 0)" test was dead code: opCode is unsigned.
+ */
+int EnfsExtendDecodePreCheck(uint32_t version, uint32_t opCode)
+{
+	if (opCode >= (sizeof(g_decodeFuncByOpCode) / sizeof(EnfsExtendOpDecodeProc))) {
+		enfs_log_error("Extend op decode pre check opcode(%u) failed.", opCode);
+		return -ENOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * Top-level decoder for an extend-op reply buffer.
+ * Returns false (0) on success, true (1) on any decode failure, or a
+ * negative errno from the pre-check -- callers only test for non-zero.
+ * On success *extend3ResOut owns an allocation the caller must kfree().
+ */
+int NfsExtendProcInfoExtendDecode(char *buf, uint32_t bufLen, EXTEND3res **extend3ResOut)
+{
+ int ret = 0;
+ __be32 *p;
+ uint32_t opCode = 0;
+ uint32_t version = 0;
+ struct xdr_buf xdrBuf;
+ struct xdr_stream xdrStream;
+ ENfsExtendOpDecode func = NULL;
+ xdr_buf_init(&xdrBuf, buf, bufLen);
+ xdrBuf.len = bufLen;
+// xdr_init_decode() grew an extra rqst argument on newer kernels.
+#if (defined(ENFS_EULER_5_10) || defined(ENFS_OPENEULER_660))
+ xdr_init_decode(&xdrStream, &xdrBuf, NULL, NULL);
+#else
+ xdr_init_decode(&xdrStream, &xdrBuf, NULL);
+#endif
+ // 8 bytes: reply header = opcode(4) + version(4)
+ p = xdr_inline_decode(&xdrStream, 8);
+ if (unlikely(p == NULL)) {
+ return true;
+ }
+ opCode = be32_to_cpup(p++);
+ version = be32_to_cpup(p++);
+
+ ret = EnfsExtendDecodePreCheck(version, opCode);
+ if (ret) {
+ enfs_log_error("Enfs extend op decode pre check failed");
+ return ret;
+ }
+
+ // Dispatch to the per-opcode decoder; each handles its own versioning.
+ func = g_decodeFuncByOpCode[opCode].decodeFunc;
+ if (!func) {
+ enfs_log_error("Enfs deocde op(%u) func is null", opCode);
+ return true;
+ }
+ ret = func(extend3ResOut, version, p, &xdrStream);
+ if (ret) {
+ enfs_log_error("NfsExtendProcInfoExtendDecode failed, opCode:%u, version:%u, ret:%d",
+ opCode, version, ret);
+ return true;
+ }
+
+ return false;
+}
+
+#define EXTEND_CMD_MAX_BUF_LEN 819200 /* 800K */
+
+/* No-op completion callback: extend calls are driven synchronously by the
+ * submitter, so nothing needs to happen when the RPC task finishes. */
+static void
+rpc_default_callback(struct rpc_task *task, void *data)
+{
+}
+
+/* Callback table handed to rpc_run_task() for extend requests. */
+static const struct rpc_call_ops rpc_default_ops = {
+ .rpc_call_done = rpc_default_callback,
+};
+
+/*
+ * Send extend request by specified xprt.
+ */
+/*
+ * Issue an extend request pinned to one specific xprt.
+ * buf holds the encoded request on entry and the raw reply on return;
+ * *buflen is in/out (request length in, reply length out).
+ * Returns the task status (0 on success, negative errno on failure).
+ */
+int dorado_extend_route(struct rpc_clnt *clnt, struct rpc_xprt *xprt, char *buf,
+ int *buflen)
+{
+ int status;
+ struct rpc_task *task;
+ struct nfs_extend_xdr_arg xdr_arg = {0};
+
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_EXTEND],
+ .rpc_argp = &xdr_arg,
+ .rpc_resp = &xdr_arg,
+ };
+
+ // rpc_xprt forces the task onto the given transport instead of letting
+ // the client pick one; SOFT/SOFTCONN avoid hanging on a dead path.
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clnt,
+ .rpc_xprt = xprt,
+ .rpc_message = &msg,
+ .callback_ops = &rpc_default_ops,
+ .callback_data = NULL,
+ .flags = RPC_TASK_SOFT | RPC_TASK_TIMEOUT | RPC_TASK_SOFTCONN,
+ };
+
+ xdr_arg.buflen = *buflen;
+ xdr_arg.pBuf = buf;
+ xdr_arg.maxsize = EXTEND_CMD_MAX_BUF_LEN;
+
+ // rpc_run_task() waits here despite the default (non-ASYNC) flags.
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = task->tk_status;
+ if (status) {
+ enfs_log_info("NFS reply status:%d resp_len:%d \n", status,
+ xdr_arg.buflen);
+ }
+ *buflen = xdr_arg.buflen;
+ rpc_put_task(task);
+ return status;
+}
+
+/*
+ * Issue an extend request on whatever transport the client chooses.
+ * buf holds the encoded request on entry and the raw reply on return;
+ * *buflen is in/out.  Returns 0 on success or a negative errno.
+ */
+int dorado_extend_op(struct rpc_clnt *clnt, char *buf, int *buflen)
+{
+ int status;
+ struct nfs_extend_xdr_arg xdr_arg = {0};
+
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_EXTEND],
+ .rpc_argp = &xdr_arg,
+ .rpc_resp = &xdr_arg,
+ };
+
+ xdr_arg.buflen = *buflen;
+ xdr_arg.pBuf = buf;
+ xdr_arg.maxsize = EXTEND_CMD_MAX_BUF_LEN;
+
+ // LVOS_TP_* appear to be fault-injection test hooks that can override
+ // status -- presumably compiled out in production; confirm.
+ LVOS_TP_START(EXTEND_CALL_FAILED, &status);
+ LVOS_TP_START(EXTEND_NOT_SUPPORT, &status);
+ status = rpc_call_sync(
+ clnt, &msg, RPC_TASK_SOFT | RPC_TASK_TIMEOUT | RPC_TASK_SOFTCONN);
+ LVOS_TP_END;
+ LVOS_TP_END;
+ if (status) {
+ enfs_log_info("NFS reply status:%d resp_len:%d \n", status,
+ xdr_arg.buflen);
+ }
+ *buflen = xdr_arg.buflen;
+ return status;
+}
+
+/*
+ * Downgrade the request's protocol version to ENFS_V3 when the negotiated
+ * server version stored on the transport is the base version.
+ */
+void nego_enfs_version(struct rpc_clnt *clnt, EXTEND3args *args)
+{
+	struct enfs_xprt_context *xprt_ctx;
+
+	xprt_get(clnt->cl_xprt);
+	xprt_ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(clnt->cl_xprt);
+	if (xprt_ctx != NULL) {
+		if (xprt_ctx->version == ENFS_SERVER_VERSION_BASE)
+			args->version = ENFS_V3;
+	} else {
+		enfs_log_error("The xprt multipath ctx is not valid.\n");
+	}
+	xprt_put(clnt->cl_xprt);
+}
+
+/*
+ * Query the fs shard view for one file uuid.  On success *resDataOut owns a
+ * freshly allocated FS_SHARD_VIEW the caller must kfree().  Returns 0 on
+ * success, -EOPNOTSUPP/-ENOTSUPP when the server lacks NfsExtendOp, or
+ * another negative errno / decode error.
+ */
+int dorado_query_fs_shard(struct rpc_clnt *clnt, FILE_UUID *file_uuid,
+	FS_SHARD_VIEW **resDataOut)
+{
+	int ret;
+	char *buf = NULL;
+	EXTEND3res *extend3Res = NULL;
+	int bufLen = sizeof(EXTEND3args);
+	FS_SHARD_VIEW *resData = NULL;
+	EXTEND3args *args = NULL;
+
+	args = kmalloc(bufLen, GFP_KERNEL);
+	if (args == NULL) {
+		printk(KERN_ERR "%s:kmalloc arg cfailed.\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	buf = kmalloc(EXTEND_CMD_MAX_BUF_LEN, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "%s:kmalloc buf failed.\n", __FUNCTION__);
+		kfree(args); /* args used to leak on this path */
+		return -ENOMEM;
+	}
+
+	args->opcode = NFS3_GET_FSINFO_OP;
+	args->version = ENFS_VERSION_BUTT - 1;
+	args->extend_args_u.Uuid = *file_uuid;
+
+	nego_enfs_version(clnt, args);
+	ret = NfsExtendProcInfoExtendEncode(buf, bufLen, args);
+	if (ret) {
+		/* Encode failures were previously ignored and garbage was sent. */
+		printk(KERN_ERR "NfsExtendOp encode fsInfo args failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = dorado_extend_op(clnt, buf, &bufLen);
+	/* Older storage versions do not support NfsExtendOp and return these codes. */
+	if (ret == -EOPNOTSUPP || ret == -ENOTSUPP) {
+		// UpdateServerSupportStatus(ctx->devId, SERVER_NOT_SUPPORT);
+		printk(KERN_ERR "%s: no support NfsExtendOp.\n", __FUNCTION__);
+		goto out;
+	}
+
+	/* Any other transport/server error. */
+	if (ret) {
+		printk(KERN_ERR "NfsExtendOp get fsInfo failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = NfsExtendProcInfoExtendDecode(buf, bufLen, &extend3Res);
+	if (ret) {
+		goto out;
+	}
+
+	/* fsInfo.num was bounded by the decoder; size the copy from it. */
+	resData = kmalloc(sizeof(FS_SHARD_VIEW) +
+		sizeof(FS_SHARD_VIEW_SINGLE) * (extend3Res->extend_res_u.fsInfo.num), GFP_KERNEL);
+	if (resData == NULL) {
+		/* Previously dereferenced without a NULL check. */
+		ret = -ENOMEM;
+		goto out;
+	}
+	resData->clusterId = extend3Res->extend_res_u.fsInfo.clusterId;
+	resData->storagePoolId = extend3Res->extend_res_u.fsInfo.storagePoolId;
+	resData->fsId = extend3Res->extend_res_u.fsInfo.fsId;
+	resData->tenantId = extend3Res->extend_res_u.fsInfo.tenantId;
+	resData->num = extend3Res->extend_res_u.fsInfo.num;
+	memcpy(resData->shardView, extend3Res->extend_res_u.fsInfo.shardView,
+		sizeof(FS_SHARD_VIEW_SINGLE) * extend3Res->extend_res_u.fsInfo.num);
+	*resDataOut = resData;
+
+out:
+	kfree(extend3Res); /* kfree(NULL) is a no-op */
+	kfree(args);
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * Query the LS version view.  On success *resDataOut owns a freshly
+ * allocated EXTEND_GET_LS_VERSION the caller must kfree().
+ */
+int dorado_query_lsId(struct rpc_clnt *clnt, EXTEND_GET_LS_VERSION **resDataOut)
+{
+	int ret;
+	char *buf = NULL;
+	EXTEND3res *extend3Res = NULL;
+	/*
+	 * Request carries only opcode + version.
+	 * NOTE(review): args is allocated with just these 8 bytes yet typed as
+	 * EXTEND3args -- only opcode/version may be touched; confirm they are
+	 * the first two members.
+	 */
+	int bufLen = sizeof(uint32_t) * 2;
+	EXTEND_GET_LS_VERSION *resData = NULL;
+	EXTEND3args *args = NULL;
+
+	args = kmalloc(bufLen, GFP_KERNEL);
+	if (args == NULL) {
+		printk(KERN_ERR "%s:kmalloc arg cfailed.\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	buf = kmalloc(EXTEND_CMD_MAX_BUF_LEN, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "%s:kmalloc buf failed.\n", __FUNCTION__);
+		kfree(args); /* args used to leak on this path */
+		return -ENOMEM;
+	}
+
+	args->opcode = NFS3_GET_LS_VERSION_OP;
+	args->version = ENFS_VERSION_BUTT - 1;
+	ret = NfsExtendProcInfoExtendEncode(buf, bufLen, args);
+	if (ret) {
+		/* Encode failures were previously ignored and garbage was sent. */
+		printk(KERN_ERR "NfsExtendOp encode ls version args failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = dorado_extend_op(clnt, buf, &bufLen);
+	/* Older storage versions do not support NfsExtendOp and return these codes. */
+	if (ret == -EOPNOTSUPP || ret == -ENOTSUPP) {
+		printk(KERN_ERR "%s: no support NfsExtendOp.\n", __FUNCTION__);
+		goto out;
+	}
+
+	/* Any other transport/server error. */
+	if (ret) {
+		printk(KERN_ERR "NfsExtendOp get fsInfo failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = NfsExtendProcInfoExtendDecode(buf, bufLen, &extend3Res);
+	if (ret) {
+		goto out;
+	}
+
+	resData = kmalloc(sizeof(EXTEND_GET_LS_VERSION) +
+		sizeof(EXTEND_GET_LS_VERSION_SINGLE) * (extend3Res->extend_res_u.lsView.num), GFP_KERNEL);
+	if (resData == NULL) {
+		/* Previously dereferenced without a NULL check. */
+		ret = -ENOMEM;
+		goto out;
+	}
+	resData->num = extend3Res->extend_res_u.lsView.num;
+	resData->clusterId = extend3Res->extend_res_u.lsView.clusterId;
+	memcpy(resData->lsInfo, extend3Res->extend_res_u.lsView.lsInfo,
+		sizeof(EXTEND_GET_LS_VERSION_SINGLE) * extend3Res->extend_res_u.lsView.num);
+	*resDataOut = resData;
+
+out:
+	kfree(extend3Res); /* kfree(NULL) is a no-op */
+	kfree(args);
+	kfree(buf);
+	return ret;
+}
+/*
+ * Query the LIF port view for ipNumber addresses over one specific xprt.
+ * ipAddr is an array of ipNumber fixed-width address slots; results are
+ * copied into the caller-provided lifInfo array.
+ */
+int dorado_query_lifview(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+ char *ipAddr, uint32_t ipNumber,
+ LIF_PORT_INFO *lifInfo)
+{
+ int ret;
+ int i;
+ char *buf = NULL;
+ EXTEND3args *args = NULL;
+ EXTEND3res *extend3Res = NULL;
+ char *curIP;
+ // args sized as: op + vers + ipNumber + tenantId + address slots
+ int bufLen = sizeof(uint32_t) * 4 + ipNumber * MAX_IPV6_ADDR_LEN;
+
+ buf = (char *)kmalloc(EXTEND_CMD_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ enfs_log_error("kmalloc failed.\n");
+ return -ENOMEM;
+ }
+
+ args = (EXTEND3args *)kmalloc(bufLen, GFP_KERNEL);
+ if (args == NULL) {
+ kfree(buf);
+ printk(KERN_ERR "%s:kmalloc failed.\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ args->version = ENFS_VERSION_BUTT - 1;
+ args->opcode = NFS3_GET_LIF_VIEW_OP;
+ // NOTE(review): copy length uses IP_ADDRESS_LEN_MAX but the allocation
+ // above is sized with MAX_IPV6_ADDR_LEN -- if the two constants differ
+ // this can overflow args; confirm they are equal.
+ memcpy((void *)args->extend_args_u.lifArgs.ipAddr, (void *)ipAddr,
+ IP_ADDRESS_LEN_MAX * ipNumber);
+ args->extend_args_u.lifArgs.ipNumber = ipNumber;
+ args->extend_args_u.lifArgs.tenantId = 0; /* tenantId reserved */
+
+ nego_enfs_version(clnt, args);
+ // NOTE(review): the encode return value is overwritten unchecked below.
+ ret = NfsExtendProcInfoExtendEncode(buf, bufLen, args);
+
+ ret = dorado_extend_route(clnt, xprt, buf, &bufLen);
+ if (ret) {
+ printk(KERN_ERR "NfsExtendOp get lif view failed %d.\n", ret);
+ kfree(args);
+ kfree(buf);
+ return ret;
+ }
+
+ // decode
+ ret = NfsExtendProcInfoExtendDecode(buf, bufLen, &extend3Res);
+ if (ret) {
+ kfree(args);
+ kfree(extend3Res);
+ kfree(buf);
+ return ret;
+ }
+
+ curIP = ipAddr;
+ // NOTE(review): lifNumber comes from the reply but lifInfo[] was sized by
+ // the caller from ipNumber -- confirm the server never returns more
+ // entries than requested. The 64 stride is presumably the per-address
+ // slot width; confirm it equals MAX_IPV6_ADDR_LEN.
+ for (i = 0; i < extend3Res->extend_res_u.lifInfo.lifNumber; i++) {
+ curIP = &ipAddr[i*64];
+ // NOTE(review): strncpy does not NUL-terminate when curIP fills the
+ // whole slot -- the debug print below relies on termination.
+ strncpy(lifInfo[i].ipAddr, curIP, MAX_IPV6_ADDR_LEN);
+ lifInfo[i].lsId =
+ extend3Res->extend_res_u.lifInfo.lifport[i].lsId;
+ lifInfo[i].workStatus =
+ extend3Res->extend_res_u.lifInfo.lifport[i].workStatus;
+ lifInfo[i].wwn =
+ extend3Res->extend_res_u.lifInfo.lifport[i].homeSiteWwn;
+ lifInfo[i].cpuId =
+ extend3Res->extend_res_u.lifInfo.lifport[i].cpuId;
+ enfs_log_debug(
+ "enfs: query lif number: %u, ipaddr(%s):isfound(%u) "
+ "workStatus(%u) lsId(%llu) tenantId(%u) homeSiteWwn(%llu) cpuId(%u)",
+ extend3Res->extend_res_u.lifInfo.lifNumber, lifInfo[i].ipAddr,
+ extend3Res->extend_res_u.lifInfo.lifport[i].isfound,
+ extend3Res->extend_res_u.lifInfo.lifport[i].workStatus,
+ extend3Res->extend_res_u.lifInfo.lifport[i].lsId,
+ extend3Res->extend_res_u.lifInfo.lifport[i].tenantId,
+ extend3Res->extend_res_u.lifInfo.lifport[i].homeSiteWwn,
+ extend3Res->extend_res_u.lifInfo.lifport[i].cpuId);
+ }
+
+ kfree(args);
+ kfree(extend3Res);
+ kfree(buf);
+ return 0;
+}
+
+/*
+ * Query a single LIF and extract its ls-id, wwn and cpu id.
+ * Returns 0 on success; note that a link with workStatus == 0 also returns
+ * ret (which may be 0) without filling the outputs -- callers should treat
+ * unchanged outputs as "not found".
+ */
+int enfs_query_lifview(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+	char *ipaddr, uint64_t *lsid, uint64_t *wwn, uint32_t *cpuId)
+{
+	int ret;
+	/* Zero-init: on query failure workStatus was read uninitialized below. */
+	LIF_PORT_INFO lif_info = {0};
+
+	ret = dorado_query_lifview(clnt, xprt, ipaddr, 1, &lif_info);
+	if (ret || lif_info.workStatus == 0) {
+		enfs_log_info("err:%d status:%d\n", ret, lif_info.workStatus);
+		return ret;
+	}
+	*lsid = lif_info.lsId;
+	*wwn = lif_info.wwn;
+	*cpuId = lif_info.cpuId;
+	return 0;
+}
+
+/*
+ * Parse a hex string ("A1B2...") into a byte array.
+ * Returns 0 on success, -1 on bad arguments, odd-length input, a too-small
+ * output array, or a non-hex character.
+ */
+int scan_uuid(const char *str, uint8_t *arr, int arrlen)
+{
+	int ret;
+	int i;
+	uint32_t num;
+	char tmpbuf[3];
+	int str_len;
+
+	if (str == NULL || arr == NULL || arrlen <= 0) {
+		return -1;
+	}
+
+	str_len = strlen(str);
+
+	/* Two hex chars per output byte; the array must fit them all. */
+	if (str_len % 2 != 0 || arrlen < str_len / 2) {
+		return -1;
+	}
+
+	for (i = 0; i < str_len; i += 2) {
+		tmpbuf[0] = str[i];
+		tmpbuf[1] = str[i + 1];
+		tmpbuf[2] = '\0';
+
+		ret = sscanf(tmpbuf, "%X", &num);
+		/* sscanf can also return EOF (-1); the old "== 0" check missed it. */
+		if (ret != 1) {
+			return -1;
+		}
+
+		arr[i / 2] = (uint8_t)num;
+	}
+	return 0;
+}
+
+/*
+ * Render a FILE_UUID as uppercase hex into buf.
+ * Output is 2 * FILE_UUID_BUFF_LEN chars plus a NUL; the old check only
+ * required buflen >= FILE_UUID_BUFF_LEN, allowing a buffer overflow.
+ */
+int sprint_uuid(char *buf, int buflen, FILE_UUID *file_uuid)
+{
+	int i;
+	int n;
+	char *head = buf;
+
+	if (buflen < 2 * FILE_UUID_BUFF_LEN + 1) {
+		return -1;
+	}
+
+	for (i = 0; i < FILE_UUID_BUFF_LEN; i++) {
+		/* Each byte becomes exactly two chars + NUL within its 3-byte window. */
+		n = snprintf(head, 3, "%.2X", file_uuid->data[i]);
+		if (n != 2) {
+			return -1;
+		}
+		head += n;
+	}
+	return 0;
+}
+
+/*
+ * Fill an extend-args buffer for a DNS query.
+ * NOTE(review): args must have been allocated with room for dnsNamecount
+ * names (see the bufLen computation in dorado_query_dns()); this helper
+ * copies ENFS_DNS_MAX_NAME_LEN * dnsNamecount bytes unchecked.
+ */
+void NfsExtendDnsQuerySetArgs(EXTEND3args *args, uint32_t ip_type, uint32_t dnsNamecount, char *dnsName)
+{
+ // Request the newest protocol version; nego_enfs_version() may lower it.
+ args->version = ENFS_VERSION_BUTT - 1;
+ args->opcode = NFS_ENFS_QUERY_DNS_OP;
+ args->extend_args_u.dnsArgs.ipType = ip_type;
+ args->extend_args_u.dnsArgs.dnsNameCount = dnsNamecount;
+ memcpy((void *)args->extend_args_u.dnsArgs.dnsName, (void *)dnsName,
+ ENFS_DNS_MAX_NAME_LEN * dnsNamecount);
+ return;
+}
+
+/*
+ * Copy every decoded ip entry out of the dns-query response union into the
+ * caller-provided flat array (which must hold ipNumber entries).
+ */
+void NfsExtendDnsQuerySetRes(EXTEND3res *extend3Res, DNS_QUERY_IP_INFO_SINGLE *resData)
+{
+	int idx;
+
+	for (idx = 0; idx < extend3Res->extend_res_u.dnsQueryIpInfo.ipNumber; idx++) {
+		resData[idx].cpuId = extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[idx].cpuId;
+		resData[idx].lsId = extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[idx].lsId;
+		memcpy(resData[idx].ipAddr,
+			extend3Res->extend_res_u.dnsQueryIpInfo.ipInfo[idx].ipAddr,
+			MAX_IPV6_ADDR_LEN);
+	}
+}
+
+/*
+ * Resolve dnsNamecount names through the storage-side DNS service.
+ * On success *dnsQueryIpInfo owns a kmalloc'd array of *ipNumber entries
+ * that the caller must kfree().
+ */
+int dorado_query_dns(struct rpc_clnt *clnt, DNS_QUERY_IP_INFO_SINGLE **dnsQueryIpInfo,
+	uint32_t ip_type, uint32_t dnsNamecount, char *dnsName, int *ipNumber)
+{
+	int ret;
+	char *buf = NULL;
+	EXTEND3args *args = NULL;
+	EXTEND3res *extend3Res = NULL;
+	DNS_QUERY_IP_INFO_SINGLE *resData = NULL;
+	/* union can't be sized with sizeof; 4 uint32s: op, vers, type and count */
+	int bufLen = sizeof(uint32_t) * 4 + dnsNamecount * ENFS_DNS_MAX_NAME_LEN;
+
+	buf = kmalloc(EXTEND_CMD_MAX_BUF_LEN, GFP_KERNEL);
+	if (!buf) {
+		return -ENOMEM;
+	}
+
+	args = kmalloc(bufLen, GFP_KERNEL);
+	if (args == NULL) {
+		kfree(buf);
+		printk(KERN_ERR "%s:kmalloc failed.\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	NfsExtendDnsQuerySetArgs(args, ip_type, dnsNamecount, dnsName);
+	nego_enfs_version(clnt, args);
+
+	ret = NfsExtendProcInfoExtendEncode(buf, bufLen, args);
+	if (ret) {
+		/* Encode failures were previously ignored and garbage was sent. */
+		printk(KERN_ERR "NfsExtendOp encode dns args failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = dorado_extend_op(clnt, buf, &bufLen);
+	/* Older storage versions do not support NfsExtendOp and return these codes. */
+	if (ret == -EOPNOTSUPP || ret == -ENOTSUPP) {
+		printk(KERN_ERR "%s: no support NfsExtendOp.\n", __FUNCTION__);
+		goto out;
+	}
+
+	/* Any other transport/server error. */
+	if (ret) {
+		printk(KERN_ERR "NfsExtendOp query dns failed %d.\n", ret);
+		goto out;
+	}
+
+	ret = NfsExtendProcInfoExtendDecode(buf, bufLen, &extend3Res);
+	if (ret) {
+		goto out;
+	}
+
+	*ipNumber = extend3Res->extend_res_u.dnsQueryIpInfo.ipNumber;
+	resData = kmalloc(extend3Res->extend_res_u.dnsQueryIpInfo.ipNumber *
+		(sizeof(DNS_QUERY_IP_INFO_SINGLE)), GFP_KERNEL);
+	if (resData == NULL) {
+		printk(KERN_ERR "%s:kmalloc arg cfailed.\n", __FUNCTION__);
+		ret = -ENOMEM;
+		goto out_free_res; /* args and buf used to leak on this path */
+	}
+	NfsExtendDnsQuerySetRes(extend3Res, resData);
+	*dnsQueryIpInfo = resData;
+
+out_free_res:
+	kfree(extend3Res);
+out:
+	kfree(args);
+	kfree(buf);
+	return ret;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/path_mgmt/pm_ping.c b/fs/nfs/enfs/multipath/path_mgmt/pm_ping.c
new file mode 100644
index 0000000..4a9b32b
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/pm_ping.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. //TODO: remove this comment, expose the enum, initialize the state, expose the set function
+ * Description: path state header file
+ * Author: x00833432
+ * Create: 2023-08-21
+ */
+
+#include "pm_ping.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../../../net/sunrpc/netns.h"
+#include "pm_state.h"
+#include "enfs.h"
+#include "enfs_log.h"
+#include "enfs_config.h"
+#include "enfs_tp_common.h"
+#include "netns.h"
+
+#define SLEEP_INTERVAL 2 // ms to sleep between drain polls in pm_ping_fini()
+
+static struct task_struct *pm_ping_timer_thread = NULL; // periodic scanner thread
+static spinlock_t ping_execute_workq_lock; // protects ping_execute_workq
+static struct workqueue_struct *ping_execute_workq = NULL; // workqueue running the xprt test pings
+static atomic_t check_xprt_count; // count of ping xprt work in flight
+
+// One queued health-check: pings xprt through clnt.
+struct ping_xprt_work {
+ struct rpc_xprt *xprt; // use this specific xprt
+ struct rpc_clnt * clnt; // use this specific rpc_client
+ struct work_struct ping_work;
+};
+
+// Completion hook invoked (and freed) when an async ping finishes.
+struct pm_ping_async_callback{
+ void *data;
+ void (*func)(void *data);
+};
+
+// set xprt's pm_check_state
+void pm_ping_set_path_check_state(struct rpc_xprt *xprt, pm_check_state state)
+{
+	struct enfs_xprt_context *xprt_ctx;
+
+	if (IS_ERR(xprt)) {
+		enfs_log_error("The xprt ptr is not exist.\n");
+		return;
+	}
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return;
+	}
+
+	/* Hold a reference while touching the per-xprt multipath context. */
+	xprt_get(xprt);
+	xprt_ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (xprt_ctx != NULL)
+		atomic_set(&xprt_ctx->path_check_state, state);
+	else
+		enfs_log_error("The xprt multipath ctx is not valid.\n");
+	xprt_put(xprt);
+}
+
+// get xprt's pm_check_state; returns PM_CHECK_UNDEFINE when xprt or its
+// multipath context is missing.
+// NOTE(review): unlike the setter this takes no xprt_get/put -- presumably
+// callers hold a reference; confirm.
+static pm_check_state pm_ping_get_path_check_state(struct rpc_xprt *xprt)
+{
+ struct enfs_xprt_context *ctx = NULL;
+ pm_check_state state;
+
+ if (xprt == NULL) {
+ enfs_log_error("The xprt is not valid.\n");
+ return PM_CHECK_UNDEFINE;
+ }
+
+ ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+ if(ctx == NULL) {
+ enfs_log_error("The xprt multipath ctx is not valid.\n");
+ return PM_CHECK_UNDEFINE;
+ }
+
+ state = atomic_read(&ctx->path_check_state);
+
+ return state;
+}
+
+/* Run and free the optional async completion hook attached to a ping. */
+static void pm_ping_call_done_callback(void *data)
+{
+	struct pm_ping_async_callback *cb = data;
+
+	/* data is NULL for plain health-check pings with no registered hook. */
+	if (cb == NULL)
+		return;
+
+	cb->func(cb->data);
+	kfree(cb);
+}
+
+/* Flag the transport for teardown so the next user reconnects it. */
+static void set_xprt_close_wait(struct rpc_xprt *xprt)
+{
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return;
+	}
+
+	/* Pin the xprt while poking its state bits. */
+	xprt_get(xprt);
+	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+	xprt_put(xprt);
+}
+
+// Completion callback for a ping RPC: translate the task status into the
+// path state, mark the check finished, run the optional async hook, and
+// stamp the last-check time on the xprt context.
+static void pm_ping_call_done(struct rpc_task *task, void *data)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+ struct enfs_xprt_context *ctx = NULL;
+ ktime_t ktime;
+
+ // Balance the atomic_inc made when this ping was dispatched.
+ atomic_dec(&check_xprt_count);
+ if (task->tk_status >= 0) {
+ // xprt is not used,just match paramete.
+ // (LVOS_TP_START/END are fault-injection test hooks.)
+ LVOS_TP_START(PM_ALLOC_WORK_INFO_FAILED, &xprt);
+ pm_set_path_state(xprt, PM_STATE_NORMAL);
+ LVOS_TP_END;
+ } else {
+ // Failed ping: force the connection closed and mark the path faulty.
+ set_xprt_close_wait(xprt);
+ pm_set_path_state(xprt, PM_STATE_FAULT);
+ }
+ pm_ping_set_path_check_state(xprt, PM_CHECK_FINISH);
+
+ pm_ping_call_done_callback(data);
+
+ xprt_get(xprt);
+ ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+ if (ctx == NULL) {
+ enfs_log_error("The xprt multipath ctx is not valid.\n");
+ xprt_put(xprt);
+ return;
+ }
+ // Record when this path was last checked (ms); used for rate limiting.
+ ktime = ktime_get();
+ ctx->lastTime = ktime_to_ms(ktime);
+ xprt_put(xprt);
+}
+
+// Callback table for ping tasks; also used by pm_ping_is_test_xprt_task()
+// to recognize them.
+static const struct rpc_call_ops pm_ping_set_status_ops = {
+ .rpc_call_done = pm_ping_call_done,
+};
+
+// Workqueue handler: launch the async xprt test for one queued entry, then
+// drop the references taken in pm_ping_add_work() and free the entry.
+static void pm_ping_execute_work(struct work_struct * work)
+{
+ int ret = 0;
+
+ // get the work information
+ struct ping_xprt_work * work_info = container_of(work, struct ping_xprt_work, ping_work);
+
+ // Only fire if the xprt is still in the WAITING state set when queued.
+ // NOTE(review): this test-then-set is not atomic -- confirm no other
+ // writer can race it between the read and the set.
+ if (pm_ping_get_path_check_state(work_info->xprt) == PM_CHECK_WAITING) {
+ pm_ping_set_path_check_state(work_info->xprt, PM_CHECK_CHECKING);
+ // rpc_clnt_test_xprt() returns 1 when the async ping was started.
+ ret = rpc_clnt_test_xprt(work_info->clnt, work_info->xprt, &pm_ping_set_status_ops, NULL, RPC_TASK_ASYNC | RPC_TASK_FIXED);
+ if (ret != 1) {
+ enfs_log_debug("ping xprt execute failed ,ret %d", ret);
+ pm_ping_set_path_check_state(work_info->xprt, PM_CHECK_FINISH);
+ } else {
+ // Counted until pm_ping_call_done() decrements it.
+ atomic_inc(&check_xprt_count);
+ }
+ }
+
+ // Release the clnt and xprt references taken by pm_ping_add_work().
+ rpc_release_client(work_info->clnt);
+ xprt_put(work_info->xprt);
+ kfree(work_info);
+ work_info = NULL;
+}
+
+/* Queue work on the ping workqueue; false when the queue is gone or busy. */
+static bool pm_ping_workqueue_queue_work(struct work_struct *work)
+{
+	bool queued = false;
+
+	/* The lock keeps the pointer stable against pm_ping_workqueue_fini(). */
+	spin_lock(&ping_execute_workq_lock);
+	if (ping_execute_workq != NULL)
+		queued = queue_work(ping_execute_workq, work);
+	spin_unlock(&ping_execute_workq_lock);
+
+	return queued;
+}
+
+// init test work and add this work to workqueue.
+// data is the caller's free_list head: when queueing fails after the clnt
+// refcount was raised, the clnt is parked on that list for deferred release
+// (the caller runs enfs_destroy_rpcclnt_list() outside the rpc_client_lock).
+static int pm_ping_add_work(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void* data)
+{
+ struct list_head *head = data;
+ struct rpcclnt_release_item *item;
+ struct ping_xprt_work * work_info;
+ struct enfs_xprt_context *ctx = NULL;
+ bool ret = false;
+
+ if (IS_ERR(xprt) || xprt == NULL) {
+ enfs_log_error("The xprt ptr is not exist.\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR(clnt) || clnt == NULL) {
+ enfs_log_error("The clnt ptr is not exist.\n");
+ return -EINVAL;
+ }
+
+ if (!xprt_get_reserve_context(xprt)) {
+ enfs_log_error("multipath_context is null.\n");
+ return -EINVAL;
+ }
+
+ ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+ if (ctx == NULL) {
+ enfs_log_error("The xprt multipath ctx is not valid.\n");
+ return -EINVAL;
+ }
+
+ // Rate limit: skip this xprt if it was checked recently.
+ if (!enfs_timeout_ms((ktime_t *)&ctx->lastTime, ENFS_PM_PING_TMIE_OUT * 1000)) {
+ return 0;
+ }
+
+ // Only queue an xprt that is idle (never checked, or last check done).
+ if (pm_ping_get_path_check_state(xprt) == PM_CHECK_FINISH || pm_ping_get_path_check_state(xprt) == PM_CHECK_INIT) { // idle: safe to insert into the work queue
+ enfs_log_debug("find xprt pointer. %p\n", xprt);
+ // GFP_ATOMIC: called under the caller's rpc_client_lock spinlock.
+ // NOTE(review): the item allocation below uses GFP_KERNEL in the same
+ // context -- confirm which is correct.
+ work_info = kzalloc(sizeof(struct ping_xprt_work), GFP_ATOMIC);
+ if (work_info == NULL) {
+ return -ENOMEM;
+ }
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ enfs_log_error("alloc item failed.\n");
+ kfree(work_info);
+ return -ENOMEM;
+ }
+
+ work_info->clnt = clnt;
+ work_info->xprt = xprt;
+ // Reference dropped by pm_ping_execute_work().
+ xprt_get(xprt);
+ INIT_WORK(&work_info->ping_work, pm_ping_execute_work);
+ pm_ping_set_path_check_state(xprt, PM_CHECK_WAITING);
+// cl_count became a refcount_t on newer kernels.
+#ifdef ENFS_OPENEULER_660
+ if (!refcount_inc_not_zero(&clnt->cl_count)) {
+#else
+ if (!atomic_inc_not_zero(&clnt->cl_count)) {
+#endif
+ // clnt is already being torn down; undo and bail out quietly.
+ xprt_put(work_info->xprt);
+ kfree(item);
+ kfree(work_info);
+ return 0;
+ }
+
+ ret = pm_ping_workqueue_queue_work(&work_info->ping_work);
+ if (!ret) {
+ // Queue rejected the work: hand the raised clnt ref to the
+ // caller's free_list for deferred release.
+ item->clnt = work_info->clnt;
+ list_add_tail(&item->node, head);
+ xprt_put(work_info->xprt);
+ kfree(work_info);
+ return -EINVAL;
+ }
+
+ /* Note:that normal exit requires release. */
+ kfree(item);
+ }
+ return 0;
+}
+
+// encapsulate pm_ping_add_work()
+static int pm_ping_execute_xprt_test(struct rpc_clnt *clnt,
+	struct rpc_xprt *xprt, void *data)
+{
+	/*
+	 * Always report success: rpc_clnt_iterate_for_each_xprt() stops
+	 * iterating on a negative return, and every xprt must be visited
+	 * even when adding the work for one of them fails.
+	 */
+	(void)pm_ping_add_work(clnt, xprt, data);
+	return 0;
+}
+
+// Walk every rpc_clnt of one sunrpc namespace and queue ping work for each
+// of its xprts; clients whose work could not be queued are collected on
+// free_list and released after the lock is dropped.
+static void pm_ping_loop_rpclnt(struct sunrpc_net *sn)
+{
+ struct rpc_clnt *clnt;
+ LIST_HEAD(free_list);
+
+ // NOTE(review): all_clients is iterated with the RCU list helper while
+ // holding rpc_client_lock -- presumably the lock alone is sufficient
+ // protection here; confirm.
+ spin_lock(&sn->rpc_client_lock);
+ list_for_each_entry_rcu(clnt, &sn->all_clients, cl_clients)
+ {
+ enfs_log_debug("find rpc_clnt. %p\n", clnt);
+ rpc_clnt_iterate_for_each_xprt(clnt, pm_ping_execute_xprt_test,
+ (void *)&free_list);
+ }
+ spin_unlock(&sn->rpc_client_lock);
+ // Deferred release: must happen outside rpc_client_lock.
+ enfs_destroy_rpcclnt_list(&free_list);
+}
+
+// iterate each clnt in the sunrpc_net
+static void pm_ping_loop_sunrpc_net(void)
+{
+	struct sunrpc_net *sn;
+	struct net *net;
+
+	/* Walk every network namespace and ping the clients in each. */
+	rcu_read_lock();
+	for_each_net_rcu(net) {
+		sn = net_generic(net, sunrpc_net_id);
+		if (sn != NULL)
+			pm_ping_loop_rpclnt(sn);
+	}
+	rcu_read_unlock();
+}
+
+
+// Scanner thread body: every second, check whether multipath is enabled and
+// the configured detect interval has elapsed; if so, sweep all sunrpc
+// clients and queue health-check pings.  Exits when kthread_stop() is called.
+static int pm_ping_routine(void *data)
+{
+ ktime_t start = ktime_get();
+ int32_t interval_ms;
+
+ while (!kthread_should_stop()) {
+ // Re-read the interval each pass so config changes take effect live.
+ interval_ms = enfs_get_config_path_detect_interval() * 1000;
+ if (enfs_get_config_multipath_state() == ENFS_MULTIPATH_ENABLE &&
+ enfs_timeout_ms(&start, interval_ms)) {
+ start = ktime_get();
+ pm_ping_loop_sunrpc_net();
+ }
+ enfs_msleep(1000);
+ }
+ return 0;
+}
+
+// Start the periodic ping scanner thread; returns 0 or a negative errno.
+static int pm_ping_start(void)
+{
+	pm_ping_timer_thread = kthread_run(pm_ping_routine, NULL, "pm_ping_routine");
+	if (IS_ERR(pm_ping_timer_thread)) {
+		int ret = PTR_ERR(pm_ping_timer_thread);
+
+		enfs_log_error("Failed to create kernel thread\n");
+		/*
+		 * Reset to NULL: pm_ping_fini() only checks for non-NULL and
+		 * would otherwise pass an ERR_PTR to kthread_stop().
+		 */
+		pm_ping_timer_thread = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+// initialize workqueue
+static int pm_ping_workqueue_init(void)
+{
+	struct workqueue_struct *wq = create_workqueue("pm_ping_workqueue");
+
+	if (wq == NULL) {
+		enfs_log_error("create workqueue failed.\n");
+		return -ENOMEM;
+	}
+
+	/* Publish under the lock so queue/fini see a consistent pointer. */
+	spin_lock(&ping_execute_workq_lock);
+	ping_execute_workq = wq;
+	spin_unlock(&ping_execute_workq_lock);
+
+	enfs_log_info("create workqueue successed.\n");
+	return 0;
+}
+
+/* Detach the ping workqueue so nothing new can be queued, then drain it. */
+static void pm_ping_workqueue_fini(void)
+{
+	struct workqueue_struct *wq;
+
+	spin_lock(&ping_execute_workq_lock);
+	wq = ping_execute_workq;
+	ping_execute_workq = NULL;
+	spin_unlock(&ping_execute_workq_lock);
+
+	enfs_log_info("delete work queue\n");
+
+	if (wq == NULL)
+		return;
+
+	flush_workqueue(wq);
+	destroy_workqueue(wq);
+}
+
+// module exit func: stop the scanner thread, drain the workqueue, then
+// busy-wait (SLEEP_INTERVAL ms polls) until every in-flight async ping has
+// completed its rpc_call_done and dropped check_xprt_count to zero.
+void pm_ping_fini(void)
+{
+ if (pm_ping_timer_thread) {
+ kthread_stop(pm_ping_timer_thread);
+ }
+
+ pm_ping_workqueue_fini();
+
+ while(atomic_read(&check_xprt_count) != 0) {
+ enfs_msleep(SLEEP_INTERVAL);
+ }
+}
+
+// module init func
+int pm_ping_init(void)
+{
+	int err;
+
+	spin_lock_init(&ping_execute_workq_lock);
+	atomic_set(&check_xprt_count, 0);
+
+	err = pm_ping_workqueue_init();
+	if (err != 0) {
+		enfs_log_error("PM_PING Module loading failed.\n");
+		return err;
+	}
+
+	err = pm_ping_start();
+	if (err != 0) {
+		enfs_log_error("PM_PING Module loading failed.\n");
+		/* Undo the workqueue so we leave no half-initialized state. */
+		pm_ping_workqueue_fini();
+	}
+
+	return err;
+}
+
+/* A ping task is identified by its callback ops table. */
+bool pm_ping_is_test_xprt_task(struct rpc_task *task)
+{
+	return task->tk_ops == &pm_ping_set_status_ops;
+}
+
+/*
+ * Launch an async ping of xprt and invoke func(data) when it completes
+ * (via pm_ping_call_done).  Returns 1 when the ping was started, otherwise
+ * the rpc_clnt_test_xprt() result.
+ */
+int pm_ping_rpc_test_xprt_with_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void (*func)(void *data), void *data)
+{
+	int ret;
+	struct pm_ping_async_callback *callback_data;
+
+	callback_data = kzalloc(sizeof(struct pm_ping_async_callback), GFP_KERNEL);
+	if (callback_data == NULL) {
+		/* Log previously said "mzalloc". */
+		enfs_log_error("failed to kzalloc mem\n");
+		return -ENOMEM;
+	}
+
+	callback_data->data = data;
+	callback_data->func = func;
+
+	/* Counted until pm_ping_call_done() decrements it. */
+	atomic_inc(&check_xprt_count);
+	ret = rpc_clnt_test_xprt(clnt, xprt, &pm_ping_set_status_ops, callback_data, RPC_TASK_ASYNC | RPC_TASK_FIXED);
+	if (ret != 1) {
+		enfs_log_debug("ping xprt execute failed ,ret %d", ret);
+		atomic_dec(&check_xprt_count);
+		/*
+		 * NOTE(review): callback_data appears to leak here -- the done
+		 * callback that frees it never runs when the test was not
+		 * started; confirm rpc_clnt_test_xprt() semantics before freeing.
+		 */
+	}
+
+	return ret;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/multipath/path_mgmt/pm_ping.h b/fs/nfs/enfs/multipath/path_mgmt/pm_ping.h
new file mode 100644
index 0000000..dc03522
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/pm_ping.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: nfs configuration
+ * Author: x00833432
+ * Create: 2023-07-27
+ */
+
+#ifndef PM_PING_H
+#define PM_PING_H
+
+/* NOTE(review): header name lost in patch formatting — presumably
+ * <linux/sunrpc/clnt.h> or <linux/sunrpc/xprt.h>; confirm against tree. */
+#include
+
+/* Life-cycle of one xprt inside the periodic path-check queue. */
+typedef enum {
+	PM_CHECK_INIT, // this xprt never been queued
+	PM_CHECK_WAITING, // this xprt waiting in the queue
+	PM_CHECK_CHECKING, // this xprt is testing
+	PM_CHECK_FINISH, // this xprt has been finished
+	PM_CHECK_UNDEFINE, // undefine multipath struct
+} pm_check_state;
+
+/* Start/stop the path-ping subsystem (work queue + timer thread). */
+int pm_ping_init(void);
+void pm_ping_fini(void);
+/* Record @state as @xprt's health-check progress. */
+void pm_ping_set_path_check_state(struct rpc_xprt *xprt, pm_check_state state);
+/* True when @task was issued by this module's ping machinery. */
+bool pm_ping_is_test_xprt_task(struct rpc_task *task);
+/* Async NULL-ping of @xprt; @func(@data) runs on completion. */
+int pm_ping_rpc_test_xprt_with_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void (*func)(void *data), void *data);
+
+#endif // PM_PING_H
diff --git a/fs/nfs/enfs/multipath/path_mgmt/pm_state.c b/fs/nfs/enfs/multipath/path_mgmt/pm_state.c
new file mode 100644
index 0000000..c6a968e
--- /dev/null
+++ b/fs/nfs/enfs/multipath/path_mgmt/pm_state.c
@@ -0,0 +1,204 @@
+#include "pm_state.h"
+#include
+#include
+#include "enfs_log.h"
+
+/*
+ * Read the current path state stored in the enfs per-xprt context.
+ * Returns PM_STATE_UNDEFINED when @xprt or its context is missing.
+ */
+pm_path_state pm_get_path_state(struct rpc_xprt *xprt)
+{
+	struct enfs_xprt_context *xprt_ctx;
+	pm_path_state result = PM_STATE_UNDEFINED;
+
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return PM_STATE_UNDEFINED;
+	}
+
+	/* pin the xprt while we touch its reserved context */
+	xprt_get(xprt);
+	xprt_ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (xprt_ctx != NULL)
+		result = atomic_read(&xprt_ctx->path_state);
+	else
+		enfs_log_error("The xprt multipath ctx is not valid.\n");
+	xprt_put(xprt);
+
+	return result;
+}
+
+/*
+ * Format @addr (IPv4 or IPv6) into @buf.  Returns 0 on success (or a
+ * NULL @addr, preserving the original contract) and non-zero for an
+ * unsupported family.  @buf is always NUL-terminated on entry so that
+ * callers never read uninitialized bytes after a failed conversion.
+ */
+static int sockaddr_ip_to_str(struct sockaddr *addr, char *buf, int len)
+{
+	/* fix: make the output deterministic on every failure path */
+	if (len > 0)
+		buf[0] = '\0';
+
+	if (!addr)
+		return 0;
+
+	switch (addr->sa_family) {
+	case AF_INET: {
+		struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+
+		snprintf(buf, len, "%pI4", &sin->sin_addr);
+		return 0;
+	}
+	case AF_INET6: {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+		snprintf(buf, len, "%pI6", &sin6->sin6_addr);
+		return 0;
+	}
+	default:
+		break;
+	}
+	return 1;
+}
+
+/* True when @ip_str parses as an IPv4 dotted quad or an IPv6 literal. */
+static bool is_valid_ip_address(const char *ip_str)
+{
+	struct in_addr v4;
+	struct in6_addr v6;
+
+	return in4_pton(ip_str, -1, (u8 *)&v4, '\0', NULL) == 1 ||
+	       in6_pton(ip_str, -1, (u8 *)&v6, '\0', NULL) == 1;
+}
+
+/*
+ * Record @state in the xprt's enfs context and log the transition with
+ * the local and remote addresses.  No-op when the state is unchanged.
+ */
+void pm_set_path_state(struct rpc_xprt *xprt, pm_path_state state)
+{
+	struct enfs_xprt_context *ctx = NULL;
+	pm_path_state cur_state;
+	char localip[64] = {"*"};
+	char remoteip[64] = {"*"};
+	struct sockaddr_storage srcaddr;
+	/*
+	 * fix: zero-initialize — sockaddr_ip_to_str() may leave the buffer
+	 * untouched for an unsupported family, and is_valid_ip_address()
+	 * would then read uninitialized stack memory.
+	 */
+	char local_name[INET6_ADDRSTRLEN] = {0};
+	const char *local = local_name;
+	int ret;
+
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return;
+	}
+
+	xprt_get(xprt);
+
+	ctx = (struct enfs_xprt_context *)xprt_get_reserve_context(xprt);
+	if (ctx == NULL) {
+		enfs_log_error("The xprt multipath ctx is not valid.\n");
+		xprt_put(xprt);
+		return;
+	}
+
+	cur_state = atomic_read(&ctx->path_state);
+	if (cur_state == state) {
+		xprt_put(xprt);
+		return;
+	}
+	atomic_set(&ctx->path_state, state);
+	ret = sockaddr_ip_to_str((struct sockaddr *)&xprt->addr, remoteip, sizeof(remoteip));
+	if (ret != 0)
+		enfs_log_error("remoteip to str err:%d.\n", ret);
+
+	/* prefer the bound source address; fall back to rpc_localalladdr() */
+	sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, local_name, sizeof(local_name));
+	if (!is_valid_ip_address(local)) {
+		ret = rpc_localalladdr(xprt, (struct sockaddr *)&srcaddr, sizeof(srcaddr));
+		if (ret != 0)
+			enfs_log_error("rpc_localalladdr localip err:%d.\n", ret);
+		sockaddr_ip_to_str((struct sockaddr *)&srcaddr, localip, sizeof(localip));
+	} else {
+		sockaddr_ip_to_str((struct sockaddr *)&ctx->srcaddr, localip, sizeof(localip));
+	}
+	enfs_log_info("The xprt localip{%s} remoteip{%s} path state change from {%d} to {%d}.\n",
+		      localip, remoteip, cur_state, state);
+
+	xprt_put(xprt);
+}
+
+/* Write a human-readable name for @xprt's path state into @buf. */
+void pm_get_path_state_desc(struct rpc_xprt *xprt, char *buf, int len)
+{
+	const char *desc;
+
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return;
+	}
+
+	if ((buf == NULL) || (len <= 0)) {
+		enfs_log_error("Buffer is not valid, len=%d.\n", len);
+		return;
+	}
+
+	/* map the state to its label, then format once */
+	switch (pm_get_path_state(xprt)) {
+	case PM_STATE_INIT:
+		desc = "Init";
+		break;
+	case PM_STATE_NORMAL:
+		desc = "Normal";
+		break;
+	case PM_STATE_FAULT:
+		desc = "Fault";
+		break;
+	default:
+		desc = "Unknown";
+		break;
+	}
+	(void)snprintf(buf, len, "%s", desc);
+}
+
+/*
+ * Render the set bits of xprt->state as a '|'-separated list of names
+ * into @buf (e.g. "CONNECTED|BOUND").  @buf is left empty when no
+ * known bit is set or the buffer is exhausted.
+ */
+void pm_get_xprt_state_desc(struct rpc_xprt *xprt, char *buf, int len)
+{
+	int i;
+	unsigned long state;
+	/* tables are read-only: make them const */
+	static const unsigned long xprt_mask[] = {XPRT_LOCKED, XPRT_CONNECTED, XPRT_CONNECTING, XPRT_CLOSE_WAIT,
+						  XPRT_BOUND, XPRT_BINDING, XPRT_CLOSING, XPRT_CONGESTED};
+	static const char *xprt_state_desc[] = {"LOCKED", "CONNECTED", "CONNECTING", "CLOSE_WAIT",
+						"BOUND", "BINDING", "CLOSING", "CONGESTED"};
+	int pos = 0;
+	int ret = 0;
+
+	if (xprt == NULL) {
+		enfs_log_error("The xprt is not valid.\n");
+		return;
+	}
+
+	if ((buf == NULL) || (len <= 0)) {
+		enfs_log_error("Xprt state buffer is not valid, len=%d.\n", len);
+		return;
+	}
+
+	/* snapshot the state word once; bits are tested on the local copy */
+	xprt_get(xprt);
+	state = READ_ONCE(xprt->state);
+	xprt_put(xprt);
+
+	for (i = 0; i < ARRAY_SIZE(xprt_mask); ++i) {
+		if (pos >= len)
+			break;
+
+		if (!test_bit(xprt_mask[i], &state))
+			continue;
+
+		if (pos == 0)
+			ret = snprintf(buf, len, "%s", xprt_state_desc[i]);
+		else
+			ret = snprintf(buf + pos, len - pos, "|%s", xprt_state_desc[i]);
+
+		if (ret < 0) {
+			enfs_log_error("format state failed, ret %d.\n", ret);
+			break;
+		}
+
+		pos += ret;
+	}
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/Makefile b/fs/nfs/enfs/unify_multipath/Makefile
new file mode 100644
index 0000000..4ca848e
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/Makefile
@@ -0,0 +1,13 @@
+# Build / clean entry points for the unify_multipath subtree.
+# Recursive makes use $(MAKE) so the jobserver and MAKEFLAGS propagate
+# (a bare "make" in a recipe breaks parallel builds and flag passing).
+.PHONY: multipath clean-multipath dpc_adapter clean-dpc_adapter
+
+multipath:
+	$(MAKE) -C multipath build
+
+clean-multipath:
+	$(MAKE) -C multipath clean
+
+dpc_adapter:
+	$(MAKE) -C dpc_adapter build
+
+clean-dpc_adapter:
+	$(MAKE) -C dpc_adapter clean
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/Rules.mak b/fs/nfs/enfs/unify_multipath/Rules.mak
new file mode 100644
index 0000000..7071b59
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/Rules.mak
@@ -0,0 +1,52 @@
+# Shared rules for building the unify_multipath out-of-tree kernel
+# modules.  The including Makefile sets MODULE; sources are the shared
+# infra/*.c files plus whatever the includer adds via $(MODULE)-objs.
+MAKE ?= make
+CC ?= gcc
+LD ?= ld
+KVERS ?= $(shell uname -r)
+KDIR ?= /lib/modules/$(KVERS)/build
+
+SRC_PATH := $(shell realpath $(PWD)/../..)
+UNIFY_MULTIPATH_PATH := $(SRC_PATH)/unify_multipath
+
+PUBLIC_INCLUDE := -I$(SRC_PATH)/include/unify_multipath \
+	-I$(UNIFY_MULTIPATH_PATH)/include \
+	-I$(UNIFY_MULTIPATH_PATH)/multipath/include \
+	-I$(UNIFY_MULTIPATH_PATH)/infra
+
+# NOTE(review): empty DEBUG branch — placeholder for debug CFLAGS? Confirm.
+ifeq ($(BUILD_TYPE),DEBUG)
+endif
+
+obj-m := $(MODULE).o
+
+SRCS := $(wildcard $(PWD)/../infra/*.c)
+
+.PHONY: build clean dryrun
+
+# Print the effective configuration (always runs before build).
+dryrun:
+	@echo "MODULE: $(MODULE)"
+	@echo "PWD: $(PWD)"
+	@echo "SRCS: $(SRCS)"
+	@echo "OBJS: $(patsubst %c,%o,$(patsubst $(PWD)/%,%,$(SRCS)))"
+	@echo "KDIR: $(KDIR)"
+	@echo "KVERS: $(KVERS)"
+	@echo "INCLUDES: $(INCLUDES)"
+	@echo "PUBLIC_INCLUDE: $(PUBLIC_INCLUDE)"
+	@echo "CFLAGS: $(CFLAGS)"
+
+# Delegate to Kbuild in the configured kernel tree.
+build: dryrun
+	@echo --------------------
+	@echo - make $(MODULE) -
+	@echo --------------------
+	@$(MAKE) -C $(KDIR) CC=$(CC) LD=$(LD) M=$(PWD) EXTRA_CFLAGS="$(INCLUDES) $(PUBLIC_INCLUDE) $(CFLAGS)" $(MODULE)-objs="$(patsubst %c,%o,$(patsubst $(PWD)/%,%,$(SRCS)))" modules
+
+clean:
+	@echo --------------------
+	@echo - clean $(MODULE) -
+	@echo --------------------
+	@$(MAKE) -C $(KDIR) M=$(PWD) clean
+	@find $(PWD) -name '*.o' -delete
+
+install-prepare:
+	mkdir -p $(UNIFY_MULTIPATH_PATH)/build/ko
+
+# Collect the finished .ko into the shared build output directory.
+install: install-prepare
+	mv $(PWD)/$(MODULE).ko $(UNIFY_MULTIPATH_PATH)/build/ko
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/Makefile.kernel b/fs/nfs/enfs/unify_multipath/dpc_adapter/Makefile.kernel
new file mode 100644
index 0000000..5fd2ba4
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/Makefile.kernel
@@ -0,0 +1,31 @@
+# Kbuild fragment for the dpc_adapter module: imports the exported
+# symbols of the multipath module and appends the adapter objects.
+KBUILD_EXTRA_SYMBOLS := $(ODC_NFSCLIENT_ROOT_PATH)/src/multipath/Module.symvers
+
+UNIFY_MULTIPATH_PATH := $(ODC_NFSCLIENT_ROOT_PATH)/src/unify_multipath
+DPC_ADAPTER_PATH := $(ODC_NFSCLIENT_ROOT_PATH)/src/unify_multipath/dpc_adapter/
+
+# NOTE(review): the "OBj" casing looks like a typo but is used
+# consistently (defined and referenced below) so it is harmless.
+DPC_ADAPTER_OBj := $(DPC_ADAPTER_PATH)/dpc_adapter_module.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_client.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_client_read.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_client_null_call.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_io_common.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_client_write.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_xdr.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_proc.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_conn.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_mulp_proc.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_mulp_proc_shard_view.o \
+	$(DPC_ADAPTER_PATH)/dpc_rpc_util.o \
+	$(DPC_ADAPTER_PATH)/../infra/mulp_proc.o
+
+$(MODULE)-objs += $(DPC_ADAPTER_OBj)
+
+EXTRA_CFLAGS += -I$(DPC_ADAPTER_PATH) \
+	-I/usr/include/linux \
+	-I$(ODC_NFSCLIENT_ROOT_PATH)/src/include/unify_multipath \
+	-I$(UNIFY_MULTIPATH_PATH)/include \
+	-I$(UNIFY_MULTIPATH_PATH)/multipath/include \
+	-I$(UNIFY_MULTIPATH_PATH)/infra
+
+# Debug builds enable verbose DPC RPC tracing.
+ifeq ($(RELEASE_TYPE), debug)
+	CFLAGS += -DDPC_RPC_DEBUG
+endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter.c
new file mode 100644
index 0000000..9138036
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter.c
@@ -0,0 +1,6 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved.
+ * Description: unify multi path module dpc adapter init
+ * Author: f30057955
+ * Create: 2025-1-3
+ */
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter_module.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter_module.c
new file mode 100644
index 0000000..443a932
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_adapter_module.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: unify multi path module dpc adapter init
+ * Author: f30057955
+ * Create: 2025-1-3
+ */
+
+#include
+#include
+#include "mulp_log.h"
+#include "mulp_porting.h"
+#include "dpc_rpc_client.h"
+#include "dpc_rpc_conn.h"
+#include "dpc_rpc_mulp_proc.h"
+#include "multipath_api.h"
+
+/* One init/teardown pair for a dpc adapter sub-component. */
+struct dpc_adapter_entry {
+    char *name;
+    int (*init)(void);
+    void (*final)(void);
+};
+
+/* Sub-components, initialized in order, finalized in reverse order. */
+static struct dpc_adapter_entry g_dpc_adapter_entry[] = {
+    {"dpc rpc client", dpc_rpc_client_init, dpc_rpc_client_exit},
+    {"dpc rpc conn", dpc_rpc_conn_init, dpc_rpc_conn_exit},
+    {"dpc rpc mulp proc", dpc_rpc_mulp_proc_init, dpc_rpc_mulp_proc_exit},
+};
+
+/* Run the ->final hook of entries [0, idx) in reverse registration order. */
+static void init_helper_finalize(struct dpc_adapter_entry *job, int idx)
+{
+    int i;
+
+    for (i = idx - 1; i >= 0; i--) {
+        if (job[i].final != NULL) {
+            job[i].final();
+        }
+    }
+}
+
+/*
+ * Initialize each entry of @job in order; on the first failure, unwind
+ * the already-initialized entries in reverse and return that entry's
+ * error code (previously a bare -1, which hid the real errno from the
+ * module-init caller).
+ */
+static int init_helper_init(struct dpc_adapter_entry *job, int size)
+{
+    int ret;
+    int i;
+    struct dpc_adapter_entry *entry = NULL;
+
+    for (i = 0; i < size; i++) {
+        entry = &job[i];
+        ret = entry->init();
+        if (ret) {
+            MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc rpc init %s fail %d", entry->name, ret);
+            goto init_err;
+        }
+        MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc rpc init %s success", entry->name);
+    }
+
+    return 0;
+
+init_err:
+    init_helper_finalize(job, i);
+    return ret;
+}
+
+/* Bring up all adapter sub-components (see g_dpc_adapter_entry). */
+int dpc_adapter_init(void)
+{
+    return init_helper_init(g_dpc_adapter_entry, ARRAY_SIZE(g_dpc_adapter_entry));
+}
+
+/* Tear down every adapter sub-component in reverse order. */
+void dpc_adapter_fini(void)
+{
+    init_helper_finalize(g_dpc_adapter_entry, ARRAY_SIZE(g_dpc_adapter_entry));
+}
+
+/* External entry points mirroring init/exit.  NOTE(review): "dector"
+ * looks like a typo for "dtor"; kept since the name is part of the API. */
+int dpc_adapter_ctor_api(void)
+{
+    return dpc_adapter_init();
+}
+
+int dpc_adapter_dector_api(void)
+{
+    dpc_adapter_fini();
+    return 0;
+}
+
+/*
+ * Module init: bring up the common multipath core first, then the DPC
+ * adapter.  Fix: if the adapter fails, roll back mulp_ctor() so a
+ * failed insmod leaves nothing registered (previously the core was
+ * leaked on this path).
+ */
+static int __init dpc_adapter_ctor(void)
+{
+    int ret = mulp_ctor();
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    ret = dpc_adapter_init();
+    if (ret != 0) {
+        mulp_dector();
+    }
+    return ret;
+}
+module_init(dpc_adapter_ctor);
+
+/*
+ * Module exit: tear down in reverse of init order — adapter first,
+ * then the multipath core it depends on (previously the core was torn
+ * down while the adapter was still registered).
+ */
+static void __exit dpc_adapter_dector(void)
+{
+    dpc_adapter_fini();
+    mulp_dector();
+}
+module_exit(dpc_adapter_dector);
+
+MODULE_LICENSE("GPL");
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc.h
new file mode 100644
index 0000000..52ffad5
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC RPC MSG
+ */
+
+#ifndef DPC_RPC_H
+#define DPC_RPC_H
+
+/* Construct / destruct the DPC adapter from external callers
+ * (wrappers around dpc_adapter_init()/dpc_adapter_fini()). */
+int dpc_adapter_ctor_api(void);
+int dpc_adapter_dector_api(void);
+
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.c
new file mode 100644
index 0000000..87737d2
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.c
@@ -0,0 +1,411 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC client over SUN RPC transport
+ */
+
+#include
+#include
+#include
+#include
+#include "dpc_rpc_client.h"
+#include "dpc_rpc_client_api.h"
+#include "multipath_api.h"
+#include "mulp_log.h"
+#include "mulp_porting.h"
+#include "dpc_rpc_io_common.h"
+#include "dpc_rpc_proc.h"
+#include "dpc_rpc_mulp_proc.h"
+#include "dpc_rpc_mulp_proc_shard_view.h"
+#include "dpc_rpc_util.h"
+
+/* Global registry of created multipaths, guarded by an rwlock. */
+typedef struct {
+    mulp_rwlock_type rwlock;
+    mulp_list_head_type head;
+} dpc_multipath_list;
+static dpc_multipath_list g_dpc_multipath_list;
+
+/* Address family classification used when resolving local port names. */
+typedef enum {
+    DPC_INVALID_ADDR_FAMILY = 0,
+    DPC_ADDR_IPV4,
+    DPC_ADDR_IPV6,
+    DPC_ADDR_FAMILY_BUTT
+} DPC_ADDR_FAMILY;
+
+/*
+ * Copy the creation arguments into the cached multipath node @info.
+ * The leading memset keeps the strncpy'd strings NUL-terminated even
+ * when they are truncated to the field size.
+ */
+void dpc_clnt_fill_path_info(dpc_multipath_info *info, mulp_create_mp_args *args, uint64_t mp_id)
+{
+    memset(info, 0, sizeof(dpc_multipath_info));
+    info->mp_id = mp_id;
+    info->client_id = args->client_id;
+    info->client_ls_id = args->client_ls_id;
+    info->nconnect = args->nconnect;
+    info->network_type = args->network_type;
+    info->strategy = args->strategy;
+    strncpy(info->user_name, args->user_name, MULP_USER_NAME_LEN - 1);
+    strncpy(info->user_authkey, args->user_authkey, MULP_USER_AUTHKEY_LEN - 1);
+}
+
+/* Classify an IP literal by its separator: '.' => IPv4, ':' => IPv6. */
+static DPC_ADDR_FAMILY dpc_clnt_check_ip_family(char *ip)
+{
+    if (strchr(ip, '.') != NULL) {
+        return DPC_ADDR_IPV4;
+    }
+    if (strchr(ip, ':') != NULL) {
+        return DPC_ADDR_IPV6;
+    }
+    return DPC_INVALID_ADDR_FAMILY;
+}
+
+/*
+ * Resolve pair->local_ip to the owning net-device name by scanning all
+ * IPv4 addresses in init_net.  Caller must hold rcu_read_lock()
+ * (dpc_clnt_get_portname() does).  pair->port_name is left unchanged
+ * when no match is found.
+ */
+static void dpc_clnt_get_portname_ipv4(mulp_ip_pair *pair)
+{
+    struct net_device *dev;
+    struct in_ifaddr *ifa;
+    char ip_str[MULP_MAX_IP_STR_LEN] = { 0 };
+
+    for_each_netdev (&init_net, dev) {
+        in_dev_for_each_ifa_rcu (ifa, dev->ip_ptr) {
+            snprintf(ip_str, MULP_MAX_IP_STR_LEN, "%pI4", &ifa->ifa_address);
+            if (!strcmp(pair->local_ip, ip_str)) {
+                strncpy(pair->port_name, dev->name, MULP_PORT_NAME_LEN - 1);
+                return;
+            }
+        }
+    }
+}
+
+/*
+ * Resolve pair->local_ip to the owning net-device name by scanning all
+ * IPv6 addresses in init_net.  Caller must hold rcu_read_lock().
+ */
+static void dpc_clnt_get_portname_ipv6(mulp_ip_pair *pair)
+{
+    struct net_device *dev;
+    struct inet6_dev *dev_v6;
+    struct inet6_ifaddr *ifa_v6;
+    char ip_str[MULP_MAX_IP_STR_LEN] = { 0 };
+
+    for_each_netdev (&init_net, dev) {
+        dev_v6 = __in6_dev_get(dev);
+        if (dev_v6 == NULL) {
+            /* fix: devices without IPv6 config return NULL here —
+             * skip them instead of dereferencing a NULL pointer */
+            continue;
+        }
+        list_for_each_entry_rcu (ifa_v6, &dev_v6->addr_list, if_list) {
+            snprintf(ip_str, MULP_MAX_IP_STR_LEN, "%pI6c", &ifa_v6->addr);
+            if (!strcmp(pair->local_ip, ip_str)) {
+                strncpy(pair->port_name, dev->name, MULP_PORT_NAME_LEN - 1);
+                return;
+            }
+        }
+    }
+}
+
+/* Fill pair->port_name (stays "NULL" if no local NIC owns local_ip). */
+static void dpc_clnt_get_portname(mulp_ip_pair *pair)
+{
+    strncpy(pair->port_name, "NULL", MULP_PORT_NAME_LEN - 1);
+
+    rcu_read_lock();
+    switch (dpc_clnt_check_ip_family(pair->local_ip)) {
+    case DPC_ADDR_IPV4:
+        dpc_clnt_get_portname_ipv4(pair);
+        break;
+    case DPC_ADDR_IPV6:
+        dpc_clnt_get_portname_ipv6(pair);
+        break;
+    default:
+        break;
+    }
+    rcu_read_unlock();
+}
+
+/* Free @count name buffers, then the pointer array itself. */
+static void dpc_clnt_free_portname(char **port_name_arr, uint32_t count)
+{
+    uint32_t idx;
+
+    for (idx = 0; idx < count; idx++) {
+        mulp_mem_free(port_name_arr[idx]);
+    }
+    mulp_mem_free(port_name_arr);
+}
+
+/*
+ * Allocate an array of @count zeroed MULP_PORT_NAME_LEN name buffers.
+ * Fix: the array is only published to *port_name_arr once fully
+ * allocated, so the caller's pointer is never left dangling after a
+ * partial-allocation failure.  Returns 0 or -ENOMEM.
+ */
+static int dpc_clnt_alloc_portname(char ***port_name_arr, uint32_t count)
+{
+    uint32_t i;
+    char **arr;
+
+    arr = (char **)mulp_mem_zalloc(sizeof(char *) * count);
+    if (arr == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_CRITI, "dpc clnt alloc mem for port name arr failed.");
+        return -ENOMEM;
+    }
+    for (i = 0; i < count; i++) {
+        arr[i] = (char *)mulp_mem_zalloc(sizeof(char) * MULP_PORT_NAME_LEN);
+        if (arr[i] == NULL) {
+            MULP_LOG(MULP_MODULE_API, MULP_CRITI, "dpc clnt alloc mem for port name arr failed.");
+            dpc_clnt_free_portname(arr, i);
+            return -ENOMEM;
+        }
+    }
+    *port_name_arr = arr;
+    return 0;
+}
+
+/*
+ * Create a DPC multipath from @args: resolve local port names, create
+ * the mp in the multipath core, expose its proc file (best effort),
+ * ping all paths, and cache the mp on the global list for lookup and
+ * teardown.  Returns 0 on success or a negative error code.
+ */
+int dpc_clnt_create_mp(mulp_create_mp_args *args, uint64_t *mp_id)
+{
+    int ret;
+    dpc_multipath_info *info = NULL;
+    uint32_t i;
+    char **port_name_arr = NULL;
+
+    if (!args || !mp_id) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc clnt create mp get null param.");
+        return -EFAULT;
+    }
+
+    info = (dpc_multipath_info *)mulp_mem_zalloc(sizeof(dpc_multipath_info));
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt start create mp.");
+    if (!info) {
+        MULP_LOG(MULP_MODULE_API, MULP_CRITI, "dpc clnt alloc mem for multipath node failed.");
+        return -ENOMEM;
+    }
+
+    ret = dpc_clnt_alloc_portname(&port_name_arr, args->ip_pair_cnt);
+    if (ret != 0) {
+        mulp_mem_free(info);
+        return ret;
+    }
+
+    for (i = 0; i < args->ip_pair_cnt; i++) {
+        args->pair_arr[i].port_name = port_name_arr[i];
+        dpc_clnt_get_portname(&args->pair_arr[i]);
+    }
+    ret = mulp_create_mp(MULP_APP_DPC, args, mp_id);
+    if (ret) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc clnt create mp failed{%d}.", ret);
+        /* fix: the port-name array was previously leaked on this path */
+        dpc_clnt_free_portname(port_name_arr, args->ip_pair_cnt);
+        mulp_mem_free(info);
+        return ret;
+    }
+    ret = dpc_proc_create_mp_file(args->client_id, *mp_id);
+    if (ret) {
+        /* proc file is best effort: the mp itself is already usable */
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "create proc file failed! ret:%d, mp_id:%llu", ret, *mp_id);
+    }
+    (void)mulp_ping_all_path(*mp_id, args->ctx, args->callback);
+
+    dpc_clnt_fill_path_info(info, args, *mp_id);
+    mulp_write_lock(&g_dpc_multipath_list.rwlock);
+    mulp_list_add_tail(&info->node, &g_dpc_multipath_list.head);
+    mulp_write_unlock(&g_dpc_multipath_list.rwlock);
+    dpc_clnt_free_portname(port_name_arr, args->ip_pair_cnt);
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt create mp{%llu} success.", *mp_id);
+    return 0;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_create_mp);
+
+/*
+ * Destroy multipath @mp_id: remove it from the core, drop the cached
+ * node and its proc file, and release the shared shard view once the
+ * last multipath is gone.
+ */
+int dpc_clnt_destroy_mp(uint64_t mp_id)
+{
+    int32_t ret;
+    dpc_multipath_info *cur = NULL;
+    dpc_multipath_info *next = NULL;
+    BOOLEAN_T is_empty = B_FALSE;
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt start destroy mp{%llu}.", mp_id);
+
+    ret = mulp_destroy_mp(mp_id);
+    if (ret) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc clnt destroy mp failed{%d} mp_id{%llu}.", ret, mp_id);
+        return ret;
+    }
+
+    mulp_write_lock(&g_dpc_multipath_list.rwlock);
+    mulp_list_for_each_entry_safe(cur, next, &g_dpc_multipath_list.head, node) {
+        if (cur->mp_id == mp_id) {
+            mulp_list_del(&cur->node);
+            dpc_proc_destroy_mp_file(cur->client_id, mp_id);
+            mulp_mem_free(cur);
+            break;
+        }
+    }
+    mulp_proc_try_destroy_shard_view_file();
+    // emptiness is sampled under the write lock to pair with create_mp
+    is_empty = list_empty(&g_dpc_multipath_list.head);
+    mulp_write_unlock(&g_dpc_multipath_list.rwlock);
+    if (is_empty) {
+        mulp_destroy_shard_view();
+    }
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt destroy mp success.");
+    return 0;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_destroy_mp);
+
+/*
+ * Look up the cached dpc_multipath_info for @mp_id.
+ * Returns 0 and sets *info on a hit, -ENOENT/-EINVAL otherwise.
+ *
+ * NOTE(review): *info points at a node of g_dpc_multipath_list and is
+ * handed out after the read lock is released; a concurrent
+ * dpc_clnt_destroy_mp() frees that node — confirm callers' lifetime
+ * guarantees or copy the data out under the lock.
+ */
+int dpc_rpc_client_get_multipath_info(uint64_t mp_id, dpc_multipath_info **info)
+{
+    dpc_multipath_info *cur = NULL;
+    dpc_multipath_info *next = NULL;
+    if (!info) {
+        return -EINVAL;
+    }
+    mulp_read_lock(&g_dpc_multipath_list.rwlock);
+    mulp_list_for_each_entry_safe(cur, next, &g_dpc_multipath_list.head, node) {
+        if (cur->mp_id == mp_id) {
+            *info = cur;
+            mulp_read_unlock(&g_dpc_multipath_list.rwlock);
+            return 0;
+        }
+    }
+    mulp_read_unlock(&g_dpc_multipath_list.rwlock);
+    return -ENOENT;
+}
+
+/* Fault-injection hook: force "get optimal path" to report -EINVAL. */
+static void dpc_sunrpc_clnt_get_optimal_path_fail(char *usr_para, void *out_addr)
+{
+    int *ret = (int *)out_addr;
+    *ret = -EINVAL;
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt get optimal path fail.");
+    return;
+}
+
+/* Fault-injection hook: mark the rpc task as timed out (-ETIMEDOUT). */
+static void dpc_sunrpc_clnt_msg_timeout(char *usr_para, void *out_addr)
+{
+    struct rpc_task *task = (struct rpc_task *)out_addr;
+    task->tk_rpc_status = -ETIMEDOUT;
+    MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt rpc timeout.");
+    return;
+}
+
+/* Register the fault-injection test points with the LVOS TP framework. */
+void dpc_sunrpc_clnt_tp_reg(void)
+{
+    LVOS_TP_REG(TP_GET_OPT_OPTIMAL_PATH_FAIL, "get optimal path failed", dpc_sunrpc_clnt_get_optimal_path_fail);
+    LVOS_TP_REG(TP_SUNRPC_MSG_TIMEOUT, "sunrpc msg timeout", dpc_sunrpc_clnt_msg_timeout);
+}
+
+/* Unregister the test points registered by dpc_sunrpc_clnt_tp_reg(). */
+void dpc_sunrpc_clnt_tp_unreg(void)
+{
+    LVOS_TP_UNREG(TP_GET_OPT_OPTIMAL_PATH_FAIL);
+    LVOS_TP_UNREG(TP_SUNRPC_MSG_TIMEOUT);
+}
+
+/* Sub-component init: set up the multipath registry and test points. */
+int dpc_rpc_client_init(void)
+{
+    mulp_rwlock_init(&g_dpc_multipath_list.rwlock);
+    mulp_init_list_head(&g_dpc_multipath_list.head);
+    dpc_sunrpc_clnt_tp_reg();
+    return 0;
+}
+
+/* Sub-component exit: unregister test points.
+ * NOTE(review): the registry list is not drained here — presumably all
+ * multipaths are destroyed before module unload; confirm. */
+void dpc_rpc_client_exit(void)
+{
+    dpc_sunrpc_clnt_tp_unreg();
+    return;
+}
+
+/* Submit one read request through the DPC RPC path. */
+int dpc_clnt_read_page(dpc_clnt_rw_args *args)
+{
+    return dpc_clnt_proc_exec(args, DPC_RPC_OP_READ);
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_read_page);
+
+/* Submit one write request through the DPC RPC path. */
+int dpc_clnt_write_page(dpc_clnt_rw_args *args)
+{
+    return dpc_clnt_proc_exec(args, DPC_RPC_OP_WRITE);
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_write_page);
+
+/* NOTE(review): stub — update_mp currently ignores its arguments and
+ * always reports success; confirm whether this is intentional. */
+int dpc_clnt_update_mp(uint64_t mp_id, mulp_create_mp_args *args)
+{
+    return 0;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_update_mp);
+/* Forward the path-detection period change to the multipath core. */
+int dpc_clnt_update_detect_period(uint64_t mp_id, uint32_t detect_period)
+{
+    return mulp_update_detect_period(mp_id, detect_period);
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_update_detect_period);
+
+/*
+ * Refresh the ip-pair set of an existing multipath: resolve the local
+ * port name for every pair, hand the list to the multipath core, then
+ * release the temporary name buffers.
+ */
+int dpc_clnt_update_ip_pair(mulp_update_ip_pair_args *args)
+{
+    int ret;
+    uint32_t idx;
+    char **names = NULL;
+
+    if (!args) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc clnt update ip pair get null param.");
+        return -EINVAL;
+    }
+
+    ret = dpc_clnt_alloc_portname(&names, args->ip_pair_cnt);
+    if (ret != 0) {
+        return ret;
+    }
+
+    for (idx = 0; idx < args->ip_pair_cnt; idx++) {
+        args->pair_arr[idx].port_name = names[idx];
+        dpc_clnt_get_portname(&args->pair_arr[idx]);
+    }
+
+    ret = mulp_update_ip_pair(args);
+    if (ret != 0) {
+        MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt update ip pair for mp{%llu} failed.", args->mp_id);
+    } else {
+        MULP_LOG(MULP_MODULE_API, MULP_INFO, "dpc clnt update ip pair for mp{%llu} success.", args->mp_id);
+    }
+
+    dpc_clnt_free_portname(names, args->ip_pair_cnt);
+    return ret;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_update_ip_pair);
+
+/* NOTE(review): stub — ip-view updates are currently ignored and
+ * report success; confirm whether this is intentional. */
+int dpc_clnt_update_ip_view(mulp_update_ip_view_args *args)
+{
+    return 0;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_update_ip_view);
+
+/*
+ * Push a new shard view into the multipath core, then (best effort)
+ * create its proc file.  Returns the core's or the proc layer's error.
+ */
+int dpc_clnt_update_shard_view(mulp_update_shard_view_args *args)
+{
+    int ret;
+    if (args == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "mulp_update_shard_view_args is NULL");
+        return -EINVAL;
+    }
+    ret = mulp_update_shard_view(args);
+    if (ret != 0) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc clnt update shard view failed, ret: %d, mp_id:%llu, wwn:0x%llx, "
+            "cluster id:%llu, pool id:%llu, shard cnt:%u", ret, args->mp_id, args->wwn, args->cluster_id,
+            args->pool_id, args->shard_cnt);
+        return ret;
+    }
+
+    ret = mulp_proc_try_create_shard_view_file(args->wwn, args->cluster_id, args->pool_id);
+    if (ret != 0) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "create shard view proc file failed, ret: %d", ret);
+    }
+    return ret;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_update_shard_view);
+
+/* The rpcrdma tos accessors are resolved at runtime via kallsyms
+ * because the providing module may be absent; -EOPNOTSUPP is returned
+ * when the symbol cannot be found. */
+typedef int (*set_tos_info_ptr)(u16 port, u32 tos);
+typedef int (*get_tos_info_ptr)(u32 *buf, size_t buf_size, u16 *out_cnt);
+
+/* Set the RDMA type-of-service value for @port. */
+int dpc_clnt_set_tos_info(uint16_t port, uint32_t tos)
+{
+    set_tos_info_ptr func = NULL;
+    func = (set_tos_info_ptr)dpc_rpc_get_kallsyms_lookup_name("rpcrdma_set_tos_info");
+    if (func == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "symbol %s not found", "rpcrdma_set_tos_info");
+        return -EOPNOTSUPP;
+    }
+    return func(port, tos);
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_set_tos_info);
+
+/*
+ * Fetch the RDMA type-of-service table via the runtime-resolved
+ * rpcrdma_get_tos_info and unpack it into @clnt_tos_info (at most
+ * DPC_TOS_INFO_CNT_MAX entries).
+ */
+int dpc_clnt_get_tos_info(dpc_clnt_tos_info *clnt_tos_info)
+{
+    int ret = 0;
+    uint16_t index = 0;
+    uint32_t buf[DPC_TOS_INFO_CNT_MAX * 2] = {0};
+    uint16_t out_cnt = 0;
+    get_tos_info_ptr func = NULL;
+    if (clnt_tos_info == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "invalid params");
+        return -EINVAL;
+    }
+    func = (get_tos_info_ptr)dpc_rpc_get_kallsyms_lookup_name("rpcrdma_get_tos_info");
+    if (func == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "symbol %s not found", "rpcrdma_get_tos_info");
+        return -EOPNOTSUPP;
+    }
+    ret = func(buf, DPC_TOS_INFO_CNT_MAX * 2, &out_cnt); // buf layout: [port0, tos0, port1, tos1, ...]
+    if (ret) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "get tos info failed, ret: %d", ret);
+        return -EOPNOTSUPP;
+    }
+    clnt_tos_info->cnt = 0;
+    for (index = 0; index < out_cnt && index < (uint16_t)DPC_TOS_INFO_CNT_MAX; ++index) {
+        clnt_tos_info->tos_info[index].port = (uint16_t)buf[index * 2];
+        clnt_tos_info->tos_info[index].tos = buf[index * 2 + 1];
+        clnt_tos_info->cnt++;
+    }
+    return ret;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_get_tos_info);
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.h
new file mode 100644
index 0000000..edefd2c
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Multipath-set APIs adapted for DPC
+ */
+
+#ifndef _DPC_RPC_CLIENT_H_
+#define _DPC_RPC_CLIENT_H_
+
+/* NOTE(review): include target lost in patch formatting — confirm
+ * against the original tree (likely <linux/list.h>). */
+#include
+#include "multipath_api.h"
+#include "mulp_porting.h"
+
+/* Cached per-multipath bookkeeping, linked on the client's global list. */
+typedef struct {
+    struct list_head node;
+    uint64_t mp_id;
+    uint64_t client_id;
+    uint32_t client_ls_id;
+    uint32_t nconnect; /* number of links created per ip pair */
+    mulp_network_type network_type;
+    mulp_select_path_strategy strategy;
+    char user_name[MULP_USER_NAME_LEN];
+    char user_authkey[MULP_USER_AUTHKEY_LEN];
+} dpc_multipath_info;
+
+/* Look up the cached info node for @mp_id (0 on success, -ENOENT). */
+int dpc_rpc_client_get_multipath_info(uint64_t mp_id, dpc_multipath_info **info);
+
+/* Sub-component life cycle, driven by the adapter module. */
+int dpc_rpc_client_init(void);
+void dpc_rpc_client_exit(void);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.c
new file mode 100644
index 0000000..619043a
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.c
@@ -0,0 +1,94 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC rpc client null call
+ */
+#include "dpc_rpc_client_null_call.h"
+
+
+/* NULL-procedure encode: nothing to marshal. */
+static void dpc_prc_ping_rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, const void *obj) {}
+
+/* NULL-procedure decode: nothing to unmarshal. */
+static int dpc_prc_ping_rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *obj)
+{
+    return 0;
+}
+
+/* RPC NULL procedure used to probe transport liveness. */
+static const struct rpc_procinfo rpcproc_null = {
+    .p_encode = dpc_prc_ping_rpcproc_encode_null,
+    .p_decode = dpc_prc_ping_rpcproc_decode_null,
+};
+
+/* Default rpc_call_ops: each hook forwards to the caller-supplied ops
+ * wrapped in struct trans_call_data, then performs the default action.
+ * NOTE(review): these four handlers are not static, and "defaultl" in
+ * dpc_rpc_defaultl_done looks like a typo — confirm there are no
+ * external users before renaming or making them static. */
+void dpc_rpc_default_prepare(struct rpc_task *task, void *data)
+{
+    struct trans_call_data *tcd = data;
+    if (tcd->ops && tcd->ops->rpc_call_prepare) {
+        tcd->ops->rpc_call_prepare(task, tcd->data);
+    }
+    rpc_call_start(task);
+}
+
+void dpc_rpc_defaultl_done(struct rpc_task *task, void *data)
+{
+    struct trans_call_data *tcd = data;
+    if (tcd->ops && tcd->ops->rpc_call_done) {
+        tcd->ops->rpc_call_done(task, tcd->data);
+    }
+}
+
+void dpc_rpc_default_stats(struct rpc_task *task, void *data)
+{
+    struct trans_call_data *tcd = data;
+    if (tcd->ops && tcd->ops->rpc_count_stats) {
+        tcd->ops->rpc_count_stats(task, tcd->data);
+    }
+    rpc_count_iostats(task, task->tk_client->cl_metrics);
+}
+
+/* Releases the trans_call_data wrapper allocated by the submitter. */
+void dpc_rpc_default_release(void *data)
+{
+    struct trans_call_data *tcd = data;
+    if (tcd->ops && tcd->ops->rpc_release) {
+        tcd->ops->rpc_release(tcd->data);
+    }
+    kfree(tcd);
+}
+
+static const struct rpc_call_ops rpc_default_ops = {
+    .rpc_call_prepare = dpc_rpc_default_prepare,
+    .rpc_call_done = dpc_rpc_defaultl_done,
+    .rpc_count_stats = dpc_rpc_default_stats,
+    .rpc_release = dpc_rpc_default_release,
+};
+
+/*
+ * Build and run an async NULL-procedure rpc_task on @xprt via @clnt.
+ * Falls back to rpc_default_ops when the caller passes no ops.
+ * Returns the task from rpc_run_task() (may be an ERR_PTR).
+ */
+static struct rpc_task *dpc_rpc_tranc_call_null_helper(struct rpc_clnt *clnt, struct rpc_xprt *xprt, int flags,
+    const struct rpc_call_ops *ops, void *data)
+{
+    struct rpc_message msg = {
+        .rpc_proc = &rpcproc_null,
+        .rpc_cred = NULL,
+    };
+    struct rpc_task_setup task_setup_data = {
+        .rpc_client = clnt,
+        .rpc_xprt = xprt,
+        .rpc_message = &msg,
+        .callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
+        .callback_data = data,
+        .flags = flags,
+    };
+
+    return rpc_run_task(&task_setup_data);
+}
+
+/*
+ * Issue an async NULL call on @xprt to probe liveness.  Returns 0 when
+ * the task was queued, otherwise the error from rpc_run_task()
+ * (previously collapsed to -ENOSYS, discarding the real cause).
+ */
+int dpc_rpc_tranc_test_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt, const struct rpc_call_ops *ops, void *data,
+    int flags)
+{
+    struct rpc_task *task;
+
+    task = dpc_rpc_tranc_call_null_helper(clnt, xprt, RPC_TASK_SOFT | RPC_TASK_SOFTCONN | flags, ops, data);
+    if (IS_ERR(task)) {
+        return PTR_ERR(task);
+    }
+    rpc_put_task(task);
+    return 0;
+}
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.h
new file mode 100644
index 0000000..18989d7
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_null_call.h
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC rpc client null call
+ */
+#ifndef DPC_RPC_CLIENT_NULL_CALL_H
+#define DPC_RPC_CLIENT_NULL_CALL_H
+
+/* NOTE(review): include targets lost in patch formatting — likely
+ * <linux/sunrpc/clnt.h>, <linux/sunrpc/sched.h>, <linux/sunrpc/xprt.h>;
+ * confirm against the original tree. */
+#include
+#include
+#include
+
+/* Wraps caller-provided rpc_call_ops and data so the default handlers
+ * can chain to them before performing the default action. */
+struct trans_call_data {
+    const struct rpc_call_ops *ops;
+    void *data;
+};
+
+/* Async NULL-ping of @xprt; 0 when the probe task was queued. */
+int dpc_rpc_tranc_test_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt, const struct rpc_call_ops *ops, void *data,
+    int flags);
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.c
new file mode 100644
index 0000000..01adf79
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.c
@@ -0,0 +1,147 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC rpc client read
+ */
+#include "dpc_rpc_client_read.h"
+#include "dpc_rpc_io_common.h"
+
+/*
+ * Encode a DPC READ request onto @xdr and register the reply pages that
+ * will receive the read payload.  On any XDR encode failure the macro
+ * logs and returns early, leaving the stream partially written.
+ */
+void dpc_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data)
+{
+	dpc_clnt_read_req *args = (dpc_clnt_read_req *)data;
+	int32_t ret;
+	uint32_t page_off;
+	char *user_name;
+
+	args->hdr.op_code = DPC_RPC_OP_READ;
+	args->process_flag |= DPC_RPC_PROC_READ_ENCODE;	/* trace marker used in short-read diagnostics */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, DPC_RPC_OP_MAGIC));
+	ret = dpc_xdr_encode_rpc_args_head(xdr, &args->hdr);
+	if (ret != 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "encode dpc_rpc_args_head fail");
+		return;
+	}
+
+	/* The field order below is the wire format; do not reorder. */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u32(xdr, DPC_CLIENT_DEFAULT_VERSION));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u32(xdr, (uint32_t)args->length));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->offset));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->reverse));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_opaque(xdr, args->uuid.data, args->uuid.len));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->share_id));
+
+	user_name = args->user_name;
+	/* fixed-width opaque: always DPC_USER_NAME_LEN bytes on the wire */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_opaque(xdr, user_name, DPC_USER_NAME_LEN));
+
+	page_off = args->page_offset;
+	/* reply payload lands after the fixed reply header (size in 32-bit words) */
+	dpc_prepare_rpc_reply_pages(req, args->pages, page_off, args->length, DPC_RPC_MSG_READ_LEN >> BYTES_TO_WORDS_SHIFT);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+
+	return;
+}
+
+/*
+ * Decode a DPC READ reply.
+ * Returns the number of payload bytes read.  Protocol/op errors are
+ * reported by storing the status in resp->hdr.op_status and returning 0.
+ */
+int dpc_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data)
+{
+	uint64_t magic;
+	uint64_t pad;
+	uint32_t recvd;
+
+	dpc_clnt_read_rsp *resp = (dpc_clnt_read_rsp *)data;
+	if (data == NULL) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "dpc_clnt_read_rsp is NULL.");
+		return 0; /* caller interprets the result as bytes read */
+	}
+
+	xdr_stream_decode_u64(xdr, &magic);
+	if (magic != DPC_RPC_OP_MAGIC) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "check magic{%llx %llx} fail", magic, (uint64_t)DPC_RPC_OP_MAGIC);
+		return 0;
+	}
+
+	dpc_xdr_decode_rpc_resp_head(xdr, &resp->hdr);
+	if (resp->hdr.op_status != 0) {
+		resp->hdr.op_status = dpc_change_status_to_err(resp->hdr.op_status);
+		MULP_LOG(DPC_CLIENT, MULP_DEBUG, "check opStatus{%u} fail", resp->hdr.op_status);
+		return 0;
+	}
+
+	/* Fixed reply fields, in wire order. */
+	xdr_stream_decode_u32(xdr, &resp->version);
+	xdr_stream_decode_u64(xdr, &resp->offset);
+	xdr_stream_decode_u64(xdr, &resp->length);
+	xdr_stream_decode_u32(xdr, &resp->flag);
+	xdr_stream_decode_u64(xdr, &resp->reverse);
+
+	dpc_xdr_decode_rpc_resp_post_op_attr(xdr, &resp->post_attr);
+	xdr_stream_decode_u64(xdr, &resp->delay);
+	xdr_stream_decode_u64(xdr, &pad); /* trailing pad word, decoded last */
+
+	/* Never report more bytes than the transport actually placed in the
+	 * reply pages, or the caller would consume uninitialized page data. */
+	recvd = xdr_read_pages(xdr, resp->length);
+	if (recvd < resp->length) {
+		resp->length = recvd;
+	}
+
+	resp->process_flag |= DPC_RPC_PROC_READ_DECODE;
+	return resp->length;
+}
+
+/*
+ * Populate the inner READ request/response from the caller's rw args and
+ * the multipath info (ids, user name).  The page vector is shared, not
+ * copied, so @args must stay valid for the duration of the call.
+ */
+void dpc_clnt_trans_encode_read_args(dpc_clnt_rw_args *args, dpc_clnt_inner_args *rw_inner_args,
+				     dpc_multipath_info *info)
+{
+	dpc_clnt_read_req *inner_args;
+	dpc_clnt_read_rsp *inner_resp;
+	if (args == NULL || rw_inner_args == NULL || info == NULL) {
+		return;
+	}
+	inner_args = (dpc_clnt_read_req *) &rw_inner_args->req;
+	inner_resp = (dpc_clnt_read_rsp *) &rw_inner_args->resp;
+	inner_args->hdr.op_code = DPC_RPC_OP_READ;
+	inner_args->hdr.client_ls_id = info->client_ls_id;
+	inner_args->hdr.instance_id = info->client_id;
+	inner_args->length = args->len;
+	inner_args->offset = args->offset;
+	inner_args->reverse = 0ULL;
+	inner_args->uuid = args->uuid;	/* struct copy */
+	inner_args->share_id = args->share_id;
+	/* NOTE(review): borrowed pointer — info->user_name must outlive the RPC; confirm */
+	inner_args->user_name = info->user_name;
+	inner_args->page_offset = args->pgbase;
+	inner_args->pages = args->pages;
+	inner_resp->pages = args->pages;
+}
+
+/*
+ * Copy decoded READ results from the inner response back into the
+ * caller-visible rw args (status, eof, byte count, post-op attributes).
+ * Short reads without EOF are only logged here, not retried.
+ */
+void dpc_clnt_trans_decode_read_args(dpc_clnt_rw_args *args, dpc_clnt_inner_args *rw_inner_args)
+{
+	dpc_clnt_read_req *inner_args;
+	dpc_clnt_read_rsp *inner_resp;
+	if (args == NULL || rw_inner_args == NULL) {
+		return;
+	}
+	inner_args = (dpc_clnt_read_req *) &rw_inner_args->req;
+	inner_resp = (dpc_clnt_read_rsp *) &rw_inner_args->resp;
+	args->op_status = (int32_t)inner_resp->hdr.op_status;
+	if (args->op_status != 0) {
+		return;	/* on error the remaining fields are meaningless */
+	}
+	args->eof = inner_resp->flag & DPC_READ_EOF_BIT_FLAG;
+	args->counted = inner_resp->length;
+	if (inner_resp->post_attr.attributes_follow) {
+		dpc_copy_from_inner_attr_to_rw_attr(args->post_attr, &inner_resp->post_attr._dpc_post_op_attr.attributes);
+	}
+
+	/* zero bytes with no EOF is a protocol anomaly — log it */
+	if (args->counted == 0 && args->eof == 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "read len error rsp len %llu eof %u", args->counted, args->eof);
+	}
+	/* short read without EOF: dump the encode/decode trace flags */
+	if (args->len > args->counted && args->eof == 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "read len error req len %u rsp len %llu eof %u reqPFlag %llu rPFlag %llu",
+			 args->len, args->counted, args->eof, inner_args->process_flag, inner_resp->process_flag);
+	}
+}
+
+/* Account READ statistics against the path that served this request. */
+void dpc_clnt_read_count_stats(void *path_mgmt, dpc_clnt_inner_args *rw_inner_args)
+{
+	dpc_clnt_read_req *req = (dpc_clnt_read_req *) &rw_inner_args->req;
+	dpc_clnt_read_rsp *rsp = (dpc_clnt_read_rsp *) &rw_inner_args->resp;
+
+	if (rsp->hdr.op_status != 0) {
+		mulp_path_count_stats(path_mgmt, PATH_INFO_READ_FAILED_CNT, 1);
+		return;
+	}
+	mulp_path_count_stats(path_mgmt, PATH_INFO_READ_CNT, 1);
+	mulp_path_count_stats(path_mgmt, PATH_INFO_READ_LEN, req->length);
+	mulp_path_count_stats(path_mgmt, PATH_INFO_READ_SUM_DELAY, rsp->delay);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.h
new file mode 100644
index 0000000..d1d5285
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_read.h
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC RPC CLIENT READ
+ */
+
+#ifndef DPC_RPC_CLIENT_READ_H
+#define DPC_RPC_CLIENT_READ_H
+
+#include
+#include
+#include "multipath_api.h"
+#include "dpc_rpc_io_common.h"
+#include "dpc_rpc_client.h"
+
+void dpc_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data);
+int dpc_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data);
+void dpc_clnt_trans_encode_read_args(dpc_clnt_rw_args *args, dpc_clnt_inner_args *rw_inner_args,
+ dpc_multipath_info *info);
+void dpc_clnt_trans_decode_read_args(dpc_clnt_rw_args *args, dpc_clnt_inner_args *rw_inner_args);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.c
new file mode 100644
index 0000000..1bdbbc3
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ * Description: rpc client for write
+ * Author: pengjingshuai p00887752
+ * Create: 2024/12/23
+ */
+#include "dpc_rpc_client_write.h"
+
+/*
+ * Encode a DPC WRITE request onto @xdr and append the payload pages.
+ * On any XDR encode failure the macro logs and returns early.
+ */
+void dpc_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data)
+{
+	dpc_clnt_write_req *args = (dpc_clnt_write_req *)data;
+	int32_t ret;
+
+	args->hdr.op_code = DPC_RPC_OP_WRITE;
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, DPC_RPC_OP_MAGIC));
+	ret = dpc_xdr_encode_rpc_args_head(xdr, &args->hdr);
+	if (ret != 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "encode dpc_rpc_args_head fail");
+		return;
+	}
+
+	/* TODO(review): which version value applies here, and where should it come from? */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u32(xdr, DPC_CLIENT_DEFAULT_VERSION));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u32(xdr, args->length));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->offset));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->reverse));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_opaque(xdr, args->uuid.data, args->uuid.len));
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u32(xdr, args->flag)); /* write flags */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_u64(xdr, args->share_id));
+
+	/* fixed-width opaque: always DPC_USER_NAME_LEN bytes on the wire */
+	DPC_CHECK_XDR_VOID_RET(xdr_stream_encode_opaque(xdr, args->user_name, DPC_USER_NAME_LEN));
+
+	/* payload follows the fixed header as page data */
+	xdr_write_pages(xdr, args->pages, args->page_offset, args->length);
+	xdr->buf->flags |= XDRBUF_WRITE;
+
+	return;
+}
+
+/*
+ * Decode a DPC WRITE reply.
+ * Protocol/op errors are recorded in args->hdr.op_status and 0 is
+ * returned so the caller can surface the per-op status; a negative errno
+ * is returned only when decoding cannot proceed at all.
+ */
+int dpc_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data)
+{
+	uint64_t magic;
+	dpc_clnt_write_rsp *args = (dpc_clnt_write_rsp *)data;
+
+	if (data == NULL) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "write response data is NULL.");
+		/* xdr decode callbacks report failure with a negative errno */
+		return -EPERM;
+	}
+
+	xdr_stream_decode_u64(xdr, &magic);
+	if (magic != DPC_RPC_OP_MAGIC) {
+		args->hdr.op_status = EINVAL;
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "check magic{%llx %llx} fail", magic, DPC_RPC_OP_MAGIC);
+		return 0;
+	}
+
+	dpc_xdr_decode_rpc_resp_head(xdr, &args->hdr);
+	if (args->hdr.op_status != 0) {
+		args->hdr.op_status = dpc_change_status_to_err(args->hdr.op_status);
+		MULP_LOG(DPC_CLIENT, MULP_DEBUG, "check opStatus{%u} fail", args->hdr.op_status);
+		return 0;
+	}
+
+	/* Fixed reply fields, in wire order. */
+	xdr_stream_decode_u32(xdr, &args->version);
+	xdr_stream_decode_u32(xdr, &args->remain);
+
+	dpc_xdr_decode_rpc_resp_post_op_attr(xdr, &args->prev_attr);
+	dpc_xdr_decode_rpc_resp_post_op_attr(xdr, &args->post_attr);
+	xdr_stream_decode_u64(xdr, &args->delay);
+	return 0;
+}
+
+/*
+ * Populate the inner WRITE request from the caller's rw args and the
+ * multipath info (ids, user name).  The page vector is shared, not copied.
+ */
+void dpc_clnt_trans_encode_write_args(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *rw_inner_args,
+				      dpc_multipath_info *info)
+{
+	dpc_clnt_write_req *inner_args;
+	if (rw_args == NULL || rw_inner_args == NULL || info == NULL) {
+		return;
+	}
+	inner_args = (dpc_clnt_write_req *) &rw_inner_args->req;
+	inner_args->hdr.op_code = DPC_RPC_OP_WRITE;
+	/* NOTE(review): borrowed pointer — info->user_name must outlive the RPC; confirm */
+	inner_args->user_name = info->user_name;
+	inner_args->hdr.client_ls_id = info->client_ls_id;
+	inner_args->hdr.instance_id = info->client_id;
+	inner_args->length = rw_args->len;
+	inner_args->offset = rw_args->offset;
+	inner_args->uuid.len = rw_args->uuid.len;
+	/* NOTE(review): assumes rw_args->uuid.len <= sizeof(inner_args->uuid.data) — confirm upstream validation */
+	memcpy(inner_args->uuid.data, rw_args->uuid.data, rw_args->uuid.len);
+	inner_args->flag = rw_args->flag;
+	inner_args->share_id = rw_args->share_id;
+	inner_args->pages = rw_args->pages;
+	inner_args->page_offset = rw_args->pgbase;
+}
+
+/*
+ * Copy decoded WRITE results from the inner response back into the
+ * caller-visible rw args (status, byte count, pre/post attributes).
+ */
+void dpc_clnt_trans_decode_write_args(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *rw_inner_args)
+{
+	dpc_clnt_write_req *inner_req;
+	dpc_clnt_write_rsp *inner_resp;
+
+	/* mirror the NULL checks done by dpc_clnt_trans_encode_write_args() */
+	if (rw_args == NULL || rw_inner_args == NULL) {
+		return;
+	}
+	inner_req = (dpc_clnt_write_req *)&rw_inner_args->req;
+	inner_resp = (dpc_clnt_write_rsp *)&rw_inner_args->resp;
+	rw_args->op_status = (int32_t)inner_resp->hdr.op_status;
+	/* a successful write acknowledges the full requested length */
+	rw_args->counted = inner_req->length;
+	if (inner_resp->prev_attr.attributes_follow) {
+		dpc_copy_from_inner_attr_to_rw_attr(rw_args->prev_attr, &inner_resp->prev_attr._dpc_post_op_attr.attributes);
+	}
+	if (inner_resp->post_attr.attributes_follow) {
+		dpc_copy_from_inner_attr_to_rw_attr(rw_args->post_attr, &inner_resp->post_attr._dpc_post_op_attr.attributes);
+	}
+}
+
+/* Account WRITE statistics against the path that served this request. */
+void dpc_clnt_write_count_stats(void *path_mgmt, dpc_clnt_inner_args *rw_inner_args)
+{
+	dpc_clnt_write_req *req = (dpc_clnt_write_req *)&rw_inner_args->req;
+	dpc_clnt_write_rsp *rsp = (dpc_clnt_write_rsp *)&rw_inner_args->resp;
+
+	if (rsp->hdr.op_status != 0) {
+		mulp_path_count_stats(path_mgmt, PATH_INFO_WRITE_FAILED_CNT, 1);
+		mulp_path_count_stats(path_mgmt, PATH_INFO_ERRNO, rsp->hdr.op_status);
+		return;
+	}
+	mulp_path_count_stats(path_mgmt, PATH_INFO_WRITE_CNT, 1);
+	mulp_path_count_stats(path_mgmt, PATH_INFO_WRITE_LEN, req->length);
+	mulp_path_count_stats(path_mgmt, PATH_INFO_WRITE_SUM_DELAY, rsp->delay);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.h
new file mode 100644
index 0000000..1df525a
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_client_write.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ * Description: rpc client for write
+ * Author: pengjingshuai p00887752
+ * Create: 2024/12/23
+ */
+#ifndef DPC_RPC_CLIENT_WRITE_H
+#define DPC_RPC_CLIENT_WRITE_H
+
+#include
+#include
+#include "dpc_rpc_io_common.h"
+#include "dpc_rpc_client.h"
+
+void dpc_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data);
+int dpc_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data);
+void dpc_clnt_trans_encode_write_args(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *rw_inner_args,
+ dpc_multipath_info *path_info);
+void dpc_clnt_trans_decode_write_args(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *rw_inner_args);
+
+#endif // DPC_RPC_CLIENT_WRITE_H
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.c
new file mode 100644
index 0000000..7baada1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.c
@@ -0,0 +1,158 @@
+#include "dpc_rpc_conn.h"
+#include
+#include
+#include
+#include
+#include
+#include "multipath_api.h"
+#include "mulp_log.h"
+#include "mulp_porting.h"
+#include "dpc_rpc_xdr.h"
+#include "dpc_rpc_client_null_call.h"
+
+#define MAX_IP_LENGTH (128)
+#define DPC_PORT (20080)
+#define DPC_RPC_TCP_PROTOCOL (0x6)
+#define DPC_RPC_RDMA_PROTOCOL (0x100)
+
+typedef struct {
+ uint64_t mp_id;
+ void *path_info;
+ void *context;
+ void (*cb)(int result, uint64_t mp_id, void *path_info, void *context);
+} dpc_rpc_clnt_ping_ctx;
+
+/*
+ * Completion callback for the ping NULL call: report the task status to
+ * the registered callback, then free the per-call context.
+ */
+static void dpc_ping_call_done(struct rpc_task *task, void *data)
+{
+	dpc_rpc_clnt_ping_ctx *ctx = (dpc_rpc_clnt_ping_ctx *)data;
+	/* NOTE(review): ctx->cb is dereferenced unconditionally — confirm all
+	 * dpc_ping() callers pass a non-NULL callback. */
+	ctx->cb(task->tk_status, ctx->mp_id, ctx->path_info, ctx->context);
+	mulp_mem_free(ctx);
+}
+
+static const struct rpc_call_ops dpc_rpc_tranc_ops = {
+ .rpc_call_done = dpc_ping_call_done,
+};
+
+/*
+ * Asynchronously probe a path by sending a NULL RPC over it.
+ * @path points at the path's rpc_clnt; @cb is invoked from the RPC
+ * completion context with the probe result and the opaque identifiers.
+ * Returns 0 when the probe was started, negative errno otherwise.
+ */
+int dpc_ping(void *path, uint64_t mp_id, void *path_info, void *context,
+	     void (*cb)(int result, uint64_t mp_id, void *path_info, void *context))
+{
+	int ret;
+	struct rpc_clnt *clnt = NULL;
+	dpc_rpc_clnt_ping_ctx *ctx = NULL;
+
+	if (!path) {
+		MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc rpc clnt get null path.");
+		return -EINVAL;
+	}
+	clnt = (struct rpc_clnt *)path;
+
+	/* ctx is freed by dpc_ping_call_done() on completion, or here on failure */
+	ctx = (dpc_rpc_clnt_ping_ctx *)mulp_mem_zalloc(sizeof(dpc_rpc_clnt_ping_ctx));
+	if (!ctx) {
+		MULP_LOG(MULP_MODULE_API, MULP_CRITI, "dpc rpc clnt ping alloc ctx failed.");
+		return -ENOMEM;
+	}
+	ctx->path_info = path_info;
+	ctx->mp_id = mp_id;
+	ctx->context = context;
+	ctx->cb = cb;
+
+	ret = dpc_rpc_tranc_test_xprt(clnt, clnt->cl_xprt, &dpc_rpc_tranc_ops, (void *)ctx, RPC_TASK_ASYNC);
+	if (ret) {
+		MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc rpc clnt tranc test xprt failed{%d}.", ret);
+		mulp_mem_free(ctx);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * Parse the textual address @ip into @sap.
+ * Returns the rpc_pton() result (address size, or 0 on parse failure);
+ * a NULL @ip is treated as a parse failure.
+ */
+int dpc_get_sockaddr(char *ip, struct sockaddr *sap, const size_t salen)
+{
+	size_t len;
+
+	if (!ip) {
+		return 0;
+	}
+	len = strnlen(ip, MAX_IP_LENGTH);
+	return rpc_pton(current->nsproxy->net_ns, ip, len, sap, salen);
+}
+
+/* Map the multipath network type onto the matching RPC transport protocol id. */
+int dpc_get_protocol(mulp_network_type network_type)
+{
+	return (network_type == MULP_NETWORK_TCP) ? DPC_RPC_TCP_PROTOCOL : DPC_RPC_RDMA_PROTOCOL;
+}
+
+/*
+ * Create an RPC client (path) for the local/remote IP pair in @args.
+ * The remote port is forced to DPC_PORT and pinging at create time is
+ * skipped (RPC_CLNT_CREATE_NOPING).  On success the new client is stored
+ * in args->path.  Returns 0 or a negative errno.
+ */
+int dpc_create_path(mulp_ops_create_path_args *args)
+{
+	struct rpc_create_args rpc_args = {0};
+	struct sockaddr_storage address_local;
+	struct sockaddr_storage address_remote;
+	struct sockaddr *sap_local = (struct sockaddr *)&address_local;
+	struct sockaddr *sap_remote = (struct sockaddr *)&address_remote;
+	struct rpc_clnt *clnt = NULL;
+
+	if (dpc_get_sockaddr(args->ip_pair->local_ip, sap_local, sizeof(struct sockaddr_storage)) == 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "Local_ip(%s) is invalid.", args->ip_pair->local_ip);
+		return -EINVAL;
+	}
+
+	if (dpc_get_sockaddr(args->ip_pair->remote_ip, sap_remote, sizeof(struct sockaddr_storage)) == 0) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "Remote_ip(%s) is invalid.", args->ip_pair->remote_ip);
+		return -EINVAL;
+	}
+	/* dpc_init_create_args() fills the program/version defaults */
+	dpc_init_create_args(&rpc_args);
+	rpc_args.protocol = dpc_get_protocol(args->network_type);
+	rpc_args.address = sap_remote;
+	rpc_args.addrsize = sizeof(struct sockaddr_storage);
+	rpc_args.saddress = sap_local;	/* bind to the chosen local NIC address */
+	rpc_args.flags |= RPC_CLNT_CREATE_NOPING;
+	rpc_set_port(sap_remote, DPC_PORT);
+	clnt = rpc_create(&rpc_args);
+	if (IS_ERR(clnt)) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "Remote_ip(%s) Local_ip(%s) create conn failed.", args->ip_pair->remote_ip,
+			 args->ip_pair->local_ip);
+		return PTR_ERR(clnt);
+	}
+	args->path = clnt;
+	return 0;
+}
+
+/*
+ * Tear down the RPC client behind @path and notify @cb (if given) with
+ * the opaque identifiers.  Returns 0, or -EINVAL for a NULL path.
+ */
+int dpc_destroy_path(void *path, uint64_t mp_id, void *path_info, void *context,
+		     void (*cb)(int result, uint64_t mp_id, void *path_info, void *context))
+{
+	if (path == NULL) {
+		MULP_LOG(DPC_CLIENT, MULP_ERR, "Path is null.");
+		return -EINVAL;
+	}
+
+	rpc_shutdown_client((struct rpc_clnt *)path);
+
+	if (cb != NULL) {
+		cb(0, mp_id, path_info, context);
+	}
+	return 0;
+}
+
+/*
+ * Register the DPC connection callbacks (ping / create / destroy) with
+ * the unified multipath layer.  Returns 0 or the registration error.
+ */
+int dpc_rpc_conn_init(void)
+{
+	int ret;
+	/* NOTE(review): positional initializer — must match the field order of
+	 * mulp_app_ops_set (ping, create_path, destroy_path); designated
+	 * initializers would make this robust against reordering. */
+	mulp_app_ops_set ops = {
+		dpc_ping,
+		dpc_create_path,
+		dpc_destroy_path,
+	};
+
+	ret = mulp_reg_app_ops(MULP_APP_DPC, &ops);
+	if (ret) {
+		MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc rpc reg mulp app ops failed{%d}.", ret);
+		return ret;
+	}
+	return 0;
+}
+
+/* Unregister the DPC callbacks from the multipath layer; errors are ignored. */
+void dpc_rpc_conn_exit(void)
+{
+	(void)mulp_unreg_app_ops(MULP_APP_DPC);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.h
new file mode 100644
index 0000000..c0162b1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_conn.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * 多路径链接建立和销毁, 对dpc client提供多连接的创删
+ */
+
+#ifndef _DPC_RPC_CONN_H_
+#define _DPC_RPC_CONN_H_
+
+int dpc_rpc_conn_init(void);
+void dpc_rpc_conn_exit(void);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.c
new file mode 100644
index 0000000..f645487
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.c
@@ -0,0 +1,176 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC client io common
+ */
+#include "dpc_rpc_io_common.h"
+#include
+#include
+
+/* Map DPC file types to the corresponding S_IF* mode bits (index = dpc_ftype). */
+static const uint32_t dpc_type2fmt[] = {
+	[DPCBAD] = 0,
+	[DPCREG] = S_IFREG,
+	[DPCDIR] = S_IFDIR,
+	[DPCBLK] = S_IFBLK,
+	[DPCCHR] = S_IFCHR,
+	[DPCLNK] = S_IFLNK,
+	[DPCSOCK] = S_IFSOCK,
+	[DPCFIFO] = S_IFIFO,
+};
+
+/* Wire statuses (>= DPCERR_NOTRANS) that translate to local errno values. */
+static const dpc_status_err_map g_dpc_status_err_map[] = {
+	{DPCERR_NOTSUPP, EOPNOTSUPP},
+	{DPCERR_SERVERFAULT, ESERVERFAULT},
+	{DPCERR_JUKEBOX, ETIMEDOUT},
+};
+
+/*
+ * Encode the common DPC request header.  Returns 0 on success or -EAGAIN
+ * (from the check macro) when an encode step fails.
+ */
+uint32_t dpc_xdr_encode_rpc_args_head(struct xdr_stream *xdr, const dpc_rpc_head_req *rw_inner_data)
+{
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, rw_inner_data->op_code));
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, rw_inner_data->file_flag)); // routing attributes, e.g. whether the file was sent to its owning node
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u64(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, 0)); // reserved
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u32(xdr, rw_inner_data->client_ls_id));
+	DPC_CHECK_XDR_RET(xdr_stream_encode_u64(xdr, rw_inner_data->instance_id)); // instance id is managed by the caller
+	return 0;
+}
+
+/*
+ * Decode the common DPC reply header; only op_status is kept, the
+ * surrounding words are reserved fields that are read and discarded.
+ */
+void dpc_xdr_decode_rpc_resp_head(struct xdr_stream *xdr, dpc_rpc_head_rsp *rw_inner_data)
+{
+	uint32_t tmp;
+	uint64_t tmp2;
+	xdr_stream_decode_u32(xdr, &tmp); // reserved
+	xdr_stream_decode_u32(xdr, &tmp); // reserved
+	xdr_stream_decode_u32(xdr, &rw_inner_data->op_status); // operation result
+	xdr_stream_decode_u32(xdr, &tmp); // reserved
+	xdr_stream_decode_u64(xdr, &tmp2); // reserved
+}
+
+/*
+ * Rewrite the S_IFMT bits of fattr->mode from fattr->type; any type
+ * outside [DPCREG, DPCFIFO] maps to DPCBAD (no file-type bits).
+ */
+void dpc_xdr_set_fmode(dpc_fattr *fattr)
+{
+	dpc_ftype t = (fattr->type >= DPCREG && fattr->type <= DPCFIFO) ? fattr->type : DPCBAD;
+
+	fattr->mode = (fattr->mode & ~S_IFMT) | dpc_type2fmt[t];
+}
+
+/*
+ * Decode an optional post-op attribute block and normalize the mode's
+ * file-type bits via dpc_xdr_set_fmode().
+ */
+void dpc_xdr_decode_rpc_resp_post_op_attr(struct xdr_stream *xdr, dpc_post_op_attr *post_op_attr)
+{
+	dpc_fattr *fattr = &post_op_attr->_dpc_post_op_attr.attributes;
+
+	xdr_stream_decode_u32(xdr, &post_op_attr->attributes_follow);
+	// when attributes_follow is 0 there is no attribute body to decode
+	if (post_op_attr->attributes_follow) {
+		xdr_stream_decode_u32(xdr, (uint32_t *)&fattr->type); // enum carried as u32
+		xdr_stream_decode_u32(xdr, &fattr->mode);
+		xdr_stream_decode_u32(xdr, &fattr->nlink);
+		xdr_stream_decode_u32(xdr, &fattr->uid);
+		xdr_stream_decode_u32(xdr, &fattr->gid);
+		xdr_stream_decode_u64(xdr, &fattr->size);
+		xdr_stream_decode_u64(xdr, &fattr->used);
+		// decode the DpcSpecData pair (device numbers)
+		xdr_stream_decode_u32(xdr, &fattr->rdev.specdata1);
+		xdr_stream_decode_u32(xdr, &fattr->rdev.specdata2);
+
+		xdr_stream_decode_u64(xdr, &fattr->fs_id);
+		xdr_stream_decode_u64(xdr, &fattr->file_id);
+		// decode the DpcTime triplets (atime, mtime, ctime)
+		xdr_stream_decode_u32(xdr, &fattr->atime.seconds);
+		xdr_stream_decode_u32(xdr, &fattr->atime.nseconds);
+		xdr_stream_decode_u32(xdr, &fattr->mtime.seconds);
+		xdr_stream_decode_u32(xdr, &fattr->mtime.nseconds);
+		xdr_stream_decode_u32(xdr, &fattr->ctime.seconds);
+		xdr_stream_decode_u32(xdr, &fattr->ctime.nseconds);
+		dpc_xdr_set_fmode(fattr);
+	}
+}
+
+/*
+ * Thin wrapper over rpc_prepare_reply_pages() that adds the per-kernel
+ * page-padding word (DPC_RPC_PAGEPAD_SZ) to the reply header size.
+ */
+void dpc_prepare_rpc_reply_pages(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len,
+				 unsigned int hdrsize)
+{
+	rpc_prepare_reply_pages(req, pages, base, len, hdrsize + DPC_RPC_PAGEPAD_SZ);
+}
+
+#if !UP_LINUX_5_15_0 // kernels before 5.15.0 do not provide xdr_stream_decode_u64
+/*
+ * Backport of xdr_stream_decode_u64(): pull 8 bytes from the stream and
+ * byte-swap from big-endian.  Returns 0 or -EBADMSG on a short stream.
+ */
+ssize_t xdr_stream_decode_u64(struct xdr_stream *xdr, uint64_t *ptr)
+{
+	const uint32_t count = sizeof(*ptr);
+	uint32_t *p = xdr_inline_decode(xdr, count);
+
+	if (unlikely(!p))
+		return -EBADMSG;
+	/* NOTE(review): cast relies on xdr buffers being at least 4-byte
+	 * aligned, matching the upstream helper — confirm for this kernel */
+	*ptr = be64_to_cpup((const uint64_t *)p);
+	return 0;
+}
+#endif
+
+/*
+ * Translate a decoded wire fattr into the caller-visible attribute struct
+ * and mark it valid (DPC_RPC_ATTR_FATTR).  NULL inputs are ignored.
+ */
+void dpc_copy_from_inner_attr_to_rw_attr(dpc_clnt_file_attr *rw_args, dpc_fattr *rw_inner_args)
+{
+	if (rw_args == NULL || rw_inner_args == NULL) {
+		return;
+	}
+	rw_args->type = rw_inner_args->type;
+	rw_args->mode = rw_inner_args->mode;
+	rw_args->nlink = rw_inner_args->nlink;
+	rw_args->uid.val = rw_inner_args->uid;
+	rw_args->gid.val = rw_inner_args->gid;
+	rw_args->size = rw_inner_args->size;
+	rw_args->fsid = rw_inner_args->fs_id;
+	rw_args->rdev = MKDEV(rw_inner_args->rdev.specdata1, rw_inner_args->rdev.specdata2);
+	/* if the major/minor pair does not survive the MKDEV round trip it
+	 * overflowed dev_t; report no device number rather than a wrong one */
+	if (MAJOR(rw_args->rdev) != rw_inner_args->rdev.specdata1||
+	    MINOR(rw_args->rdev) != rw_inner_args->rdev.specdata2) {
+		rw_args->rdev = 0;
+	}
+	rw_args->atime.tv_sec = rw_inner_args->atime.seconds;
+	rw_args->atime.tv_nsec = rw_inner_args->atime.nseconds;
+	rw_args->mtime.tv_sec = rw_inner_args->mtime.seconds;
+	rw_args->mtime.tv_nsec = rw_inner_args->mtime.nseconds;
+	rw_args->ctime.tv_sec = rw_inner_args->ctime.seconds;
+	rw_args->ctime.tv_nsec = rw_inner_args->ctime.nseconds;
+	rw_args->used = rw_inner_args->used;
+	rw_args->file_id = rw_inner_args->file_id;
+
+	rw_args->valid |= DPC_RPC_ATTR_FATTR;
+}
+
+/* Dispatch per-op statistics accounting based on the request opcode. */
+void dpc_rpc_io_stats(dpc_clnt_inner_args *inner_arg, void *path_mgmt)
+{
+	uint32_t op = inner_arg->req.common_req.hdr.op_code;
+
+	if (op == DPC_RPC_OP_READ) {
+		dpc_clnt_read_count_stats(path_mgmt, inner_arg);
+	} else if (op == DPC_RPC_OP_WRITE) {
+		dpc_clnt_write_count_stats(path_mgmt, inner_arg);
+	}
+	/* other opcodes carry no statistics */
+}
+
+/*
+ * Normalize a DPC wire status to a local error code: take the absolute
+ * value, pass plain errno-range values (< DPCERR_NOTRANS) through, and
+ * translate the DPC-specific statuses via g_dpc_status_err_map.
+ */
+int dpc_change_status_to_err(int status)
+{
+	int i;
+	int code = (status < 0) ? -status : status;
+
+	if (code < DPCERR_NOTRANS) {
+		return code;
+	}
+	for (i = 0; i < (int)ARRAY_SIZE(g_dpc_status_err_map); ++i) {
+		if (g_dpc_status_err_map[i].status == code) {
+			return g_dpc_status_err_map[i].err;
+		}
+	}
+	return code;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.h
new file mode 100644
index 0000000..ce6f4ef
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_io_common.h
@@ -0,0 +1,269 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC RPC IO COMMON
+ */
+#ifndef DPC_RPC_IO_COMMON_H
+#define DPC_RPC_IO_COMMON_H
+
+#include
+#include
+#include
+#include
+#include "mulp_log.h"
+#include "multipath_api.h"
+#include "dpc_kernel_version.h"
+#include "dpc_rpc_client_api.h"
+
+#define DPC_CLIENT_DEFAULT_VERSION 0
+#define DPC_READ_EOF_BIT_FLAG BIT(0)
+#ifndef DPC_FILE_ID_LEN
+#define DPC_FILE_ID_LEN 38
+#endif
+
+#ifndef DPC_RPC_OP_MAGIC
+#define DPC_RPC_OP_MAGIC 0x1122334455667788ULL
+#endif
+
+#ifndef DPC_RPC_IPSTRLEN
+#define DPC_RPC_IPSTRLEN 16
+#endif
+
+#ifndef DPC_USER_NAME_LEN
+#define DPC_USER_NAME_LEN (32) /* dpc用户名最长英文字符,不包括\0 */
+#endif
+
+#define DPC_ROOT_FSID 0XFFFFFFFFFFFFFFFD // 18446744073709551613
+
+/*
+ * 响应体XDR header长度,在请求返回时,pages内容追加在响应体头后面
+ * 需要在编码时填写响应头长,阵列侧宏前缀为DPC_MSG_
+ */
+#define DPC_RPC_MSG_READ_LEN (168)
+#define BYTES_TO_WORDS_SHIFT 2
+
+#define DPC_RPC_INNER_ARGS_MAGIC 0x494E4E4552415247 // INNERARG
+
+#ifndef DPC_CHECK_ERROR_CONDITION_RET
+#define DPC_CHECK_ERROR_CONDITION_RET(c, ret, fmt, args...) \
+ do { \
+ if (c) { \
+ MULP_LOG("DPC Client", KERN_ERR, #fmt, ##args); \
+ return (ret); \
+ } \
+ } while (0)
+#endif
+
+#ifndef DPC_CHECK_ERROR_CONDITION_VOID_RET
+#define DPC_CHECK_ERROR_CONDITION_VOID_RET(c, ret, fmt, args...) \
+ do { \
+ if (c) { \
+ MULP_LOG("DPC Client", KERN_ERR, #fmt, ##args); \
+ return; \
+ } \
+ } while (0)
+#endif
+
+#define DPC_CHECK_XDR_RET(c) \
+ DPC_CHECK_ERROR_CONDITION_RET((c) <= 0, -EAGAIN, "%s %d check xdr(" #c ") ret fail.", __FUNCTION__, __LINE__)
+#define DPC_CHECK_XDR_VOID_RET(c) \
+ DPC_CHECK_ERROR_CONDITION_VOID_RET((c) <= 0, -EAGAIN, "%s %d check xdr(" #c ") ret fail.", __FUNCTION__, __LINE__)
+#if (LINUX_4_18_0_193) || (LINUX_VERSION_CODE == KERNEL_VERSION(5, 10, 0))
+#define DPC_RPC_PAGEPAD_SZ 1 // 不同操作系统,sunrpc层面适配
+#else
+#define DPC_RPC_PAGEPAD_SZ 0 // 不同操作系统,sunrpc层面适配
+#endif
+
+#define UP_LINUX_5_15_0 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) // Ubuntu 22.04.1 LTS, 0x50F27
+
+typedef enum {
+ DPC_RPC_OP_NULL = 0x000,
+ DPC_RPC_OP_READ = 0x001,
+ DPC_RPC_OP_WRITE = 0x002,
+ DPC_RPC_OP_MAX,
+} rpc_op_enum;
+
+typedef enum {
+ DPC_OK = 0, // OK
+ DPCERR_PERM = 1, // Operation not permitted
+ DPCERR_NOENT = 2, // No such file or directory
+ DPCERR_IO = 5, // [Errno 5] Input/output error
+ DPCERR_NXIO = 6,
+ DPCERR_ACCES = 13, // [Errno 13] Permission denied
+ DPCERR_EXIST = 17, // File exists
+ DPCERR_XDEV = 18,
+ DPCERR_NODEV = 19, // Not a device file
+ DPCERR_NOTDIR = 20, // Not a directory
+ DPCERR_ISDIR = 21, // This is a directory
+ DPCERR_INVAL = 22,
+ DPCERR_FBIG = 27,
+ DPCERR_NOSPC = 28, // no space left on device
+ DPCERR_ROFS = 30, // read only filesystem
+ DPCERR_MLINK = 31,
+ DPCERR_NAMETOOLONG = 63,
+ DPCERR_NOTEMPTY = 66, // directory not empty
+ DPCERR_DQUOT = 69, // Full sharing
+ DPCERR_STALE = 70, // Stale file handle
+ DPCERR_REMOTE = 71,
+ DPCERR_NOTRANS = 1024, // index for not need to trans
+ DPCERR_BADHANDLE = 10001, // BAD HANDLE
+ DPCERR_NOT_SYNC = 10002,
+ DPCERR_BAD_COOKIE = 10003,
+ DPCERR_NOTSUPP = 10004,
+ DPCERR_TOOSMALL = 10005, // FILENAME TOOSMALL
+ DPCERR_SERVERFAULT = 10006,
+ DPCERR_BADTYPE = 10007,
+ DPCERR_JUKEBOX = 10008 // JUKEBOX
+} dpc_status;
+
+typedef struct {
+ dpc_status status;
+ int err;
+} dpc_status_err_map;
+
+typedef struct {
+ uint32_t specdata1;
+ uint32_t specdata2;
+} dpc_spec_data;
+
+typedef struct {
+ uint32_t seconds;
+ uint32_t nseconds;
+} dpc_time;
+
+typedef struct {
+ dpc_ftype type;
+ uint32_t mode;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint64_t size;
+ uint64_t used;
+ dpc_spec_data rdev;
+ uint64_t fs_id;
+ uint64_t file_id;
+ dpc_time atime;
+ dpc_time mtime;
+ dpc_time ctime;
+} dpc_fattr;
+
+typedef struct {
+ uint32_t attributes_follow;
+ union {
+ dpc_fattr attributes;
+ } _dpc_post_op_attr;
+} __attribute__((aligned(8))) dpc_post_op_attr;
+
+#define DPC_RPC_PROC_READ_UNDEF BIT(0)
+#define DPC_RPC_PROC_READ_ENCODE BIT(1)
+#define DPC_RPC_PROC_READ_DECODE BIT(2)
+
+/* Common DPC request header, mirrored by dpc_xdr_encode_rpc_args_head(). */
+typedef struct dpc_rpc_head_req {
+	uint32_t op_code;
+	uint32_t file_flag; /* routing attributes, e.g. whether the file is served by its owning node; more flags may follow */
+	/* NOTE(review): "reverse" here (and below) appears to be a typo for "reserved" — confirm before renaming */
+	uint32_t reverse[7];
+	uint32_t client_ls_id;
+	uint64_t instance_id;
+} dpc_rpc_head_req;
+
+/* Common DPC reply header, mirrored by dpc_xdr_decode_rpc_resp_head(). */
+typedef struct dpc_rpc_head_rsp {
+	uint32_t reverse1[2];
+	uint32_t op_status;
+	uint32_t reverse2[3];
+} dpc_rpc_head_rsp;
+
+typedef struct dpc_clnt_common_req {
+ dpc_rpc_head_req hdr;
+} dpc_clnt_common_req;
+
+typedef struct dpc_clnt_common_rsp {
+ dpc_rpc_head_rsp hdr;
+} dpc_clnt_common_rsp;
+
+typedef struct dpc_clnt_read_req {
+ dpc_rpc_head_req hdr;
+
+ uint32_t length;
+ uint64_t offset;
+ uint64_t reverse;
+ mulp_file_uuid uuid;
+ uint64_t share_id;
+ char *user_name;
+
+ uint64_t process_flag;
+ uint32_t page_offset;
+ struct page **pages;
+} dpc_clnt_read_req;
+
+typedef struct dpc_clnt_read_rsp {
+ dpc_rpc_head_rsp hdr;
+
+ uint32_t version;
+ uint64_t offset;
+ uint64_t length;
+ uint32_t flag;
+ uint64_t reverse;
+ dpc_post_op_attr post_attr; /* 存储返回的文件attr */
+ uint64_t delay;
+
+ uint64_t process_flag;
+ struct page **pages;
+} dpc_clnt_read_rsp;
+
+typedef struct dpc_clnt_write_req {
+ dpc_rpc_head_req hdr;
+
+ uint32_t length;
+ uint64_t offset;
+ uint64_t reverse;
+ mulp_file_uuid uuid;
+ uint32_t flag;
+ uint64_t share_id;
+ char *user_name;
+
+ uint32_t page_offset;
+ struct page **pages;
+} dpc_clnt_write_req;
+
+typedef struct dpc_clnt_write_rsp {
+ dpc_rpc_head_rsp hdr;
+
+ uint32_t version;
+ uint32_t remain;
+ dpc_post_op_attr prev_attr; /* 存储返回的文件attr */
+ dpc_post_op_attr post_attr; /* 存储返回的文件attr */
+ uint64_t delay;
+} dpc_clnt_write_rsp;
+
+typedef union {
+ dpc_clnt_common_req common_req;
+ dpc_clnt_read_req read_req;
+ dpc_clnt_write_req write_req;
+} dpc_clnt_rw_inner_req;
+
+typedef union {
+ dpc_clnt_common_rsp common_rsp;
+ dpc_clnt_read_rsp read_rsp;
+ dpc_clnt_write_rsp write_rsp;
+} dpc_clnt_rw_inner_resp;
+
+typedef struct {
+ uint64_t magic;
+ dpc_clnt_rw_inner_req req;
+ dpc_clnt_rw_inner_resp resp;
+} dpc_clnt_inner_args;
+
+uint32_t dpc_xdr_encode_rpc_args_head(struct xdr_stream *xdr, const dpc_rpc_head_req *rw_inner_data);
+void dpc_xdr_decode_rpc_resp_head(struct xdr_stream *xdr, dpc_rpc_head_rsp *rw_inner_data);
+void dpc_xdr_decode_rpc_resp_post_op_attr(struct xdr_stream *xdr, dpc_post_op_attr *post_op_attr);
+void dpc_prepare_rpc_reply_pages(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len,
+ unsigned int hdrsize);
+ssize_t xdr_stream_decode_u64(struct xdr_stream *xdr, uint64_t *ptr);
+void dpc_copy_from_inner_attr_to_rw_attr(dpc_clnt_file_attr *rw_args, dpc_fattr *rw_inner_args);
+void dpc_rpc_io_stats(dpc_clnt_inner_args* inner_arg, void *path_mgmt);
+void dpc_clnt_read_count_stats(void *path_mgmt, dpc_clnt_inner_args *rw_inner_args);
+void dpc_clnt_write_count_stats(void *path_mgmt, dpc_clnt_inner_args *rw_inner_args);
+int dpc_change_status_to_err(int status);
+
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.c
new file mode 100644
index 0000000..8dafda8
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc adapter print unify multi path info
+ * Author: f30066215
+ * Create: 2025-1-8
+ */
+#include "dpc_rpc_mulp_proc.h"
+#include "dpc_rpc_proc.h"
+#include "dpc_rpc_mulp_proc_shard_view.h"
+
+#define MULP_PROC_DIR "unify_client"
+#define MULP_PROC_LINK_DIR "link"
+#define MULP_PROC_LINK_DEBUG_DIR "link_debug"
+#define MULP_PROC_NAME_SIZE 64
+static struct proc_dir_entry *g_dpc_proc_parent;
+static struct proc_dir_entry *g_dpc_proc_link_parent;
+static struct proc_dir_entry *g_dpc_proc_link_debug_parent;
+static struct seq_file *mulp_proc_seq_file;
+
+/* Forward one formatted chunk from the dump helper into the seq_file
+ * currently being rendered (parked in mulp_proc_seq_file). */
+static void dpc_proc_callback(const char *buffer)
+{
+    if (mulp_proc_seq_file == NULL) {
+        return;
+    }
+    seq_write(mulp_proc_seq_file, buffer, strlen(buffer));
+}
+
+/*
+ * Common show routine for the plain and debug link-info proc files.
+ * (The original wrapped one assignment in a bogus `#ifdef __cplusplus
+ * extern "C" { ... }` pair inside the function body -- removed.)
+ */
+static int dpc_proc_show_inner(struct seq_file *m, void *v, BOOLEAN_T debug)
+{
+    uint64_t mp_id;
+    int ret;
+
+    /* mp_id was stashed in the entry's private data at create time. */
+    mp_id = (uintptr_t)m->private;
+
+    /* mulp_dump_path_info() reports through a context-less callback, so the
+     * seq_file is parked in a file-scope variable for its duration.
+     * NOTE(review): concurrent readers race on mulp_proc_seq_file; confirm
+     * whether serialisation is required. */
+    mulp_proc_seq_file = m;
+    ret = mulp_dump_path_info(dpc_proc_callback, mp_id, debug);
+    mulp_proc_seq_file = NULL;
+
+    return ret;
+}
+
+/* seq_file show callbacks: plain vs. debug dump of one multipath. */
+static int dpc_proc_show(struct seq_file *m, void *v)
+{
+    return dpc_proc_show_inner(m, v, B_FALSE);
+}
+
+static int dpc_proc_debug_show(struct seq_file *m, void *v)
+{
+    return dpc_proc_show_inner(m, v, B_TRUE);
+}
+
+static int dpc_proc_open(struct inode *inode, struct file *file)
+{
+    void *mp = PDE_DATA(inode); /* mp_id stashed at create time */
+
+    return single_open(file, dpc_proc_show, mp);
+}
+
+static int dpc_proc_debug_open(struct inode *inode, struct file *file)
+{
+    void *mp = PDE_DATA(inode);
+
+    return single_open(file, dpc_proc_debug_show, mp);
+}
+
+/* The clean_rw_info node is write-only; reading it yields nothing. */
+static int dpc_proc_clean_rw_info_show(struct seq_file *m, void *v)
+{
+    return 0;
+}
+
+static int dpc_proc_clean_rw_info_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, dpc_proc_clean_rw_info_show, NULL);
+}
+
+/*
+ * Write handler: copies the user command into a bounded kernel buffer and
+ * hands it to the multipath layer.
+ */
+static ssize_t dpc_proc_clean_rw_info_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+    char kbuf[256] = {0};
+    size_t len = mulp_min(count, sizeof(kbuf) - 1);
+
+    if (copy_from_user(kbuf, buf, len)) {
+        return -EFAULT;
+    }
+    kbuf[len] = '\0';
+
+    mulp_path_clean_mp_rw_info(kbuf);
+
+    /* Consume the whole request even when it was truncated to the local
+     * buffer; the original returned the (possibly short) 'len', which makes
+     * user space re-issue the tail bytes as a second, garbled command. */
+    return count;
+}
+
+#define MAX_MEM_STATIS_BUF_LEN (128)
+/* Render the module's outstanding-allocation counter into the seq_file. */
+static int dpc_proc_mem_statis_show(struct seq_file *m, void *v)
+{
+    char buf[MAX_MEM_STATIS_BUF_LEN] = {0};
+    int len;
+
+    len = dpc_clnt_mem_statis_dump(buf, MAX_MEM_STATIS_BUF_LEN);
+    if (len <= 0) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "Mem statis dump error %d.", len);
+        return len;
+    }
+    seq_write(m, buf, strlen(buf));
+    return 0;
+}
+
+static int dpc_proc_mem_statis_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, dpc_proc_mem_statis_show, PDE_DATA(inode));
+}
+
+/* Pre-5.10 kernels register file_operations for proc files; 5.10+ switched
+ * to the slimmer proc_ops. NOTE(review): these tables could be static
+ * const -- they are only referenced from this file. */
+#if !(UP_LINUX_5_10)
+/* per-mp link info (read-only, seq_file backed) */
+struct file_operations g_dpc_proc_fops = {
+ .open = dpc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* per-mp link debug info (read-only) */
+struct file_operations g_dpc_proc_debug_fops = {
+ .open = dpc_proc_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* write-only command node for resetting rw statistics */
+struct file_operations g_dpc_proc_clean_rw_info = {
+ .open = dpc_proc_clean_rw_info_open,
+ .write = dpc_proc_clean_rw_info_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* debug-build memory statistics (read-only) */
+struct file_operations g_dpc_proc_mem_statis = {
+ .open = dpc_proc_mem_statis_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#else
+struct proc_ops g_dpc_proc_fops = {
+ .proc_open = dpc_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+struct proc_ops g_dpc_proc_debug_fops = {
+ .proc_open = dpc_proc_debug_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+struct proc_ops g_dpc_proc_clean_rw_info = {
+ .proc_open = dpc_proc_clean_rw_info_open,
+ .proc_write = dpc_proc_clean_rw_info_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+struct proc_ops g_dpc_proc_mem_statis = {
+ .proc_open = dpc_proc_mem_statis_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+#endif
+
+/* Create the per-multipath proc files under link/ and link_debug/. */
+int dpc_proc_create_mp_file(uint64_t client_id, uint64_t mp_id)
+{
+    char name[MULP_PROC_NAME_SIZE] = {0};
+    struct proc_dir_entry *entry;
+
+    /* The name encodes app id, client id and multipath id; mp_id is also
+     * smuggled through the entry's private data pointer (NOTE(review):
+     * that cast truncates on 32-bit kernels -- confirm 64-bit-only). */
+    snprintf(name, sizeof(name), "mulp_%d_%llu_%llu", MULP_APP_DPC, client_id, mp_id);
+
+    entry = proc_create_data(name, 0644, g_dpc_proc_link_parent, &g_dpc_proc_fops, (void *)mp_id);
+    if (entry == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR,
+            "dpc create link info proc file err, client_id:%llu, mp_id:%llu.", client_id, mp_id);
+        return -ENOMEM;
+    }
+
+    entry = proc_create_data(name, 0644, g_dpc_proc_link_debug_parent, &g_dpc_proc_debug_fops, (void *)mp_id);
+    if (entry == NULL) {
+        /* Roll back the first node so both files exist or neither does. */
+        remove_proc_entry(name, g_dpc_proc_link_parent);
+        MULP_LOG(MULP_MODULE_API, MULP_ERR,
+            "dpc create link debug info proc file err, client_id:%llu, mp_id:%llu.", client_id, mp_id);
+        return -ENOMEM;
+    }
+    return 0;
+}
+
+/* Remove both per-multipath proc files created by dpc_proc_create_mp_file. */
+void dpc_proc_destroy_mp_file(uint64_t client_id, uint64_t mp_id)
+{
+    char name[MULP_PROC_NAME_SIZE] = {0};
+
+    snprintf(name, sizeof(name), "mulp_%d_%llu_%llu", MULP_APP_DPC, client_id, mp_id);
+    remove_proc_entry(name, g_dpc_proc_link_parent);
+    remove_proc_entry(name, g_dpc_proc_link_debug_parent);
+}
+
+/* Register the write-only clean_rw_info command node under unify_client/. */
+int dpc_proc_create_clean_rw_info_file(void)
+{
+    const char *name = "clean_rw_info";
+    struct proc_dir_entry *entry;
+
+    entry = proc_create_data(name, 0644, g_dpc_proc_parent, &g_dpc_proc_clean_rw_info, NULL);
+    if (entry == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "Unify_multipath create clean rw info proc file err.");
+        return -ENOMEM;
+    }
+    return 0;
+}
+
+/* Register the debug-build memory statistics node under unify_client/. */
+int dpc_proc_create_mem_statis(void)
+{
+    const char *name = "mem_statis";
+    struct proc_dir_entry *entry;
+
+    entry = proc_create_data(name, 0644, g_dpc_proc_parent, &g_dpc_proc_mem_statis, NULL);
+    if (entry == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "Unify_multipath create mem statis proc file err.");
+        return -ENOMEM;
+    }
+    return 0;
+}
+
+/*
+ * Build the proc tree: unify_client/{link,link_debug,shard_view,...}.
+ * The original leaked the already-created directories when a later step
+ * failed; every failure now unwinds everything built before it.
+ */
+int dpc_rpc_mulp_proc_init(void)
+{
+    int ret;
+
+    g_dpc_proc_parent = mulp_proc_create_dir(MULP_PROC_DIR, NULL);
+    if (g_dpc_proc_parent == NULL) {
+        return -ENOMEM;
+    }
+    g_dpc_proc_link_parent = mulp_proc_create_dir(MULP_PROC_LINK_DIR, g_dpc_proc_parent);
+    if (g_dpc_proc_link_parent == NULL) {
+        ret = -ENOMEM;
+        goto err_del_root;
+    }
+    g_dpc_proc_link_debug_parent = mulp_proc_create_dir(MULP_PROC_LINK_DEBUG_DIR, g_dpc_proc_parent);
+    if (g_dpc_proc_link_debug_parent == NULL) {
+        ret = -ENOMEM;
+        goto err_del_link;
+    }
+    ret = dpc_rpc_mulp_proc_shard_view_set_parent((void *)g_dpc_proc_parent);
+    if (ret != 0) {
+        goto err_del_link_debug;
+    }
+    ret = dpc_proc_create_clean_rw_info_file();
+    if (ret != 0) {
+        goto err_shard_view;
+    }
+#ifdef DPC_RPC_DEBUG
+    ret = dpc_proc_create_mem_statis();
+    if (ret != 0) {
+        goto err_clean_rw;
+    }
+#endif
+    return 0;
+
+#ifdef DPC_RPC_DEBUG
+err_clean_rw:
+    remove_proc_entry("clean_rw_info", g_dpc_proc_parent);
+#endif
+err_shard_view:
+    dpc_rpc_mulp_proc_shard_view_exit();
+err_del_link_debug:
+    mulp_proc_delete_dir(MULP_PROC_LINK_DEBUG_DIR, g_dpc_proc_parent);
+err_del_link:
+    mulp_proc_delete_dir(MULP_PROC_LINK_DIR, g_dpc_proc_parent);
+err_del_root:
+    mulp_proc_delete_dir(MULP_PROC_DIR, NULL);
+    return ret;
+}
+
+/* Tear down in reverse creation order: shard-view subtree first, then the
+ * two link directories, finally the unify_client root.
+ * NOTE(review): the clean_rw_info/mem_statis files are not removed here --
+ * confirm mulp_proc_delete_dir removes the subtree recursively. */
+void dpc_rpc_mulp_proc_exit(void)
+{
+ dpc_rpc_mulp_proc_shard_view_exit();
+ mulp_proc_delete_dir(MULP_PROC_LINK_DEBUG_DIR, g_dpc_proc_parent);
+ mulp_proc_delete_dir(MULP_PROC_LINK_DIR, g_dpc_proc_parent);
+ mulp_proc_delete_dir(MULP_PROC_DIR, NULL);
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.h
new file mode 100644
index 0000000..ae2558d
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc adapter print unify multi path info
+ * Author: f30066215
+ * Create: 2025-1-8
+ */
+#ifndef _DPC_RPC_MULP_PROC_H_
+#define _DPC_RPC_MULP_PROC_H_
+
+/* NOTE(review): the two include lines below lost their targets in patch
+ * transport (mangled angle brackets); restored from what this interface
+ * uses (uint64_t, proc files) -- verify against the build. */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include "multipath_api.h"
+#include "mulp_porting.h"
+#include "dpc_kernel_version.h"
+
+/* Create/destroy the module proc tree and the per-multipath proc files. */
+int dpc_rpc_mulp_proc_init(void);
+void dpc_rpc_mulp_proc_exit(void);
+int dpc_proc_create_mp_file(uint64_t client_id, uint64_t mp_id);
+void dpc_proc_destroy_mp_file(uint64_t client_id, uint64_t mp_id);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.c
new file mode 100644
index 0000000..2ec93b1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc rpc print shard view info
+ * Author: w30059085
+ * Create: 2025-1-22
+ */
+/* NOTE(review): include targets were lost in patch transport; restored from
+ * usage (proc_mkdir/remove_proc_subtree, seq_file) -- verify. */
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "dpc_rpc_mulp_proc_shard_view.h"
+
+#define MULP_PROC_NAME_SIZE 32
+#define MULP_PROC_CLUSTER_HASH 16
+#define MULP_PROC_LINE_SIZE 1024
+#define MULP_PROC_PRINT_PER_LINE 64
+
+/* Which column of the shard view a proc file renders. */
+typedef enum {
+ DPC_MULTIPATH_DISPLAY_LSID,
+ DPC_MULTIPATH_DISPLAY_CPUID,
+} shard_view_display_type;
+
+/* Private data attached to each lsid/cpuid proc file. */
+typedef struct {
+ shard_view_display_type type;
+ uint64_t wwn;
+ uint64_t cluster_id;
+ uint64_t pool_id;
+ struct proc_dir_entry *file;
+} shard_view_context;
+
+/* One pool directory; owns the two shard-view contexts below it. */
+typedef struct {
+ mulp_list_head_type list;
+ uint64_t pool_id;
+ struct proc_dir_entry *dir;
+ shard_view_context *lsid;
+ shard_view_context *cpuid;
+} dpc_multipath_pool_info;
+
+/* Cluster node, chained per (cluster_id % MULP_PROC_CLUSTER_HASH) bucket. */
+typedef struct {
+ mulp_list_head_type list;
+ mulp_list_head_type pool_list;
+ uint64_t cluster_id;
+ struct proc_dir_entry *dir;
+} dpc_multipath_cluster_hash;
+
+/* Per-wwn node; g_wwn_list below doubles as the global list head. */
+typedef struct wwn_list {
+ mulp_list_head_type list;
+ uint64_t wwn;
+ struct proc_dir_entry *dir;
+ dpc_multipath_cluster_hash table[MULP_PROC_CLUSTER_HASH];
+} dpc_multipath_wwn_info;
+
+static dpc_multipath_wwn_info g_wwn_list = {0};
+static struct proc_dir_entry *shard_view_parent; /* parent handed to set_parent() */
+static mulp_rwlock_type g_wwn_list_lock; /* guards g_wwn_list and all children */
+
+/*
+ * Render the lsid or cpu_id column of a shard view into the seq_file,
+ * batching MULP_PROC_PRINT_PER_LINE entries per seq_write.
+ * Fixes: the original added snprintf's would-have-been length to 'pos'
+ * without clamping, so once the buffer filled, the next size argument
+ * underflowed (huge size_t) -- out-of-bounds write risk. It also compared
+ * a uint32_t index against an int count.
+ */
+static void mulp_proc_display_shard_view(struct seq_file *file, shard_view_display_type type,
+                                         mulp_shard_view *view_ptr, int count,
+                                         char *line, int line_size)
+{
+    int i, j;
+    char *pos = line;
+    size_t remain;
+
+    if (count <= 0 || line_size <= 1) {
+        return;
+    }
+    for (i = 0; i < count; i += MULP_PROC_PRINT_PER_LINE) {
+        for (j = 0; j < MULP_PROC_PRINT_PER_LINE && i + j < count; ++j) {
+            remain = (size_t)(line_size - (pos - line));
+            if (remain <= 1) {
+                break; /* buffer full; flush what we have */
+            }
+            if (type == DPC_MULTIPATH_DISPLAY_LSID) {
+                pos += snprintf(pos, remain, "%x\n", view_ptr[i + j].lsid);
+            } else if (type == DPC_MULTIPATH_DISPLAY_CPUID) {
+                pos += snprintf(pos, remain, "%u\n", view_ptr[i + j].cpu_id);
+            }
+            if (pos >= line + line_size) {
+                pos = line + line_size - 1; /* clamp snprintf's ideal length */
+            }
+        }
+        seq_write(file, line, strlen(line));
+        memset(line, 0, line_size);
+        pos = line;
+    }
+}
+
+/*
+ * seq_file show routine for a shard-view file: query the entry count,
+ * fetch the view into a temporary array and render it.
+ */
+int mulp_proc_display(struct seq_file *file, void *p)
+{
+    char line[MULP_PROC_LINE_SIZE] = {0};
+    shard_view_context *ctx = (shard_view_context *)file->private;
+    mulp_shard_view *array;
+    /* A NULL buffer asks only for the element count. */
+    int ret = mulp_get_shard_view(ctx->wwn, ctx->cluster_id, ctx->pool_id, NULL);
+    if (ret < 0) {
+        return -EFAULT;
+    }
+    if (ret == 0) {
+        return 0; /* nothing to show; avoid a zero-byte allocation */
+    }
+
+    array = (mulp_shard_view *)mulp_mem_zalloc(sizeof(mulp_shard_view) * (size_t)ret);
+    if (!array) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc alloc mem for mulp shard view failed.");
+        return -ENOMEM;
+    }
+    ret = mulp_get_shard_view(ctx->wwn, ctx->cluster_id, ctx->pool_id, array);
+    if (ret < 0) {
+        mulp_mem_free(array);
+        return -EFAULT;
+    }
+
+    mulp_proc_display_shard_view(file, ctx->type, array, ret, line, MULP_PROC_LINE_SIZE);
+    mulp_mem_free(array);
+    return 0;
+}
+
+static int mulp_proc_open(struct inode *data, struct file *fp)
+{
+    /* Hand the shard_view_context stored at proc_create_data() time to the
+     * single_open show routine. */
+    void *ctx = PDE_DATA(data);
+
+    return single_open(fp, mulp_proc_display, ctx);
+}
+
+/* Read-only seq_file ops for the lsid/cpuid shard-view files; the #if picks
+ * file_operations vs proc_ops per kernel version. NOTE(review): could be
+ * static const. */
+#if !(UP_LINUX_5_10)
+struct file_operations mulp_proc_shard_view_fops = {
+ .open = mulp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#else
+struct proc_ops mulp_proc_shard_view_fops = {
+ .proc_open = mulp_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+#endif
+
+/* Create a proc directory named after an id: WWNs are conventionally shown
+ * in hex, everything else in decimal. */
+static inline struct proc_dir_entry *mulp_proc_shard_view_mkdir(uint64_t id, BOOLEAN_T is_wwn,
+                                                                struct proc_dir_entry *parent)
+{
+    char dirname[MULP_PROC_NAME_SIZE] = {0};
+
+    if (is_wwn) {
+        snprintf(dirname, sizeof(dirname), "0x%llx", id);
+    } else {
+        snprintf(dirname, sizeof(dirname), "%llu", id);
+    }
+    return proc_mkdir(dirname, parent);
+}
+
+/*
+ * Look up the node for 'wwn', creating it (and its proc directory) on
+ * first use. Caller holds g_wwn_list_lock for writing.
+ * Fixes: the original never checked the proc_mkdir result and published
+ * the node on the global list before it was fully initialised.
+ */
+static dpc_multipath_wwn_info *get_or_create_wwn_info(uint64_t wwn)
+{
+    int i;
+    dpc_multipath_wwn_info *node = NULL;
+    dpc_multipath_wwn_info *next = NULL;
+
+    mulp_list_for_each_entry_safe(node, next, &g_wwn_list.list, list) {
+        if (node->wwn == wwn) {
+            return node;
+        }
+    }
+
+    node = (dpc_multipath_wwn_info *)mulp_mem_zalloc(sizeof(dpc_multipath_wwn_info));
+    if (!node) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc alloc mem for mulp proc wwn info failed.");
+        return NULL;
+    }
+    /* Create the backing directory before linking the node so lookups never
+     * observe an entry with a NULL dir. */
+    node->dir = mulp_proc_shard_view_mkdir(wwn, B_TRUE, g_wwn_list.dir);
+    if (node->dir == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc create wwn proc dir failed.");
+        mulp_mem_free(node);
+        return NULL;
+    }
+    node->wwn = wwn;
+    for (i = 0; i < MULP_PROC_CLUSTER_HASH; i++) {
+        mulp_init_list_head(&node->table[i].list);
+    }
+    mulp_init_list_head(&node->list);
+    mulp_list_add_tail(&node->list, &g_wwn_list.list);
+    return node;
+}
+
+/*
+ * Look up the cluster node in the wwn's hash table, creating it on first
+ * use. Caller holds g_wwn_list_lock for writing.
+ * Fixes: the original never checked the proc_mkdir result, so pool
+ * directories could later be created under a NULL parent.
+ */
+static dpc_multipath_cluster_hash *get_or_create_cluster_hash(uint64_t wwn, uint64_t cluster_id)
+{
+    dpc_multipath_wwn_info *wwn_info = NULL;
+    dpc_multipath_cluster_hash *node = NULL;
+    dpc_multipath_cluster_hash *next = NULL;
+    mulp_list_head_type *bucket = NULL;
+
+    wwn_info = get_or_create_wwn_info(wwn);
+    if (wwn_info == NULL) {
+        return NULL;
+    }
+    bucket = &wwn_info->table[cluster_id % MULP_PROC_CLUSTER_HASH].list;
+    mulp_list_for_each_entry_safe(node, next, bucket, list) {
+        if (node->cluster_id == cluster_id) {
+            return node;
+        }
+    }
+
+    node = (dpc_multipath_cluster_hash *)mulp_mem_zalloc(sizeof(dpc_multipath_cluster_hash));
+    if (!node) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc alloc mem for mulp proc cluster hash failed.");
+        return NULL;
+    }
+    node->dir = mulp_proc_shard_view_mkdir(cluster_id, B_FALSE, wwn_info->dir);
+    if (node->dir == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc create cluster proc dir failed.");
+        mulp_mem_free(node);
+        return NULL;
+    }
+    node->cluster_id = cluster_id;
+    mulp_init_list_head(&node->list);
+    mulp_init_list_head(&node->pool_list);
+    mulp_list_add_tail(&node->list, bucket);
+    return node;
+}
+
+/* Linear scan of the cluster's pool list; returns NULL when absent. */
+static dpc_multipath_pool_info *get_pool_info(dpc_multipath_cluster_hash *cluster, uint64_t pool_id)
+{
+    mulp_list_head_type *it = NULL;
+    dpc_multipath_pool_info *candidate = NULL;
+
+    list_for_each (it, &cluster->pool_list) {
+        candidate = list_entry(it, dpc_multipath_pool_info, list);
+        if (candidate->pool_id == pool_id) {
+            return candidate;
+        }
+    }
+    return NULL;
+}
+
+/* Anchor the shard_view subtree under the caller's proc directory and reset
+ * the module bookkeeping (lock + wwn list). */
+int dpc_rpc_mulp_proc_shard_view_set_parent(void *dir)
+{
+    struct proc_dir_entry *parent = (struct proc_dir_entry *)dir;
+    struct proc_dir_entry *entry;
+
+    entry = mulp_proc_create_dir(MULP_PROC_SHARD_VIEW_DIR, parent);
+    if (entry == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "Unify_multipath create shard view proc dir err.");
+        return -ENOMEM;
+    }
+    mulp_rwlock_init(&g_wwn_list_lock);
+    mulp_init_list_head(&g_wwn_list.list);
+    g_wwn_list.dir = entry;
+    shard_view_parent = parent;
+    return 0;
+}
+
+/* Drop all cached wwn/cluster/pool state, then remove the subtree root. */
+void dpc_rpc_mulp_proc_shard_view_exit(void)
+{
+    mulp_proc_try_destroy_shard_view_file();
+    mulp_proc_delete_dir(MULP_PROC_SHARD_VIEW_DIR, shard_view_parent);
+}
+
+/* Release every pool hanging off this cluster, including the two
+ * shard-view contexts each pool owns. */
+static void mulp_proc_free_cluster_hash(dpc_multipath_cluster_hash *cluster)
+{
+    mulp_list_head_type *it = NULL;
+    mulp_list_head_type *safe = NULL;
+    dpc_multipath_pool_info *pool = NULL;
+
+    for (it = cluster->pool_list.next, safe = it->next; it != &cluster->pool_list; it = safe, safe = safe->next) {
+        pool = list_entry(it, dpc_multipath_pool_info, list);
+        mulp_list_del(&pool->list);
+        mulp_mem_free(pool->lsid);
+        mulp_mem_free(pool->cpuid);
+        mulp_mem_free(pool);
+    }
+}
+
+/* Drain every hash bucket of a wwn node, releasing each cluster and its
+ * pools; the node itself is freed by the caller. */
+static void mulp_proc_free_wwn_info(dpc_multipath_wwn_info *wwn_info)
+{
+    uint32_t bucket;
+    dpc_multipath_cluster_hash *cur = NULL;
+    dpc_multipath_cluster_hash *tmp = NULL;
+
+    for (bucket = 0; bucket < MULP_PROC_CLUSTER_HASH; bucket++) {
+        mulp_list_for_each_entry_safe(cur, tmp, &wwn_info->table[bucket].list, list) {
+            mulp_list_del(&cur->list);
+            mulp_proc_free_cluster_hash(cur);
+            mulp_mem_free(cur);
+        }
+    }
+}
+
+/*
+ * Drop every wwn subtree and its bookkeeping.
+ * Fix: the proc subtree is now removed BEFORE the shard_view_context
+ * objects are freed -- those contexts are the proc files' private data, so
+ * the original order left a window where a concurrent open() dereferenced
+ * freed memory.
+ */
+void mulp_proc_try_destroy_shard_view_file(void)
+{
+    dpc_multipath_wwn_info *node = NULL;
+    dpc_multipath_wwn_info *next = NULL;
+    char name[MULP_PROC_NAME_SIZE] = {0};
+
+    mulp_write_lock(&g_wwn_list_lock);
+    mulp_list_for_each_entry_safe(node, next, &g_wwn_list.list, list) {
+        mulp_list_del(&node->list);
+        snprintf(name, sizeof(name), "0x%llx", node->wwn);
+        remove_proc_subtree(name, g_wwn_list.dir);
+        mulp_proc_free_wwn_info(node);
+        mulp_mem_free(node);
+    }
+    mulp_init_list_head(&g_wwn_list.list);
+    mulp_write_unlock(&g_wwn_list_lock);
+}
+
+/*
+ * Allocate the per-file context and register the lsid/cpuid proc file that
+ * carries it as private data.
+ * Fixes: the original dereferenced the mulp_mem_zalloc() result without a
+ * NULL check and ignored proc_create_data() failure.
+ */
+shard_view_context *mulp_proc_create_shard_view_context(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id,
+                                                        shard_view_display_type type, struct proc_dir_entry *dir)
+{
+    const char *fname = (type == DPC_MULTIPATH_DISPLAY_LSID) ? "lsid" : "cpuid";
+    shard_view_context *ctx = (shard_view_context *)mulp_mem_zalloc(sizeof(shard_view_context));
+
+    if (ctx == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc alloc mem for shard view ctx failed.");
+        return NULL;
+    }
+    ctx->wwn = wwn;
+    ctx->cluster_id = cluster_id;
+    ctx->pool_id = pool_id;
+    ctx->type = type;
+    ctx->file = proc_create_data(fname, 0644, dir, &mulp_proc_shard_view_fops, (void *)ctx);
+    if (ctx->file == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc create shard view proc file failed.");
+        mulp_mem_free(ctx);
+        return NULL;
+    }
+    return ctx;
+}
+
+/*
+ * Ensure the proc files for (wwn, cluster_id, pool_id) exist, creating the
+ * wwn/cluster/pool directory chain on demand.
+ * Fixes: the original neither checked the pool's proc_mkdir result nor
+ * initialised the pool before publishing it on the cluster list.
+ */
+int mulp_proc_try_create_shard_view_file(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id)
+{
+    dpc_multipath_pool_info *pool = NULL;
+    dpc_multipath_cluster_hash *cluster = NULL;
+
+    mulp_write_lock(&g_wwn_list_lock);
+    cluster = get_or_create_cluster_hash(wwn, cluster_id);
+    if (cluster == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "Unify_multipath create shard view proc file err.");
+        mulp_write_unlock(&g_wwn_list_lock);
+        return -ENOMEM;
+    }
+    pool = get_pool_info(cluster, pool_id);
+    if (pool) {
+        mulp_write_unlock(&g_wwn_list_lock);
+        return 0; /* already present */
+    }
+
+    pool = (dpc_multipath_pool_info *)mulp_mem_zalloc(sizeof(dpc_multipath_pool_info));
+    if (pool == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc alloc mem for mulp proc pool info failed.");
+        mulp_write_unlock(&g_wwn_list_lock);
+        return -ENOMEM;
+    }
+    pool->pool_id = pool_id;
+    pool->dir = mulp_proc_shard_view_mkdir(pool_id, B_FALSE, cluster->dir);
+    if (pool->dir == NULL) {
+        MULP_LOG(MULP_MODULE_API, MULP_ERR, "dpc create pool proc dir failed.");
+        mulp_mem_free(pool);
+        mulp_write_unlock(&g_wwn_list_lock);
+        return -ENOMEM;
+    }
+    /* NOTE(review): a NULL context here (OOM) is tolerated -- the free path
+     * passes it to mulp_mem_free; confirm that helper accepts NULL. */
+    pool->lsid = mulp_proc_create_shard_view_context(wwn, cluster_id, pool_id, DPC_MULTIPATH_DISPLAY_LSID, pool->dir);
+    pool->cpuid = mulp_proc_create_shard_view_context(wwn, cluster_id, pool_id, DPC_MULTIPATH_DISPLAY_CPUID, pool->dir);
+    mulp_init_list_head(&pool->list);
+    mulp_list_add_tail(&pool->list, &cluster->pool_list);
+    mulp_write_unlock(&g_wwn_list_lock);
+    return 0;
+}
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.h
new file mode 100644
index 0000000..fe54e92
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_mulp_proc_shard_view.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc adapter print unify multi path info
+ * Author: f30066215
+ * Create: 2025-1-8
+ */
+#ifndef _DPC_RPC_MULP_PROC_SHARD_VIEW_H_
+#define _DPC_RPC_MULP_PROC_SHARD_VIEW_H_
+
+#include "multipath_api.h"
+#include "mulp_porting.h"
+#include "dpc_kernel_version.h"
+
+#define MULP_PROC_SHARD_VIEW_DIR "shard_view"
+
+void dpc_rpc_mulp_proc_shard_view_exit(void);
+int mulp_proc_try_create_shard_view_file(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id);
+void mulp_proc_try_destroy_shard_view_file(void);
+int dpc_rpc_mulp_proc_shard_view_set_parent(void *dir);
+
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.c
new file mode 100644
index 0000000..2e0df74
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.c
@@ -0,0 +1,334 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC proc io
+ */
+#include "dpc_rpc_proc.h"
+/* NOTE(review): include targets were lost in patch transport; restored from
+ * usage (msleep_interruptible, ktime, rpc_run_task) -- verify. */
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/sunrpc/clnt.h>
+#include "multipath_api.h"
+#include "dpc_rpc_xdr.h"
+#include "mulp_porting.h"
+#include "mulp_log.h"
+#include "dpc_rpc_client.h"
+#include "dpc_rpc_io_common.h"
+#include "dpc_rpc_client_read.h"
+#include "dpc_rpc_client_write.h"
+#include "mulp_path_mgmt.h"
+
+#define DPC_PROC_MAGIC 0x4450435F50524F43 // ASCII "DPC_PROC"
+
+/* Header placed in front of every dpc_clnt_zalloc() buffer; recovered from
+ * the user pointer by dpc_clnt_get_ctx(). */
+typedef struct {
+ uint64_t magic;
+ void *odc_ctx;
+ dpc_clnt_inner_args *inner_args;
+ struct rpc_task *task;
+ void *path_mgmt;
+ ktime_t start_time;
+} dpc_rpc_proc_ctx;
+
+/* Aggregated parameters for one RPC submission. */
+typedef struct {
+ struct rpc_call_ops *callback_ops;
+ uint32_t opcode;
+ unsigned short flags;
+ struct rpc_task *task;
+ dpc_clnt_rw_args *args;
+ dpc_rpc_proc_ctx *ctx;
+} dpc_rpc_proc_args;
+
+/* Per-opcode translators between public rw args and wire-format args. */
+typedef struct {
+ uint32_t p_proc;
+ void (*trans_encode)(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *inner_arg, dpc_multipath_info *path_info);
+ void (*trans_decode)(dpc_clnt_rw_args *rw_args, dpc_clnt_inner_args *inner_arg);
+} dpc_rpc_trans_args_proc;
+
+/* Indexed directly by opcode; entries other than READ/WRITE are zeroed. */
+const dpc_rpc_trans_args_proc dpc_trans_procs[] = {
+ [DPC_RPC_OP_READ] =
+ {DPC_RPC_OP_READ, dpc_clnt_trans_encode_read_args, dpc_clnt_trans_decode_read_args},
+ [DPC_RPC_OP_WRITE] =
+ {DPC_RPC_OP_WRITE, dpc_clnt_trans_encode_write_args, dpc_clnt_trans_decode_write_args},
+};
+
+/* Forward declarations (defined below in this file). */
+dpc_rpc_proc_ctx *dpc_clnt_get_ctx(void *odc_ctx);
+int dpc_clnt_do_proc_exec_retry(dpc_rpc_proc_ctx *ctx);
+
+/* Release the failed path first, then re-drive the request so that a
+ * fresh optimal path is selected. */
+int dpc_rpc_call_retry(dpc_rpc_proc_ctx *ctx, dpc_clnt_rw_args *rw_args)
+{
+    mulp_io_put_path(rw_args->mp_id, ctx->path_mgmt);
+    ctx->path_mgmt = NULL;
+    return dpc_clnt_do_proc_exec_retry(ctx);
+}
+
+/* Fold a transport failure (if any) into the op status before accounting;
+ * tk_rpc_status takes precedence over tk_status. */
+void dpc_rpc_call_done_io_statis(dpc_clnt_inner_args *inner_args, struct rpc_task *task, dpc_rpc_proc_ctx *ctx)
+{
+    int status = 0;
+
+    if (task->tk_rpc_status < 0) {
+        status = task->tk_rpc_status;
+    } else if (task->tk_status < 0) {
+        status = task->tk_status;
+    }
+    if (status < 0) {
+        inner_args->resp.common_rsp.hdr.op_status = status;
+    }
+    dpc_rpc_io_stats(inner_args, ctx->path_mgmt);
+}
+
+/*
+ * rpc_call_done hook: account the result, notify the path layer, release
+ * the path reference and decode the reply on success.
+ * Fixes: dropped the dead `else if (rw_args)` guard (rw_args had already
+ * been dereferenced above it) and bounds-checked the translator table
+ * before indexing it.
+ */
+void dpc_rpc_call_done(struct rpc_task *task, void *data)
+{
+    dpc_rpc_proc_ctx *ctx = NULL;
+    dpc_clnt_rw_args *rw_args = NULL;
+    dpc_clnt_inner_args *inner_args = NULL;
+
+    if (data == NULL) {
+        return;
+    }
+    LVOS_TP_START(TP_SUNRPC_MSG_TIMEOUT, task);
+    LVOS_TP_END;
+
+    ctx = (dpc_rpc_proc_ctx *)data;
+    rw_args = (dpc_clnt_rw_args *)ctx->odc_ctx;
+    inner_args = ctx->inner_args;
+    dpc_rpc_call_done_io_statis(inner_args, task, ctx);
+
+    /* NOTE(review): the third argument is the start timestamp in ms, while
+     * the log below reports an elapsed delta -- confirm the callee expects
+     * a timestamp rather than a duration. */
+    if (task->tk_rpc_status < 0) {
+        MULP_PATH_INFO_LOG(DPC_CLIENT, MULP_WARNING, ctx->path_mgmt,
+            "sunrpc fail opcode{%d} tk_rpc_status{%d} tk_status(%d) time consume {%llu}ms",
+            inner_args->req.common_req.hdr.op_code,
+            task->tk_rpc_status, task->tk_status, ktime_to_ms(ktime_get() - ctx->start_time));
+        mulp_path_notify_io_result(ctx->path_mgmt, task->tk_rpc_status, ktime_to_ms(ctx->start_time));
+    } else {
+        mulp_path_notify_io_result(ctx->path_mgmt, 0, ktime_to_ms(ctx->start_time));
+    }
+    mulp_io_put_path(rw_args->mp_id, ctx->path_mgmt);
+    ctx->path_mgmt = NULL;
+
+    if (task->tk_rpc_status < 0) {
+        rw_args->op_status = task->tk_rpc_status;
+    } else if (task->tk_status < 0) {
+        rw_args->op_status = task->tk_status;
+    } else {
+        uint32_t op_code = inner_args->req.common_req.hdr.op_code;
+        if (op_code < ARRAY_SIZE(dpc_trans_procs) && dpc_trans_procs[op_code].trans_decode != NULL) {
+            dpc_trans_procs[op_code].trans_decode(rw_args, inner_args);
+        }
+    }
+
+    if (rw_args->statis_callback) {
+        rw_args->statis_callback(rw_args, task);
+    }
+}
+
+/* Final teardown hook of the rpc_task: forward to the caller-supplied
+ * completion callback, which owns freeing the args buffer. */
+void dpc_rpc_release(void *data)
+{
+    dpc_rpc_proc_ctx *ctx = NULL;
+    dpc_clnt_rw_args *rw_args = NULL;
+
+    if (data == NULL) {
+        return;
+    }
+    ctx = (dpc_rpc_proc_ctx *)data;
+    rw_args = (dpc_clnt_rw_args *)ctx->odc_ctx;
+    if (rw_args->callback != NULL) {
+        rw_args->callback(rw_args);
+    }
+}
+
+/* Completion table for first submissions: rpc_release fires the caller's
+ * completion callback when the task is torn down. */
+static const struct rpc_call_ops dpc_rpc_call_ops = {
+ .rpc_call_done = dpc_rpc_call_done,
+ .rpc_release = dpc_rpc_release,
+};
+
+/* Retry submissions reuse call_done but deliberately omit the release hook:
+ * the original submission owns final cleanup. */
+static const struct rpc_call_ops dpc_rpc_retry_call_ops = {
+ .rpc_call_done = dpc_rpc_call_done,
+};
+
+/* Fire-and-forget submission: completion is delivered through the
+ * callback_ops, so the local task reference is dropped immediately. */
+int dpc_run_task(struct rpc_task_setup *task_setup_data)
+{
+    struct rpc_task *task;
+
+    task = rpc_run_task(task_setup_data);
+    if (IS_ERR(task)) {
+        return PTR_ERR(task);
+    }
+    rpc_put_task(task);
+    return 0;
+}
+
+#define DPC_RPC_GET_PATH_SLEEP_TIME_MS (20)
+/*
+ * Pick the optimal path/rpc_client for this request, polling until the
+ * caller's timeout expires.
+ * Fix: msleep_interruptible() returns immediately (non-zero) while a
+ * signal is pending, so the original degenerated into a busy spin until
+ * timeout; bail out instead when interrupted.
+ */
+int dpc_clnt_get_rpc_client(struct rpc_clnt **rpc_client, dpc_rpc_proc_ctx *proc_data)
+{
+    int ret = 0;
+    mulp_file_info file_info = { 0 };
+    dpc_clnt_rw_args *args = (dpc_clnt_rw_args *)proc_data->odc_ctx;
+    uint32_t is_direct_ctrl = 0;
+
+    file_info.uuid = &args->uuid;
+    file_info.cluster_id = args->cluster_id;
+    file_info.pool_id = args->pool_id;
+    do {
+        LVOS_TP_START(TP_GET_OPT_OPTIMAL_PATH_FAIL, &ret);
+        ret = mulp_io_get_optimal_path(args->mp_id, &file_info, 0, &proc_data->path_mgmt, (void **)rpc_client,
+                                       &is_direct_ctrl);
+        LVOS_TP_END;
+        if (!ret) {
+            proc_data->inner_args->req.common_req.hdr.file_flag |= is_direct_ctrl;
+            break;
+        }
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc(mpid:%llu) get path fail %d.", args->mp_id, ret);
+        if (ktime_get() - proc_data->start_time > ms_to_ktime((uint64_t)args->timeout_ms)) {
+            MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc(mpid:%llu) get path over time %u.", args->mp_id, args->timeout_ms);
+            break;
+        }
+        if (msleep_interruptible(DPC_RPC_GET_PATH_SLEEP_TIME_MS)) {
+            MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc(mpid:%llu) get path interrupted.", args->mp_id);
+            break;
+        }
+    } while (ret);
+    return ret;
+}
+
+/*
+ * Assemble the rpc_message/rpc_task_setup, bind a path and submit.
+ * Fixes: removed the dead local 'file_info' (filled but never used --
+ * dpc_clnt_get_rpc_client builds its own copy) and cleared ctx->path_mgmt
+ * after the error-path put to keep callers from double-putting.
+ */
+int dpc_clnt_proc_exec_imp(dpc_rpc_proc_args *proc_args)
+{
+    int ret;
+    struct rpc_message msg = {0};
+    struct rpc_task_setup task_setup_data = {0};
+    dpc_rpc_proc_ctx *ctx = proc_args->ctx;
+    dpc_clnt_rw_args *args = proc_args->args;
+
+    msg.rpc_cred = args->cred;
+    msg.rpc_proc = dpc_get_procinfo(proc_args->opcode);
+    msg.rpc_argp = (void *)&(ctx->inner_args->req);
+    msg.rpc_resp = (void *)&(ctx->inner_args->resp);
+
+    task_setup_data.rpc_message = &msg;
+    task_setup_data.callback_ops = proc_args->callback_ops;
+    task_setup_data.callback_data = ctx;
+    task_setup_data.workqueue = args->workqueue;
+    task_setup_data.flags = proc_args->flags;
+    task_setup_data.task = proc_args->task;
+
+    ret = dpc_clnt_get_rpc_client(&task_setup_data.rpc_client, ctx);
+    if (ret) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc(mpid:%llu) get path info fail %d.", args->mp_id, ret);
+        return ret;
+    }
+    ret = dpc_run_task(&task_setup_data);
+    if (ret) {
+        mulp_io_put_path(args->mp_id, ctx->path_mgmt);
+        ctx->path_mgmt = NULL;
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc(mpid:%llu) run task fail %d.", args->mp_id, ret);
+        return ret;
+    }
+
+    return 0;
+}
+
+/* Re-submit a request that already failed once; the retry callback table
+ * has no release hook because the first submission owns final cleanup. */
+int dpc_clnt_do_proc_exec_retry(dpc_rpc_proc_ctx *ctx)
+{
+    int ret;
+    dpc_clnt_rw_args *args = (dpc_clnt_rw_args *)ctx->odc_ctx;
+    dpc_rpc_proc_args proc_args = {
+        .args = args,
+        .ctx = ctx,
+        .opcode = ctx->inner_args->req.common_req.hdr.op_code,
+        .flags = RPC_TASK_SOFT | RPC_TASK_TIMEOUT,
+        .task = NULL,
+        .callback_ops = (struct rpc_call_ops *)&dpc_rpc_retry_call_ops,
+    };
+
+    ret = dpc_clnt_proc_exec_imp(&proc_args);
+    if (ret) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc proc exec %d.", ret);
+    }
+    return ret;
+}
+
+/*
+ * First submission of a request: stamp the start time, encode the wire
+ * args via the per-opcode translator and submit.
+ * Fix: the opcode is now bounds-checked before indexing dpc_trans_procs;
+ * the original would have called through a NULL trans_encode for any
+ * opcode outside READ/WRITE.
+ */
+int dpc_clnt_do_proc_exec(dpc_clnt_rw_args *args, dpc_rpc_proc_ctx *ctx, uint32_t opcode, unsigned short flags)
+{
+    int ret;
+    dpc_multipath_info *info = NULL;
+    dpc_rpc_proc_args proc_args = {
+        .args = args,
+        .ctx = ctx,
+        .opcode = opcode,
+        .flags = flags,
+        .task = ctx->task,
+        .callback_ops = (struct rpc_call_ops *)&dpc_rpc_call_ops,
+    };
+
+    if (opcode >= ARRAY_SIZE(dpc_trans_procs) || dpc_trans_procs[opcode].trans_encode == NULL) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc invalid opcode %u.", opcode);
+        return -EINVAL;
+    }
+    ctx->start_time = ktime_get();
+    ret = dpc_rpc_client_get_multipath_info(args->mp_id, &info);
+    if (ret) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc mpid(%llu) get path info fail %d.", args->mp_id, ret);
+        return ret;
+    }
+    dpc_trans_procs[opcode].trans_encode(args, ctx->inner_args, info);
+    ret = dpc_clnt_proc_exec_imp(&proc_args);
+    if (ret) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc proc exec %d.", ret);
+        return ret;
+    }
+
+    return 0;
+}
+
+/* Public entry point: recover the private context placed in front of the
+ * caller-visible args buffer and submit the request. */
+int dpc_clnt_proc_exec(dpc_clnt_rw_args *args, uint32_t opcode)
+{
+    dpc_rpc_proc_ctx *ctx = NULL;
+    unsigned short flags = RPC_TASK_SOFT | RPC_TASK_TIMEOUT;
+
+    /* Async submissions let sunrpc drive completion from its workqueue. */
+    if (args->flag & DPC_REQ_IS_ASYNC) {
+        flags |= RPC_TASK_ASYNC;
+    }
+    ctx = dpc_clnt_get_ctx(args);
+    if (ctx == NULL) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "param invalid.");
+        return -EINVAL;
+    }
+    return dpc_clnt_do_proc_exec(args, ctx, opcode, flags);
+}
+
+static mulp_atomic_type g_dpc_rpc_proc_malloc = {0}; /* live-allocation counter (debug builds) */
+
+/*
+ * Allocate one contiguous buffer laid out as:
+ *   [dpc_rpc_proc_ctx][caller payload of 'size'][dpc_clnt_inner_args][rpc_task]
+ * and return a pointer to the caller payload; dpc_clnt_get_ctx() recovers
+ * the header via the magic word.
+ * Fix: the original summed the four sizes blindly, so a huge 'size' could
+ * wrap the 32-bit total and yield an undersized allocation.
+ * NOTE(review): the layout assumes the payload size keeps the trailing
+ * structs sufficiently aligned -- confirm callers pass aligned sizes.
+ */
+void* dpc_clnt_zalloc(uint32_t size, gfp_t flag)
+{
+    const uint32_t overhead =
+        sizeof(dpc_rpc_proc_ctx) + sizeof(dpc_clnt_inner_args) + sizeof(struct rpc_task);
+    uint32_t length;
+    dpc_rpc_proc_ctx *ctx;
+
+    if (size > ~(uint32_t)0 - overhead) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc alloc size %u overflow.", size);
+        return NULL;
+    }
+    length = overhead + size;
+    ctx = (dpc_rpc_proc_ctx *)mulp_mem_zalloc_by_flag(length, flag);
+    if (ctx == NULL) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "Dpc malloc failed.");
+        return NULL;
+    }
+    ctx->odc_ctx = (void *)((char *)ctx + sizeof(dpc_rpc_proc_ctx));
+    ctx->inner_args = (dpc_clnt_inner_args *)((char *)ctx->odc_ctx + size);
+    ctx->task = (struct rpc_task *)((char *)ctx->inner_args + sizeof(dpc_clnt_inner_args));
+    ctx->magic = DPC_PROC_MAGIC;
+    ctx->inner_args->magic = DPC_RPC_INNER_ARGS_MAGIC;
+#ifdef DPC_RPC_DEBUG
+    mulp_atomic_inc(&g_dpc_rpc_proc_malloc);
+#endif
+    return ctx->odc_ctx;
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_zalloc);
+
+/* Recover the context header that dpc_clnt_zalloc() placed immediately
+ * before the user-visible buffer; the magic word guards against foreign
+ * pointers being handed back. */
+dpc_rpc_proc_ctx *dpc_clnt_get_ctx(void *odc_ctx)
+{
+    dpc_rpc_proc_ctx *ctx = NULL;
+
+    if (odc_ctx == NULL) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "odc_ctx is null.");
+        return NULL;
+    }
+    ctx = (dpc_rpc_proc_ctx *)((char *)odc_ctx - sizeof(dpc_rpc_proc_ctx));
+    if (ctx->magic != DPC_PROC_MAGIC) {
+        MULP_LOG(DPC_CLIENT, MULP_ERR, "magic(%llx) is invalid.", ctx->magic);
+        return NULL;
+    }
+    return ctx;
+}
+
+/*
+ * Release a buffer obtained from dpc_clnt_zalloc().
+ * Fix: the original decremented the debug counter even when the pointer
+ * failed the magic check (ctx == NULL), permanently skewing the
+ * leak statistic reported by dpc_clnt_mem_statis_dump().
+ */
+void dpc_clnt_free(void *ptr)
+{
+    dpc_rpc_proc_ctx *ctx = dpc_clnt_get_ctx(ptr);
+
+    if (ctx == NULL) {
+        return;
+    }
+    mulp_mem_free(ctx);
+#ifdef DPC_RPC_DEBUG
+    mulp_atomic_dec(&g_dpc_rpc_proc_malloc);
+#endif
+}
+EXPORT_SYMBOL_GPL(dpc_clnt_free);
+
+/* One-line summary of allocations not yet released (debug builds keep the
+ * counter in sync; see dpc_clnt_zalloc/dpc_clnt_free). */
+int dpc_clnt_mem_statis_dump(char *buf, int len)
+{
+    int written = snprintf(buf, len, "dpc rpc not free mem:%d.\n",
+                           mulp_atomic_read(&g_dpc_rpc_proc_malloc));
+    return written;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.h
new file mode 100644
index 0000000..49f03e1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_proc.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC proc io header
+ */
+
+#ifndef _DPC_RPC_PROC_H_
+#define _DPC_RPC_PROC_H_
+
+#include "multipath_api.h"
+#include "dpc_rpc_client_api.h"
+
+int dpc_clnt_proc_exec(dpc_clnt_rw_args *args, uint32_t opcode);
+int dpc_clnt_mem_statis_dump(char *buf, int len);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.c
new file mode 100644
index 0000000..1936cab
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc rpc util
+ * Author: j30025627
+ * Create: 2025-1-13
+ */
+
+/* NOTE(review): include target lost in patch transport; restored from the
+ * register_kprobe/unregister_kprobe usage below -- verify. */
+#include <linux/kprobes.h>
+
+#include "mulp_log.h"
+#include "dpc_rpc_util.h"
+
+/* Intentionally a no-op: the kprobe below exists only so that registering
+ * it resolves the probed symbol's address; it never needs to fire. */
+static int dpc_rpc_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+    return 0;
+}
+
+/* kprobe on kallsyms_lookup_name: registering it resolves the (unexported)
+ * symbol's address; the probe handler is a no-op. */
+static struct kprobe g_dpc_rpc_kp = {
+ .symbol_name = "kallsyms_lookup_name",
+ .pre_handler = dpc_rpc_handler_pre,
+};
+
+typedef unsigned long (*kallsyms_lookup_name_t)(const char *name);
+/* Cached resolver, filled on first use. NOTE(review): not thread-safe --
+ * concurrent first calls race on register_kprobe and this cache. */
+static kallsyms_lookup_name_t g_dpc_rpc_kallsyms_lookup_name = NULL;
+
+/* Resolve an arbitrary kernel symbol by name.
+ * kallsyms_lookup_name() is no longer exported, so its address is obtained
+ * once via a throwaway kprobe and cached; subsequent calls reuse the cache.
+ * Returns the symbol address or NULL/0 on failure. */
+void* dpc_rpc_get_kallsyms_lookup_name(const char *name)
+{
+ unsigned long addr = 0;
+ int ret;
+ if (!g_dpc_rpc_kallsyms_lookup_name) {
+ ret = register_kprobe(&g_dpc_rpc_kp);
+ if (ret < 0) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "register_kprobe failed, returned %d", ret);
+ return NULL;
+ }
+ /* kp.addr is valid once registration succeeded; capture it before
+ * tearing the probe back down. */
+ g_dpc_rpc_kallsyms_lookup_name = (kallsyms_lookup_name_t)(void*)g_dpc_rpc_kp.addr;
+ unregister_kprobe(&g_dpc_rpc_kp);
+ }
+
+ if (g_dpc_rpc_kallsyms_lookup_name) {
+ addr = g_dpc_rpc_kallsyms_lookup_name(name);
+ }
+ /* %p is hashed by the kernel by default, so this log does not leak the
+ * raw address on production builds. */
+ MULP_LOG(MULP_MODULE_API, MULP_INFO, "func %s at %p", name, (void *)addr);
+
+ return (void *)addr;
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.h
new file mode 100644
index 0000000..2bfa603
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_util.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc rpc util
+ * Author: j30025627
+ * Create: 2025-1-13
+ */
+#ifndef _DPC_RPC_UTIL_H_
+#define _DPC_RPC_UTIL_H_
+
+void *dpc_rpc_get_kallsyms_lookup_name(const char *name);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.c b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.c
new file mode 100644
index 0000000..888ca44
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.c
@@ -0,0 +1,81 @@
+#include "dpc_rpc_xdr.h"
+#include <linux/sunrpc/clnt.h>
+#include "multipath_api.h"
+#include "dpc_rpc_io_common.h"
+#include "dpc_rpc_client_read.h"
+#include "dpc_rpc_client_write.h"
+#include "dpc_rpc_client_write.h"
+
+#define DPC_PROGRAM (566666666)
+#define DPC_CLIENT_RPC_VERSION (3)
+#define DPC_PIPE_DIRNAME "dpc"
+
+struct rpc_timeout dpc_default_timeout_policy = {
+ .to_initval = 8 * HZ,
+ .to_maxval = 8 * HZ,
+ .to_retries = 0,
+};
+
+const struct rpc_procinfo dpc_procedures[] = {
+ [DPC_RPC_OP_NULL] = {0},
+ [DPC_RPC_OP_READ] = {
+ .p_proc = DPC_RPC_OP_READ,
+ .p_encode = dpc_xdr_enc_read,
+ .p_decode = dpc_xdr_dec_read,
+ .p_arglen = sizeof(dpc_clnt_read_req),
+ .p_replen = sizeof(dpc_clnt_read_rsp),
+ .p_statidx = DPC_RPC_OP_READ,
+ .p_name = "READ",
+ },
+ [DPC_RPC_OP_WRITE] = {
+ .p_proc = DPC_RPC_OP_WRITE,
+ .p_encode = dpc_xdr_enc_write,
+ .p_decode = dpc_xdr_dec_write,
+ .p_arglen = sizeof(dpc_clnt_write_req),
+ .p_replen = sizeof(dpc_clnt_write_rsp),
+ .p_statidx = DPC_RPC_OP_WRITE,
+ .p_name = "WRITE",
+ },
+};
+
+static unsigned int dpc_version_counts[ARRAY_SIZE(dpc_procedures)];
+
+const struct rpc_version dpc_default_version = {
+ .number = DPC_CLIENT_RPC_VERSION,
+ .nrprocs = ARRAY_SIZE(dpc_procedures),
+ .procs = dpc_procedures,
+ .counts = dpc_version_counts,
+};
+
+static const struct rpc_version *dpc_versions[] = {
+ [DPC_CLIENT_RPC_VERSION] = &dpc_default_version,
+};
+
+static struct rpc_stat dpc_rpcstat;
+
+const struct rpc_program dpc_program = {
+ .name = "dpc",
+ .number = DPC_PROGRAM,
+ .nrvers = ARRAY_SIZE(dpc_versions),
+ .version = dpc_versions,
+ .stats = &dpc_rpcstat,
+ .pipe_dir_name = DPC_PIPE_DIRNAME,
+};
+
+void dpc_init_create_args(struct rpc_create_args *args)
+{
+ args->net = current->nsproxy->net_ns;
+ args->timeout = &dpc_default_timeout_policy;
+ args->authflavor = RPC_AUTH_UNIX;
+ args->version = DPC_CLIENT_RPC_VERSION;
+ args->servername = "dpc";
+ args->program = &dpc_program;
+}
+
+/*
+ * Return the RPC procedure table entry for @opcode, or NULL when the
+ * opcode is out of range.  The parameter type is uint32_t to match the
+ * public prototype in dpc_rpc_xdr.h; the previous rpc_op_enum parameter
+ * conflicted with that declaration.
+ */
+struct rpc_procinfo* dpc_get_procinfo(uint32_t opcode)
+{
+    if (opcode >= DPC_RPC_OP_MAX) {
+        return NULL;
+    }
+    /* Table entries are const; cast away const for the legacy interface. */
+    return (struct rpc_procinfo*)&dpc_procedures[opcode];
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.h b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.h
new file mode 100644
index 0000000..707c17e
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/dpc_adapter/dpc_rpc_xdr.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * DPC multipath XDR (encode/decode) management
+ */
+
+#ifndef _DPC_RPC_XDR_H_
+#define _DPC_RPC_XDR_H_
+
+#include <linux/sunrpc/clnt.h>
+
+void dpc_init_create_args(struct rpc_create_args *args);
+struct rpc_procinfo* dpc_get_procinfo(uint32_t opcode);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/include/dpc_kernel_version.h b/fs/nfs/enfs/unify_multipath/include/dpc_kernel_version.h
new file mode 100644
index 0000000..5bdd03a
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/include/dpc_kernel_version.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ * Description: dpc kernel version
+ * Author:
+ * Create: 2024-12-25
+ */
+
+#ifndef DPC_KERNEL_VERSION_API_H
+#define DPC_KERNEL_VERSION_API_H
+
+#include <linux/version.h>
+
+#define LINUX_3_10 (LINUX_VERSION_CODE == KERNEL_VERSION(3, 10, 0))
+#define LINUX_GREATER_OR_EQUAL_3_10 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#define GREATER_LINUX_3_10 (LINUX_VERSION_CODE > KERNEL_VERSION(3, 10, 0))
+#define UP_LINUX_3_1 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#define UP_LINUX_3_3 (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+#define LINUX_GREATER_OR_EQUAL_4_1 (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+#define LINUX_4_14 (LINUX_VERSION_CODE == KERNEL_VERSION(4, 14, 0))
+#define LINUX_4_15 ((LINUX_VERSION_CODE == KERNEL_VERSION(4, 15, 0)) || LINUX_4_14) /* fully parenthesized: safe inside #if ... && ... */
+#define UP_LINUX_4_11 (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#define UP_LINUX_4_15 ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || LINUX_4_14) /* fully parenthesized: safe inside #if ... && ... */
+#define LINUX_5_3_18 (LINUX_VERSION_CODE == KERNEL_VERSION(5, 3, 18))
+#define UP_LINUX_5_4 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+#define UP_LINUX_5_3_18 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 18))
+#define LINUX_4_4 \
+ (((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 98))) || \
+ LINUX_3_10 || LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 228))
+#define LINUX_4_19_36 (LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 36))
+#define LINUX_2_6_32 ((LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)))
+#define LINUX_4_18 (LINUX_VERSION_CODE == KERNEL_VERSION(4, 18, 0))
+#define LINUX_4_12_14 (LINUX_VERSION_CODE == KERNEL_VERSION(4, 12, 14))
+#define LINUX_4_15_18 \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(4, 15, 18) || \
+ LINUX_VERSION_CODE == KERNEL_VERSION(4, 15, 17)) // ubuntu 18.04.0 ubuntu 18.04.01, ubuntu 18.04.06
+#define LINUX_5_4_44 (LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 44))
+#define LINUX_5_4_A6 (LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 166)) // 20.04.4 LTS (Focal Fossa)
+#define LINUX_5_4_C3 \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 195)) // (0x0504C3, 328899) ubuntu 20.04.5 LTS (Focal Fossa)
+#define LINUX_4_19_90 \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 90)) // (0x04135A, 267098) openEuler release 20.03 (LTS-SP3)
+#define UP_LINUX_5_10 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) // openEuler 22.03
+#define UP_LINUX_5_14_0 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) // RedHat 9.2
+#define UP_LINUX_5_15_0 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) // Ubuntu 22.04.1 LTS, 0x50F27
+#define LINUX_5_14 (UP_LINUX_5_14_0 && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)))
+#define LINUX_4_4_0_186 (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 228))
+
+#ifdef RHEL_RELEASE_VERSION
+#define LINUX_3_10_0_229 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 1))
+#define LINUX_3_10_0_327 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 2))
+#define LINUX_3_10_0_514 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 3))
+#define LINUX_3_10_0_693 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 4))
+#define UP_4_18_0_193 (LINUX_4_18 && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2))
+#define UP_4_18_0_425 (LINUX_4_18 && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7))
+#define UP_4_18_0_477 (LINUX_4_18 && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 8))
+#define LINUX_4_18_0_305 (LINUX_4_18 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8, 4))
+#define LINUX_3_10_0_862 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 5))
+#define LINUX_3_10_0_957 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 6))
+#define LINUX_3_10_0_1062 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 7))
+#define LINUX_3_10_0_1127 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 8))
+#define LINUX_3_10_0_1160 (LINUX_3_10 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 9))
+#define UP_3_10_0_862 \
+ (LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 0) || \
+ (LINUX_VERSION_CODE == KERNEL_VERSION(3, 1, 0) && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define LINUX_5_14_0_70131 (LINUX_5_14 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9, 0))
+#define LINUX_4_14_0_115 (LINUX_4_14 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 6))
+#else
+#define LINUX_3_10_0_229 LINUX_3_10
+#define LINUX_3_10_0_327 LINUX_3_10
+#define LINUX_3_10_0_514 LINUX_3_10
+#define LINUX_3_10_0_693 LINUX_3_10
+#define LINUX_3_10_0_957 LINUX_3_10
+#define LINUX_3_10_0_1062 LINUX_3_10
+#define LINUX_3_10_0_1127 LINUX_3_10
+#define LINUX_3_10_0_1160 LINUX_3_10
+#define UP_4_18_0_193 LINUX_4_18
+#define UP_3_10_0_862 (LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 0))
+#endif
+
+#ifdef RHEL_RELEASE_VERSION
+#define LINUX_4_18_0_193 (LINUX_4_18 && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8, 2))
+#else
+#define LINUX_4_18_0_193 LINUX_4_18
+#endif
+
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/include/mulp_log.h b/fs/nfs/enfs/unify_multipath/include/mulp_log.h
new file mode 100644
index 0000000..056e5a4
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/include/mulp_log.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: enfs log
+ * Author: y00583252
+ * Create: 2023-07-31
+ */
+
+#ifndef MULP_LOG_H
+#define MULP_LOG_H
+
+#include <linux/kernel.h> /* includes reconstructed after markup loss -- verify */
+#include <linux/printk.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include "multipath_types.h"
+
+#define MULP_SOH "\001" /* ASCII Start Of Header */
+#define MULP_CRITI MULP_SOH "2"
+#define MULP_ERR MULP_SOH "3" /* error conditions */
+#define MULP_WARNING MULP_SOH "4" /* warning conditions */
+#define MULP_INFO MULP_SOH "6" /* informational */
+#define MULP_DEBUG MULP_SOH "7" /* debug-level messages */
+#define MULP_DEFAULT MULP_SOH "d" /* the default kernel loglevel */
+
+typedef enum {
+ MULP_MODULE_API,
+ MULP_MODULE_SHARDVIEW,
+ MULP_MODULE_PATH_MGMT,
+ MULP_MODULE_LOAD_BALANCE,
+ MULP_MODULE_PATH_DETECT,
+ MULP_MODULE_TP,
+ MULP_MODULE_MAX = 40, // need care about upgrade before change it , because mem size changed
+} mulp_module_type;
+
+#define MULP_LOG_INTERVAL (1 * 60 * HZ) /* 日志限频周期 */
+#ifdef DEBUG
+#define MULP_LOG_COUNT 30 /* 单个日志限频周期内日志打印条数 */
+#else
+#define MULP_LOG_COUNT 3 /* 单个日志限频周期内日志打印条数 */
+#endif
+
+#define MULP_PRINT_LIMIT_FUNC(level, mod, interval, burst, can_print) \
+do { \
+ static uint32_t uiPrinted = 0; \
+ static uint32_t uiMissed = 0; \
+ static uint64_t ullLast = 0; \
+ uint64_t ullNow = jiffies; \
+ static uint64_t ulstart = 0; \
+ \
+ /* 如果两次触发的时间间隔大于设定时间 */ \
+ if (ullNow > (ullLast + (interval))) { \
+ /* 打印次数清零,更新时间戳 */ \
+ uiPrinted = 0; \
+ ullLast = ullNow; \
+ if (uiMissed != 0 && (ullNow >= ulstart + (interval) * 10)) { \
+ uint32_t missed = uiMissed; \
+ uiMissed = 0; \
+ ulstart = jiffies; \
+ printk(level " " #mod " [%s:%d] %u msg suppressed.\n", \
+ __FUNCTION__, __LINE__, missed); \
+ } \
+ } \
+ \
+ /* 打印次数最多为burst次 */ \
+ if ((burst) > uiPrinted) { \
+ uiPrinted++; \
+ (can_print) = 1; \
+ /* 更新上一次触发的时间 */ \
+ ullLast = ullNow; \
+ } else { \
+ uiMissed++; \
+ (can_print) = 0; \
+ } \
+} while (0)
+
+#define MULP_LOG_NOLIMIT(mod, level, fmt, ...) \
+ do { \
+ printk(level " " #mod " [%s:%d] " fmt "\n", __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ } while (0) // lint !e539
+
+#define MULP_LOG(mod, level, fmt, ...) \
+ do { \
+ uint32_t can_print = 0; \
+ MULP_PRINT_LIMIT_FUNC(level, mod, MULP_LOG_INTERVAL, MULP_LOG_COUNT, can_print); \
+ if (can_print) { \
+ printk(level " " #mod " [%s:%d] " fmt "\n", __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ } \
+ } while (0) // lint !e539
+
+#define MULP_LOG_INTERVAL_20MIN (20 * 60 * HZ)
+#define MULP_LOG_INTERVAL_10MIN (10 * 60 * HZ)
+#define MULP_LOG_COUNT_1 1
+#define MULP_LOG_COUNT_3 3
+#define MULP_LOG_LONG_INTERVAL (20 * 60 * HZ)
+
+#define MULP_LOG_LIMIT(mod, level, interval, burst, fmt, ...) \
+ do { \
+ uint32_t can_print = 0; \
+ MULP_PRINT_LIMIT_FUNC(level, mod, interval, burst, can_print); \
+ if (can_print) { \
+ printk(level " " #mod " [%s:%d] " fmt "\n", __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ } \
+ } while (0) // lint !e539
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/include/mulp_porting.h b/fs/nfs/enfs/unify_multipath/include/mulp_porting.h
new file mode 100644
index 0000000..bfeff27
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/include/mulp_porting.h
@@ -0,0 +1,148 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Platform dependent utilities
+ */
+#ifndef _MULTIP_PORTING_H_
+#define _MULTIP_PORTING_H_
+
+#include <linux/kernel.h> /* includes reconstructed after markup loss -- verify */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include "multipath_types.h"
+#include "mulp_log.h"
+#include "mulp_tp.h"
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (((sizeof((a))) / (sizeof((a)[0]))))
+#endif
+
+#define mulp_max(a, b) (((a) > (b)) ? (a) : (b))
+#define mulp_min(a, b) (((a) < (b)) ? (a) : (b))
+
+#define msec_per_sec 1000
+
+typedef spinlock_t mulp_spin_lock_type;
+#define mulp_spin_lock_init spin_lock_init
+#define mulp_spin_lock spin_lock
+#define mulp_spin_unlock spin_unlock
+
+typedef rwlock_t mulp_rwlock_type;
+#define mulp_rwlock_init rwlock_init
+#define mulp_read_lock read_lock
+#define mulp_read_unlock read_unlock
+#define mulp_write_lock write_lock
+#define mulp_write_unlock write_unlock
+
+typedef struct rw_semaphore mulp_rw_semaphore;
+#define mulp_down_read down_read
+#define mulp_up_read up_read
+#define mulp_down_write down_write
+#define mulp_up_write up_write
+#define mulp_init_rwsem init_rwsem
+
+typedef atomic_t mulp_atomic_type;
+#define mulp_atomic_set atomic_set
+#define mulp_atomic_read atomic_read
+#define mulp_atomic_xchg atomic_xchg
+#define mulp_atomic_cmpxchg atomic_cmpxchg
+#define mulp_atomic_dec atomic_dec
+#define mulp_atomic_dec_return atomic_dec_return
+#define mulp_atomic_inc atomic_inc
+#define mulp_atomic_inc_return atomic_inc_return
+#define mulp_atomic_add atomic_add
+
+typedef atomic64_t mulp_atomic64_type;
+#define mulp_atomic64_set atomic64_set
+#define mulp_atomic64_read atomic64_read
+#define mulp_atomic64_xchg atomic64_xchg
+#define mulp_atomic64_cmpxchg atomic64_cmpxchg
+#define mulp_atomic64_dec atomic64_dec
+#define mulp_atomic64_inc atomic64_inc
+#define mulp_atomic64_dec_return atomic64_dec_return
+#define mulp_atomic64_inc_return atomic64_inc_return
+#define mulp_atomic64_add atomic64_add
+
+typedef struct list_head mulp_list_head_type;
+#define mulp_init_list_head INIT_LIST_HEAD
+#define mulp_list_empty list_empty
+#define mulp_list_first_entry list_first_entry
+#define mulp_list_add_tail list_add_tail
+#define mulp_list_del list_del
+#define mulp_list_del_init list_del_init
+#define mulp_list_for_each_entry list_for_each_entry
+#define mulp_list_for_each_entry_safe list_for_each_entry_safe
+
+#define MULP_MSLEEP_INTERVAL_MIN 1
+
+typedef struct task_struct mulp_task_type;
+#define mulp_thread_create kthread_create
+#define mulp_thread_stop kthread_stop
+#define mulp_free_task free_task
+#define mulp_wake_up_process wake_up_process
+#define mulp_thread_should_stop kthread_should_stop
+
+#define mulp_strtoull kstrtoull
+
+#define mulp_is_err IS_ERR
+#define mulp_ptr_err PTR_ERR
+
+typedef struct semaphore mulp_semaphore_type;
+#define mulp_semaphore_init sema_init
+#define mulp_semaphore_up up
+#define mulp_semaphore_down_interruptible down_interruptible
+
+void *mulp_mem_malloc(uint32_t size);
+void *mulp_mem_zalloc(uint32_t size);
+void *mulp_mem_zalloc_by_flag(uint32_t size, gfp_t flag);
+void mulp_mem_free(void *ptr);
+uint64_t mulp_get_millisec_time(void);
+
+struct proc_dir_entry *mulp_proc_create_dir(const char *parent_name, struct proc_dir_entry *parent);
+void mulp_proc_delete_dir(const char *parent_name, struct proc_dir_entry *parent);
+
+#define MULP_MS_TO_SEC (1000)
+/*
+ * Sleep for roughly @ms milliseconds (minimum one jiffy).
+ * NOTE(review): TASK_INTERRUPTIBLE is set only once; schedule_timeout()
+ * returns with the task RUNNING, so the re-arm loop below likely returns
+ * immediately rather than sleeping again, and the "> sleep_time" early
+ * return looks unreachable (the remaining time never exceeds the request).
+ * msleep()/msleep_interruptible() would express the intent directly --
+ * confirm before changing behavior.
+ */
+static inline void mulp_msleep(long ms)
+{
+ long sleep_time;
+ long schedule_timeo;
+
+ sleep_time = (long)((ms * HZ) / MULP_MS_TO_SEC);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (sleep_time <= 0) {
+ schedule_timeo = schedule_timeout(1);
+ } else {
+ schedule_timeo = schedule_timeout(sleep_time);
+ }
+
+ while (schedule_timeo > 0) {
+ schedule_timeo = schedule_timeout(schedule_timeo);
+ if (schedule_timeo > sleep_time) {
+ return;
+ }
+ }
+ return;
+}
+
+#endif
diff --git a/fs/nfs/enfs/unify_multipath/infra/mulp_proc.c b/fs/nfs/enfs/unify_multipath/infra/mulp_proc.c
new file mode 100644
index 0000000..c7c255e
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/infra/mulp_proc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ * Description: dpc adapter print unify multi path info
+ * Author: f30066215
+ * Create: 2025-1-8
+ */
+#include "mulp_porting.h"
+
+struct proc_dir_entry *mulp_proc_create_dir(const char *dir_name, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *mulp_proc_dir = NULL;
+ if (dir_name == NULL) {
+ return NULL;
+ }
+ mulp_proc_dir = proc_mkdir(dir_name, parent);
+ if (mulp_proc_dir == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "Unify_multipath create proc dir err.");
+ return NULL;
+ }
+ return mulp_proc_dir;
+}
+
+void mulp_proc_delete_dir(const char *dir_name, struct proc_dir_entry *parent)
+{
+ if (dir_name == NULL) {
+ return;
+ }
+ remove_proc_subtree(dir_name, parent);
+}
diff --git a/fs/nfs/enfs/unify_multipath/infra/mulp_tp.c b/fs/nfs/enfs/unify_multipath/infra/mulp_tp.c
new file mode 100644
index 0000000..a818bab
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/infra/mulp_tp.c
@@ -0,0 +1,644 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2025, Huawei Inc
+ * multipath tracepoint source file
+ */
+
+#include <linux/kernel.h> /* includes reconstructed after markup loss -- verify */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "mulp_porting.h"
+#include "mulp_log.h"
+#include "dpc_kernel_version.h"
+
+#define MULP_PROC_TP_DIR "unify_multipath_tp"
+#define MULP_PROC_TP_NAME "tp"
+
+static struct proc_dir_entry *g_mulp_proc_parent = NULL;
+static LVOS_HT_S *g_mulp_tp_hash_table = NULL; /* TracePoint的Hash表 */
+
+/*
+ * Minimal strcpy_s(): copy @strSrc into @strDest only when it fits,
+ * terminating NUL included.  Returns 0 on success, -1 when the source is
+ * too long (the destination is left untouched in that case).
+ * NOTE(review): shadows the C11 Annex K function of the same name but has
+ * different failure semantics (no destination clearing, no NULL checks);
+ * callers must pass valid pointers.
+ */
+static int strcpy_s(char *strDest, size_t destMax, const char *strSrc)
+{
+ if (strlen(strSrc) >= destMax) {
+ return -1;
+ }
+
+ strcpy(strDest, strSrc);
+ return 0;
+}
+
+void mulp_do_tracepoint_pause(LVOS_TRACEP_NEW_S *tracepoint)
+{
+ // to be added
+ return;
+}
+EXPORT_SYMBOL_GPL(mulp_do_tracepoint_pause);
+
+static int mulp_str_to_hash_key(const char *str, uint maxLen, uint64_t *key)
+{
+ uint i = 0;
+ uint len = 0;
+ uint64_t keyTemp = 0ULL;
+
+ if (unlikely(NULL == str || NULL == key)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return RETURN_ERROR;
+ }
+
+ len = strnlen(str, maxLen);
+ if (unlikely(len == 0 || len >= maxLen)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Length of str is zero or beyond the max length.");
+ return RETURN_ERROR;
+ }
+
+ for (i = 0; i < len; ++i) {
+ keyTemp = LVOS_STR_TO_KEY_BASE_NUM * keyTemp + (uint8_t)str[i];
+ }
+
+ *key = keyTemp;
+
+ return RETURN_OK;
+}
+
+/* 用于增加hash算法的随机分布性 */
+#define LVOS_GOLDEN_RATIO_PRIME 0x9e37fffffffc0001ULL
+#define LVOS_MAX_HT_BITS 64
+static uint mulp_get_hash_addr(uint v_uiKey1, uint64_t v_ullKey2, uint v_uiBits)
+{
+ uint64_t ullTmp;
+ KEY_S stKey;
+
+ stKey.unKey64.ull64bits = v_ullKey2;
+ stKey.unKey64.ui32bits[1] = v_uiKey1;
+ ullTmp = *((uint64_t *)(&stKey));
+ ullTmp *= LVOS_GOLDEN_RATIO_PRIME;
+
+ return (uint)(ullTmp >> (LVOS_MAX_HT_BITS - v_uiBits));
+}
+
+int mulp_search_hash_item(LVOS_HT_S *v_pstHashTable, uint v_uiKey1, uint64_t v_ullKey2, LVOS_HASH_ITEM_S **v_ppstData)
+{
+ LVOS_HASH_LIST_S *pstElem = NULL;
+ LVOS_HASH_ITEM_S *pstData = NULL;
+ struct list_head *pstPos = NULL;
+ uint uiAddr = 0;
+
+ /* 检查参数合法性 */
+ if ((NULL == v_pstHashTable) || (NULL == v_ppstData)) {
+ return RETURN_ERROR;
+ }
+
+ /* 计算Hash地址 */
+ uiAddr = mulp_get_hash_addr(v_uiKey1, v_ullKey2, v_pstHashTable->uiBits);
+ pstElem = &(v_pstHashTable->pstHashElem[uiAddr]);
+
+ /* 扫描冲突链,查找指定元素 */
+ list_for_each(pstPos, &pstElem->stBucketList)
+ {
+ pstData = list_entry(pstPos, LVOS_HASH_ITEM_S, stConflictList);
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "List pstData->uiKey1(%u), pstData->ullKey2(0x%llx).", pstData->uiKey1,
+ pstData->ullKey2);
+ if ((v_uiKey1 == pstData->uiKey1) && (v_ullKey2 == pstData->ullKey2)) {
+ *v_ppstData = pstData;
+ return RETURN_OK;
+ }
+ }
+
+ /* 若不存在该项,则返回失败 */
+ *v_ppstData = NULL;
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Cannot find pstData->uiKey1(%u), pstData->ullKey2(0x%llx).", v_uiKey1,
+ v_ullKey2);
+ return RETURN_ERROR;
+}
+
+int mulp_get_tracepoint(uint pid, const char *name, LVOS_TRACEP_NEW_S **tracepoint)
+{
+ int ret = RETURN_ERROR;
+ uint64_t key = 0ULL;
+ LVOS_HASH_ITEM_S *hashData = NULL;
+ LVOS_TP_HASH_S *hashTPData = NULL;
+
+ if (NULL == name || NULL == tracepoint) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL, get tracepoint failed.");
+ return RETURN_ERROR;
+ }
+
+ /* 计算HASH key */
+ if (RETURN_OK != mulp_str_to_hash_key(name, MAX_NAME_LEN, &key)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Cannot make the key from tracepoint name.");
+ *tracepoint = NULL;
+ return RETURN_ERROR;
+ }
+
+ ret = mulp_search_hash_item(g_mulp_tp_hash_table, pid, key, &hashData);
+ if (RETURN_OK != ret) {
+ *tracepoint = NULL;
+ return ret;
+ }
+
+ hashTPData = (LVOS_TP_HASH_S *)(void *)hashData;
+ *tracepoint = &(hashTPData->stTP);
+
+ return RETURN_OK;
+}
+EXPORT_SYMBOL_GPL(mulp_get_tracepoint);
+
+int mulp_active_tracepoint(uint pid, const char *name, int type, uint time, LVOS_TRACEP_PARAM_S userParam)
+{
+ int ret = RETURN_ERROR;
+ LVOS_TRACEP_NEW_S *tracepoint = NULL;
+
+ if (NULL == name) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "name is NULL.");
+ return RETURN_ERROR;
+ }
+
+ /* 获取tracepoint */
+ ret = mulp_get_tracepoint(pid, name, &tracepoint);
+ if (NULL == tracepoint) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Get tracepoint(%s) failed.", name);
+ return ret;
+ }
+
+ /* 回调函数为NULL,不能被激活 */
+ if (LVOS_TP_TYPE_CALLBACK == type && NULL == tracepoint->fnHook) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Callback function of tracepoint(%s) is NULL, cannot active callback type.",
+ name);
+ return RETURN_ERROR;
+ }
+
+ tracepoint->iActive = LVOS_TRACEP_STAT_ACTIVE;
+ tracepoint->type = type;
+ tracepoint->timeAlive = time;
+ tracepoint->timeCalled = 0;
+ /* 拷贝激活命令指定的用户参数到Tracepoint中 */
+ if (strcpy_s(&tracepoint->stParam.achParamData[0], LVOS_TRACEP_PARAM_SIZE, &userParam.achParamData[0]) != 0) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "strcpy sz name failed.");
+ return RETURN_ERROR;
+ }
+ tracepoint->stParam.achParamData[LVOS_TRACEP_PARAM_SIZE - 1] = '\0';
+
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Tracepoint(%s) is active.", name);
+ return RETURN_OK;
+}
+EXPORT_SYMBOL_GPL(mulp_active_tracepoint);
+
+int mulp_deactive_tracepoint(uint pid, const char *name)
+{
+ int ret = RETURN_ERROR;
+ LVOS_TRACEP_NEW_S *tracepoint = NULL;
+
+ if (NULL == name) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return RETURN_ERROR;
+ }
+
+ /* 获取Tracepoint */
+ ret = mulp_get_tracepoint(pid, name, &tracepoint);
+ if (NULL == tracepoint) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Get tracepoint(%s) failed.", name);
+ return ret;
+ }
+
+ /* Tracepoint所有信息恢复为初始值 */
+ tracepoint->iActive = LVOS_TRACEP_STAT_DEACTIVE;
+ tracepoint->type = LVOS_TP_TYPE_BUTT;
+ tracepoint->timeAlive = 0;
+ tracepoint->timeCalled = 0;
+ (void)memset(&tracepoint->stParam.achParamData[0], 0, LVOS_TRACEP_PARAM_SIZE);
+
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Tracepoint(%s) is deactive.", name);
+ return RETURN_OK;
+}
+EXPORT_SYMBOL_GPL(mulp_deactive_tracepoint);
+
+void mulp_deactive_for_travel(LVOS_HASH_ITEM_S *hashdata, void *param)
+{
+ LVOS_TP_HASH_S *hashTPData = NULL;
+
+ /* param应该为NULL,不处理 */
+ if (NULL != param) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "param should be NULL.");
+ }
+
+ if (NULL == hashdata) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return;
+ }
+
+ /* Tracepoint恢复为初始值 */
+ hashTPData = (LVOS_TP_HASH_S *)(void *)hashdata;
+ hashTPData->stTP.iActive = LVOS_TRACEP_STAT_DEACTIVE;
+ hashTPData->stTP.type = LVOS_TP_TYPE_BUTT;
+ hashTPData->stTP.timeAlive = 0;
+ hashTPData->stTP.timeCalled = 0;
+ (void)memset(&(hashTPData->stTP.stParam.achParamData[0]), 0, LVOS_TRACEP_PARAM_SIZE);
+
+ return;
+}
+
+int mulp_travel_hash_table(LVOS_HT_S *v_pstHashTable, void (*fn)(LVOS_HASH_ITEM_S *, void *), void *param)
+{
+ uint i;
+ struct list_head *pstPos = NULL;
+ LVOS_HASH_ITEM_S *pstData = NULL;
+ LVOS_HASH_LIST_S *pstElem = NULL;
+
+ /* 检查参数合法性 ,param可以为NULL,表示fn不需要额外参数 */
+ if ((NULL == v_pstHashTable) || (NULL == fn)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return RETURN_ERROR;
+ }
+
+ for (i = 0; i < v_pstHashTable->uiTableSize; ++i) {
+ pstElem = &(v_pstHashTable->pstHashElem[i]);
+ /* 扫描冲突链,查找指定元素 */
+ list_for_each(pstPos, &pstElem->stBucketList)
+ {
+ pstData = list_entry(pstPos, LVOS_HASH_ITEM_S, stConflictList);
+ fn(pstData, param);
+ }
+ }
+
+ return RETURN_OK;
+}
+
+static int mulp_deactive_tracepoint_all(void)
+{
+ /* 遍历Hash表,执行去激活动作 */
+ return mulp_travel_hash_table(g_mulp_tp_hash_table, mulp_deactive_for_travel, NULL);
+}
+
+int mulp_insert_hash_item(LVOS_HT_S *v_pstHashTable, LVOS_HASH_ITEM_S *v_pstData)
+{
+ uint uiAddr = 0;
+ LVOS_HASH_LIST_S *pstElem = NULL;
+ LVOS_HASH_ITEM_S *pstData = NULL;
+ struct list_head *pstPos = NULL;
+
+ /* 检查参数合法性 */
+ if ((NULL == v_pstHashTable) || (NULL == v_pstData)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return RETURN_ERROR;
+ }
+
+ /* 计算Hash地址 */
+ uiAddr = mulp_get_hash_addr(v_pstData->uiKey1, v_pstData->ullKey2, v_pstHashTable->uiBits);
+ pstElem = &(v_pstHashTable->pstHashElem[uiAddr]);
+
+ /* 判断是否已存在该项 */
+ list_for_each(pstPos, &pstElem->stBucketList)
+ {
+ pstData = list_entry(pstPos, LVOS_HASH_ITEM_S, stConflictList);
+ if ((pstData->uiKey1 == v_pstData->uiKey1) && (pstData->ullKey2 == v_pstData->ullKey2)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Hash item already exists, Key1(%u), Key2(0x%llx).", pstData->uiKey1,
+ pstData->ullKey2);
+ return RETURN_ERROR;
+ }
+ }
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "ADD in list, Key1(%u), Key2(0x%llx).", v_pstData->uiKey1, v_pstData->ullKey2);
+ /* 若不存在该项,则在hash表加入该元素 */
+ list_add(&v_pstData->stConflictList, &pstElem->stBucketList);
+ return RETURN_OK;
+}
+
+int mulp_reg_tracepoint(uint pid, const char *name, const char *desc, FN_TRACEP_COMMON_T fnHook)
+{
+ uint64_t key = 0ULL;
+ int activeStat = LVOS_TRACEP_STAT_DEACTIVE;
+ uint timeAlive = 0;
+ int type = LVOS_TP_TYPE_BUTT;
+ LVOS_TP_HASH_S *hashData = NULL;
+ if (NULL == name || NULL == desc) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL, register failed.");
+ return RETURN_ERROR;
+ }
+
+ /* 计算HASH key */
+ if (RETURN_OK != mulp_str_to_hash_key(name, MAX_NAME_LEN, &key)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Cannot make the key from tracepoint name.");
+ return RETURN_ERROR;
+ }
+
+ hashData = (LVOS_TP_HASH_S *)kmalloc(sizeof(LVOS_TP_HASH_S), GFP_KERNEL);
+ if (hashData == NULL) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Allocate hash data failed.");
+ return RETURN_ERROR;
+ }
+
+ (void)memset(hashData, 0, sizeof(LVOS_TP_HASH_S));
+
+ /* 初始化Tracepoint */
+ /* 新框架PID全部用无效值0计算HASH */
+ hashData->uiKey1 = 0;
+ hashData->ullKey2 = key;
+ hashData->stTP.uiPid = pid;
+ /* 注册后默认为不激活 */
+ hashData->stTP.iActive = activeStat;
+ hashData->stTP.type = type;
+ hashData->stTP.timeAlive = timeAlive;
+ hashData->stTP.timeCalled = 0;
+ if (strcpy_s(&hashData->stTP.szName[0], MAX_NAME_LEN, name) != 0) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "strcpy sz name failed.");
+ kfree(hashData);
+ return RETURN_ERROR;
+ }
+ hashData->stTP.szName[MAX_NAME_LEN - 1] = '\0';
+ if (strcpy_s(&hashData->stTP.szDesc[0], MAX_DESC_LEN, desc) != 0) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "strcpy sz name failed.");
+ kfree(hashData);
+ return RETURN_ERROR;
+ }
+ hashData->stTP.szDesc[MAX_DESC_LEN - 1] = '\0';
+ hashData->stTP.fnHook = fnHook;
+
+ /* 将Tracepoint链入Hash table */
+ if (RETURN_OK != mulp_insert_hash_item(g_mulp_tp_hash_table, (LVOS_HASH_ITEM_S *)(void *)hashData)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Insert tracepoint(%s) to hash table failed.", name);
+ kfree(hashData);
+ return RETURN_ERROR;
+ }
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Reg tracepoint(%s) to hash table success.", name);
+ return RETURN_OK;
+}
+EXPORT_SYMBOL_GPL(mulp_reg_tracepoint);
+
+int mulp_remove_hash_item(LVOS_HT_S *v_pstHashTable, uint v_uiKey1, uint64_t v_ullKey2)
+{
+ LVOS_HASH_LIST_S *pstElem = NULL;
+ LVOS_HASH_ITEM_S *pstData = NULL;
+ struct list_head *pstPos = NULL;
+ uint uiAddr = 0;
+
+ /* 检查参数合法性 */
+ if (NULL == v_pstHashTable) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is NULL.");
+ return RETURN_ERROR;
+ }
+
+ /* 计算Hash地址 */
+ uiAddr = mulp_get_hash_addr(v_uiKey1, v_ullKey2, v_pstHashTable->uiBits);
+ pstElem = &(v_pstHashTable->pstHashElem[uiAddr]);
+
+ /* 扫描冲突链,查找要删除的元素 */
+ list_for_each(pstPos, &pstElem->stBucketList)
+ {
+ pstData = list_entry(pstPos, LVOS_HASH_ITEM_S, stConflictList);
+ if ((v_uiKey1 == pstData->uiKey1) && (v_ullKey2 == pstData->ullKey2)) {
+ list_del_init(&pstData->stConflictList);
+ MULP_LOG(MULP_MODULE_TP, MULP_INFO, "DELETE list success, Key1(%u), Key2(0x%llx).", v_uiKey1, v_ullKey2);
+ return RETURN_OK;
+ }
+ }
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "DELETE list failed, Key1(%u), Key2(0x%llx).", v_uiKey1, v_ullKey2);
+ /* 若不存在该项,则返回失败 */
+ return RETURN_ERROR;
+}
+
+int mulp_unreg_tracepoint(uint pid, const char *name)
+{
+ uint key1 = pid;
+ uint64_t key2 = 0ULL;
+ LVOS_HASH_ITEM_S *hashData = NULL;
+
+ if (NULL == name) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "name is NULL.");
+ return RETURN_ERROR;
+ }
+
+ /* 计算HASH key */
+ if (RETURN_OK != mulp_str_to_hash_key(name, MAX_NAME_LEN, &key2)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Cannot make the key from tracepoint name.");
+ return RETURN_ERROR;
+ }
+
+ /* 查找HashTable */
+ if (RETURN_OK != mulp_search_hash_item(g_mulp_tp_hash_table, key1, key2, &hashData)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Search tracepoint(%s) failed.", name);
+ return RETURN_ERROR;
+ }
+
+ /* 链表操作,查找成功就不会删除失败,不判断返回值 */
+ (void)mulp_remove_hash_item(g_mulp_tp_hash_table, key1, key2);
+
+ kfree(hashData);
+
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Unreg tracepoint(%s) to hash table success.", name);
+ return RETURN_OK;
+}
+EXPORT_SYMBOL_GPL(mulp_unreg_tracepoint);
+
+static int mulp_create_hash_table(uint v_uiHashSize, uint v_uiBits)
+{
+ uint uiCount = 0;
+
+ /* 检查参数合法性 */
+ if ((v_uiHashSize == 0) || (v_uiBits > LVOS_MAX_HT_BITS)) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Parameter is invalid.");
+ return RETURN_ERROR;
+ }
+
+ /* 分配Hash表结构 */
+ g_mulp_tp_hash_table = (LVOS_HT_S *)kmalloc(sizeof(LVOS_HT_S), GFP_KERNEL);
+ if (g_mulp_tp_hash_table == NULL) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "malloc LVOS_HT_S fail.");
+ return RETURN_ERROR;
+ }
+
+ /* 分配Hash表项数组 */
+ g_mulp_tp_hash_table->pstHashElem =
+ (LVOS_HASH_LIST_S *)kmalloc(v_uiHashSize * sizeof(LVOS_HASH_LIST_S), GFP_KERNEL);
+ if (g_mulp_tp_hash_table->pstHashElem == NULL) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "malloc hashtable array fail.");
+ kfree(g_mulp_tp_hash_table);
+ g_mulp_tp_hash_table = NULL;
+ return RETURN_ERROR;
+ }
+
+ /* 初始化Hash表 */
+ g_mulp_tp_hash_table->uiTableSize = v_uiHashSize;
+ g_mulp_tp_hash_table->uiBits = v_uiBits;
+ for (uiCount = 0; uiCount < v_uiHashSize; uiCount++) {
+ INIT_LIST_HEAD(&(g_mulp_tp_hash_table->pstHashElem[uiCount].stBucketList));
+ }
+ return RETURN_OK;
+}
+
+static int mulp_tracepoint_hash_inner_init(void)
+{
+ int ret;
+ ret = mulp_create_hash_table(LVOS_MAX_TP_HASH_SIZE, LVOS_MAX_TP_HASH_SHIFT);
+ if (ret != RETURN_OK) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Create Hash table failed.");
+ g_mulp_tp_hash_table = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Free the tracepoint hash table (bucket array first, then the table
+ * structure) and reset the global pointer.  The element pointer is cleared
+ * BEFORE freeing the table: the previous code wrote through
+ * g_mulp_tp_hash_table after kfree(), a use-after-free.
+ */
+static void mulp_destroy_hash_table(void)
+{
+    /* Nothing to do if the table was never created. */
+    if (NULL == g_mulp_tp_hash_table) {
+        MULP_LOG(MULP_MODULE_TP, MULP_ERR, "g_mulp_tp_hash_table NULL.");
+        return;
+    }
+
+    kfree(g_mulp_tp_hash_table->pstHashElem);
+    g_mulp_tp_hash_table->pstHashElem = NULL;
+
+    kfree(g_mulp_tp_hash_table);
+    g_mulp_tp_hash_table = NULL;
+
+    return;
+}
+
+static void mulp_tracepoint_hash_inner_exit(void)
+{
+ mulp_destroy_hash_table();
+ return;
+}
+
+static int mulp_handle_tracepoint_cmd(MulpTracePointCmd *tpCmd)
+{
+ int ret = -1;
+ int cmd = tpCmd->cmd;
+
+ if (cmd == LVOS_TRACEP_STAT_ACTIVE) {
+ ret = mulp_active_tracepoint(0, tpCmd->traceName, tpCmd->type, tpCmd->timeAlive, tpCmd->userParam);
+ return ret;
+ }
+
+ if (cmd == LVOS_TRACEP_STAT_DEACTIVE) {
+ ret = mulp_deactive_tracepoint(0, tpCmd->traceName);
+ return ret;
+ }
+
+ if (cmd == LVOS_TRACEP_STAT_DELETED) {
+ ret = mulp_deactive_tracepoint_all();
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mulp_ioctl_handle_tracepoint_cmd(void *arg)
+{
+ int ret = -1;
+ MulpTracePointCmd cmd = { 0 };
+
+ ret = copy_from_user(&cmd, arg, sizeof(MulpTracePointCmd));
+ if (ret != 0) {
+ MULP_LOG(MULP_MODULE_TP, MULP_ERR, "copy from user err:%d.", ret);
+ return ret;
+ }
+
+ cmd.traceName[MAX_NAME_LEN - 1] = '\0';
+ MULP_LOG(MULP_MODULE_TP, MULP_INFO, "handle action=%d tp name=\"%s\" start.", cmd.cmd, cmd.traceName);
+ ret = mulp_handle_tracepoint_cmd(&cmd);
+ MULP_LOG(MULP_MODULE_TP, MULP_INFO, "handle action=%d tp name=\"%s\", result=%d.", cmd.cmd, cmd.traceName, ret);
+ return ret;
+}
+
+static atomic_t tp_file_available = ATOMIC_INIT(1);
+static int mulp_tp_open(struct inode *inode, struct file *filp)
+{
+ if (!atomic_dec_and_test(&tp_file_available)) {
+ atomic_inc(&tp_file_available);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int mulp_tp_release(struct inode *inode, struct file *filp)
+{
+ atomic_inc(&tp_file_available);
+ return 0;
+}
+
+static long mulp_tp_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err;
+
+ if (cmd != IOCTL_CMD_TP_ACTION) {
+ return -EINVAL;
+ }
+ err = mulp_ioctl_handle_tracepoint_cmd((void *)(uintptr_t)arg);
+ if (err != 0) {
+ return err;
+ }
+
+ return 0;
+}
+
+#if !(UP_LINUX_5_10)
+static const struct file_operations mulp_tp_fops = {
+ .open = mulp_tp_open,
+ .release = mulp_tp_release,
+ .unlocked_ioctl = mulp_tp_unlocked_ioctl,
+};
+#else
+static const struct proc_ops mulp_tp_fops = {
+ .proc_open = mulp_tp_open,
+ .proc_release = mulp_tp_release,
+ .proc_ioctl = mulp_tp_unlocked_ioctl,
+};
+#endif
+
+int mulp_create_tp_file(void)
+{
+ struct proc_dir_entry *entry = NULL;
+ entry = proc_create_data(MULP_PROC_TP_NAME, 0, g_mulp_proc_parent, &mulp_tp_fops, (void *)MY_PID);
+ if (entry == NULL) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void mulp_delete_tp_file(void)
+{
+ remove_proc_entry(MULP_PROC_TP_NAME, g_mulp_proc_parent);
+}
+
+/*
+ * Initialize the tracepoint subsystem: create the proc directory, the hash
+ * table, and the control file.  Returns 0 on success or a negative errno.
+ * Uses goto-based cleanup so every failure path releases what was acquired;
+ * the previous code leaked the proc directory (and, for the last step, the
+ * hash table) on error.
+ */
+int mulp_tracepoint_init(void)
+{
+    int err;
+
+    g_mulp_proc_parent = mulp_proc_create_dir(MULP_PROC_TP_DIR, NULL);
+    if (g_mulp_proc_parent == NULL) {
+        MULP_LOG(MULP_MODULE_TP, MULP_ERR, "create parent tp dir failed.");
+        return -ENOMEM;
+    }
+
+    err = mulp_tracepoint_hash_inner_init();
+    if (err != 0) {
+        MULP_LOG(MULP_MODULE_TP, MULP_ERR, "Loading mulp tracepoint module ret: %d.", err);
+        goto out_proc;
+    }
+
+    err = mulp_create_tp_file();
+    if (err != 0) {
+        MULP_LOG(MULP_MODULE_TP, MULP_ERR, "mulp create tp file err:%d.", err);
+        goto out_hash;
+    }
+
+    return 0;
+
+out_hash:
+    mulp_tracepoint_hash_inner_exit();
+out_proc:
+    mulp_proc_delete_dir(MULP_PROC_TP_DIR, NULL);
+    g_mulp_proc_parent = NULL;
+    return err;
+}
+
+void mulp_tracepoint_exit()
+{
+ mulp_delete_tp_file();
+ mulp_tracepoint_hash_inner_exit();
+ mulp_proc_delete_dir(MULP_PROC_TP_DIR, NULL);
+ MULP_LOG(MULP_MODULE_TP, MULP_INFO, "Unloading mulp tracepoint module ver");
+ return;
+}
diff --git a/fs/nfs/enfs/unify_multipath/infra/mulp_tp.h b/fs/nfs/enfs/unify_multipath/infra/mulp_tp.h
new file mode 100644
index 0000000..21a01d7
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/infra/mulp_tp.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2025, Huawei Inc
+ * multipath tracepoint header file
+ */
+
+#ifndef _MULP_TP_H_
+#define _MULP_TP_H_
+
+/* NOTE(review): the original include target was lost in extraction; this
+ * header uses struct list_head, which linux/list.h provides — confirm
+ * against the original patch.
+ */
+#include <linux/list.h>
+#include "mulp_tp_common.h"
+
+/* 64-bit hash key, addressable as one 64-bit value or two 32-bit halves. */
+typedef struct tagKEY_S {
+ union {
+ unsigned long long ull64bits;
+ unsigned int ui32bits[2];
+ } unKey64;
+} KEY_S;
+
+/* Hash node holding one registered tracepoint. */
+typedef struct tagLVOS_TP_HASH_S {
+ struct list_head stConflictList; /* links the node into a bucket's collision chain */
+ unsigned long long ullKey2; /* hash key 2 */
+ unsigned int uiKey1; /* hash key 1 */
+ LVOS_TRACEP_NEW_S stTP;
+} LVOS_TP_HASH_S;
+
+/* ***************hash table***************** */
+typedef struct tagLVOS_HASH_ITEM_S {
+ struct list_head stConflictList; /* links the item into a bucket's collision chain */
+ unsigned long long ullKey2; /* hash key */
+ unsigned int uiKey1; /* hash key */
+} LVOS_HASH_ITEM_S;
+
+typedef struct tagLVOS_HASH_LIST_S {
+ struct list_head stBucketList; /* collision-chain head of one hash bucket */
+} LVOS_HASH_LIST_S;
+
+typedef struct tagHASH_TABLE_S {
+ LVOS_HASH_LIST_S *pstHashElem;
+ unsigned int uiTableSize; /* capacity of the hash table */
+ unsigned int uiBits; /* hash algorithm parameter */
+} LVOS_HT_S;
+
+#endif // _MULP_TP_H_
diff --git a/fs/nfs/enfs/unify_multipath/infra/mulp_tp_common.h b/fs/nfs/enfs/unify_multipath/infra/mulp_tp_common.h
new file mode 100644
index 0000000..3908cb1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/infra/mulp_tp_common.h
@@ -0,0 +1,126 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2025, Huawei Inc
+ * multipath tracepoint common header file
+ */
+
+#ifndef _MULP_TP_COMMON_H_
+#define _MULP_TP_COMMON_H_
+
+/* NOTE(review): the original include target was lost in extraction; the
+ * LVOS_TP_START macro below uses unlikely(), which linux/compiler.h
+ * provides — confirm against the original patch.
+ */
+#include <linux/compiler.h>
+
+#ifndef RETURN_OK
+#define RETURN_OK 0
+#define RETURN_ERROR (~0)
+#endif
+
+#define LVOS_MAX_TRACEP_NUM 1024
+#define LVOS_STR_TO_KEY_BASE_NUM 31
+
+/* shift count used when computing the hash table size */
+#define LVOS_MAX_TP_HASH_SHIFT 7
+/* maximum number of chunks in the hash table, i.e. the table size */
+#define LVOS_MAX_TP_HASH_SIZE (1 << LVOS_MAX_TP_HASH_SHIFT)
+
+#define LVOS_TRACEP_PARAM_SIZE 32UL
+#ifndef MAX_NAME_LEN
+#define MAX_NAME_LEN 128
+#endif
+#ifndef MAX_DESC_LEN
+#define MAX_DESC_LEN 256
+#endif
+#define LVOS_TRACEP_STAT_DELETED 0
+#define LVOS_TRACEP_STAT_ACTIVE 1
+#define LVOS_TRACEP_STAT_DEACTIVE 2
+
+typedef enum tagLVOS_TP_TYPE_E {
+ LVOS_TP_TYPE_CALLBACK = 0,
+ LVOS_TP_TYPE_RESET,
+ LVOS_TP_TYPE_PAUSE,
+ LVOS_TP_TYPE_ABORT,
+ LVOS_TP_TYPE_BUTT
+} LVOS_TP_TYPE_E;
+
+typedef struct {
+ char achParamData[LVOS_TRACEP_PARAM_SIZE]; /* *< user-defined parameter data area. */
+} LVOS_TRACEP_PARAM_S;
+
+typedef void (*FN_TRACEP_COMMON_T)(LVOS_TRACEP_PARAM_S *, ...);
+
+/* One registered tracepoint (fault-injection point). */
+typedef struct tagLVOS_TRACEP_NEW_S {
+ char szName[MAX_NAME_LEN];
+ char szDesc[MAX_DESC_LEN];
+ unsigned int uiPid;
+ int iActive;
+ int type;
+ unsigned int timeAlive;
+ unsigned int timeCalled;
+ FN_TRACEP_COMMON_T fnHook;
+ LVOS_TRACEP_PARAM_S stParam;
+} LVOS_TRACEP_NEW_S;
+
+/* ioctl payload exchanged with user space through the proc file. */
+typedef struct {
+ unsigned int cmd;
+ unsigned int pid;
+ int type;
+ unsigned int timeAlive;
+ LVOS_TRACEP_PARAM_S userParam;
+ char traceName[MAX_NAME_LEN];
+} MulpTracePointCmd;
+
+#define IOCTL_MAGIC 'N'
+#define IOCTL_CMD_TP_ACTION _IOW(IOCTL_MAGIC, 5, MulpTracePointCmd)
+
+int mulp_tracepoint_init(void);
+void mulp_tracepoint_exit(void);
+int mulp_reg_tracepoint(unsigned int pid, const char *name, const char *desc, FN_TRACEP_COMMON_T fnHook);
+int mulp_unreg_tracepoint(unsigned int pid, const char *name);
+int mulp_get_tracepoint(unsigned int pid, const char *name, LVOS_TRACEP_NEW_S **tracepoint);
+void mulp_do_tracepoint_pause(LVOS_TRACEP_NEW_S *tracepoint);
+int mulp_deactive_tracepoint(unsigned int pid, const char *name);
+int mulp_active_tracepoint(unsigned int pid, const char *name, int type, unsigned int time,
+ LVOS_TRACEP_PARAM_S userParam);
+
+#ifndef MY_PID
+#define MY_PID 12345
+#endif
+
+#if !defined(MULP_LLT) || !defined(DEBUG)
+
+/* NOTE(review): REG passes MY_PID while UNREG/START/DEACTIVE pass pid 0 —
+ * presumably the pid is ignored on lookup; confirm against the tracepoint
+ * hash implementation.
+ */
+#define LVOS_TP_REG(name, desc, fn) mulp_reg_tracepoint(MY_PID, #name, desc, (FN_TRACEP_COMMON_T)(fn))
+#define LVOS_TP_UNREG(name) mulp_unreg_tracepoint(0, #name)
+#define LVOS_TP_START(name, ...) \
+ do { \
+ static LVOS_TRACEP_NEW_S *_pstTp = NULL; \
+ if (unlikely(NULL == _pstTp)) { \
+ (void)mulp_get_tracepoint(0, #name, &_pstTp); \
+ } \
+ if (NULL != _pstTp && LVOS_TRACEP_STAT_ACTIVE == _pstTp->iActive && LVOS_TP_TYPE_CALLBACK == _pstTp->type) { \
+ _pstTp->fnHook(&_pstTp->stParam, __VA_ARGS__); \
+ _pstTp->timeCalled++; \
+ if (_pstTp->timeAlive > 0 && 0 == --(_pstTp->timeAlive)) { \
+ mulp_deactive_tracepoint(0, #name); \
+ } \
+ } else { \
+ if (NULL != _pstTp && LVOS_TRACEP_STAT_ACTIVE == _pstTp->iActive && LVOS_TP_TYPE_PAUSE == _pstTp->type) { \
+ mulp_do_tracepoint_pause(_pstTp); \
+ _pstTp->timeCalled++; \
+ if (_pstTp->timeAlive > 0 && 0 == --(_pstTp->timeAlive)) { \
+ mulp_deactive_tracepoint(0, #name); \
+ } \
+ }
+
+/* end of fault-injection point (closes the braces opened by LVOS_TP_START) */
+#define LVOS_TP_END \
+ } \
+ } while (0)
+
+#else
+
+#define LVOS_TP_REG(name, desc, fn)
+#define LVOS_TP_UNREG(name)
+#define LVOS_TP_START(name, ...)
+#define LVOS_TP_END
+
+#endif
+#endif // _MULP_TP_COMMON_H_
diff --git a/fs/nfs/enfs/unify_multipath/infra/multipath_infra_adapter.c b/fs/nfs/enfs/unify_multipath/infra/multipath_infra_adapter.c
new file mode 100644
index 0000000..e69b0d1
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/infra/multipath_infra_adapter.c
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ */
+
+#include "mulp_porting.h"
+
+/* Allocate size bytes from the kernel heap (GFP_KERNEL, may sleep). */
+void *mulp_mem_malloc(uint32_t size)
+{
+ return kmalloc((size_t)size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mulp_mem_malloc);
+
+/* Allocate size bytes of zeroed memory (GFP_KERNEL, may sleep). */
+void *mulp_mem_zalloc(uint32_t size)
+{
+ /* kzalloc allocates and zeroes in one call; equivalent to the original
+ * kmalloc + memset but the idiomatic kernel form.
+ */
+ return kzalloc((size_t)size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mulp_mem_zalloc);
+
+/* Allocate size bytes of zeroed memory using the caller-supplied GFP flags
+ * (e.g. GFP_ATOMIC from non-sleeping contexts).
+ */
+void *mulp_mem_zalloc_by_flag(uint32_t size, gfp_t flag)
+{
+ /* kzalloc allocates and zeroes in one call; equivalent to the original
+ * kmalloc + memset but the idiomatic kernel form.
+ */
+ return kzalloc((size_t)size, flag);
+}
+EXPORT_SYMBOL_GPL(mulp_mem_zalloc_by_flag);
+
+/* Free memory obtained from the mulp_mem_* allocators; NULL is a no-op. */
+void mulp_mem_free(void *ptr)
+{
+ /* The original "return kfree(ptr);" returned a void expression from a
+ * void function, which is a C constraint violation (C11 6.8.6.4).
+ */
+ kfree(ptr);
+}
+EXPORT_SYMBOL_GPL(mulp_mem_free);
+
+/* Return the monotonic clock in milliseconds (ktime_get; not wall-clock). */
+uint64_t mulp_get_millisec_time(void)
+{
+ return (uint64_t)ktime_to_ms(ktime_get());
+}
+EXPORT_SYMBOL_GPL(mulp_get_millisec_time);
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/Makefile b/fs/nfs/enfs/unify_multipath/multipath/Makefile
new file mode 100644
index 0000000..4563da9
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/Makefile
@@ -0,0 +1,33 @@
+# Build rules for the unify_multipath kernel module.
+MODULE := unify_multipath
+# M is set by kbuild for out-of-tree builds; fall back to the local dir.
+ifeq ($(M),)
+ PWD := $(shell pwd)
+else
+ PWD := $(M)
+endif
+
+# Shared compiler/linker settings for all enfs sub-makefiles.
+include $(PWD)/../Rules.mak
+
+# Sources: the module entry point plus model/service/adapter layers, the
+# dpc adapter, and the shared proc helper from infra.
+SRCS += $(wildcard $(PWD)/multipath_module.c) \
+ $(wildcard $(PWD)/model/config/*.c) \
+ $(wildcard $(PWD)/model/dataset/*.c) \
+ $(wildcard $(PWD)/model/shard_view/*.c) \
+ $(wildcard $(PWD)/service/load_balance/*.c) \
+ $(wildcard $(PWD)/service/path_mgmt/*.c) \
+ $(wildcard $(PWD)/adapter/*.c) \
+ $(wildcard $(PWD)/adapter/diagnose/*.c) \
+ $(wildcard $(PWD)/adapter/multipath_adapter/*.c) \
+ $(wildcard $(PWD)/../dpc_adapter/*.c) \
+ $(wildcard $(PWD)/../infra/mulp_proc.c)
+
+INCLUDES += -I$(PWD)/include \
+ -I$(PWD)/model/config \
+ -I$(PWD)/model/dataset \
+ -I$(PWD)/model/shard_view \
+ -I$(PWD)/service/load_balance \
+ -I$(PWD)/service/path_mgmt \
+ -I/usr/include/linux \
+ -I$(PWD)/../dpc_adapter
+
+# Debug builds define DEBUG, which enables the tracepoint/fault-injection code.
+ifeq ($(RELEASE_TYPE), debug)
+ CFLAGS += -DDEBUG
+endif
diff --git a/fs/nfs/enfs/unify_multipath/multipath/adapter/diagnose/mulp_diagnose.c b/fs/nfs/enfs/unify_multipath/multipath/adapter/diagnose/mulp_diagnose.c
new file mode 100644
index 0000000..35696da
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/adapter/diagnose/mulp_diagnose.c
@@ -0,0 +1,5 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ */
diff --git a/fs/nfs/enfs/unify_multipath/multipath/adapter/mulp_init.c b/fs/nfs/enfs/unify_multipath/multipath/adapter/mulp_init.c
new file mode 100644
index 0000000..eca3b1f
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/adapter/mulp_init.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
+ * Description: unify multipath module init
+ * Author: y00867545
+ * Create: 2024-11-15
+ */
+
+#include "mulp_porting.h"
+#include "mulp_log.h"
+#include "mulp_dataset.h"
+#include "mulp_shard_view.h"
+#include "mulp_path_detect.h"
+#include "mulp_path_mgmt.h"
+
+/* One init/teardown pair in the module bring-up table; final may be NULL
+ * when the submodule needs no cleanup.
+ */
+struct mulp_init_entry {
+ char *name;
+ int (*init)(void);
+ void (*final)(void);
+};
+
+/* Run the final() hooks of entries [0, idx) in reverse registration order. */
+static void init_helper_finalize(struct mulp_init_entry *job, int idx)
+{
+ int i;
+
+ for (i = idx - 1; i >= 0; i--) {
+ if (job[i].final != NULL) {
+ job[i].final();
+ }
+ }
+}
+
+/* Run each entry's init() in table order; on the first failure, unwind the
+ * already-initialized entries in reverse order.
+ * Returns 0 on success or the failing init()'s error code (the original
+ * collapsed every failure to -1, discarding the real errno).
+ */
+static int init_helper_init(struct mulp_init_entry *job, int size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ ret = job[i].init();
+ if (ret) {
+ goto init_err;
+ }
+ }
+
+ return 0;
+
+init_err:
+ init_helper_finalize(job, i);
+ return ret;
+}
+
+#ifdef DEBUG
+/* Fault-injection hook: force an output pointer to NULL. */
+static void mulp_path_set_null_ptr(char *usr_para, void **out_addr)
+{
+ *out_addr = NULL;
+ return;
+}
+
+/* Fault-injection hook: force a ping status to -ETIMEDOUT. */
+static void mulp_path_set_ping_timeout(char *usr_para, int *status)
+{
+ *status = -ETIMEDOUT;
+ return;
+}
+
+/* Fault-injection hook: force an output int to -EINVAL. */
+static void mulp_path_set_int(char *usr_para, int *out_addr)
+{
+ *out_addr = -EINVAL;
+ return;
+}
+
+/* Fault-injection hook: force an output int to 0. */
+static void mulp_path_set_int0(char *usr_para, int *out_addr)
+{
+ *out_addr = 0;
+ return;
+}
+
+/* Fault-injection hook: force an output int to 1. */
+static void mulp_path_set_int1(char *usr_para, int *out_addr)
+{
+ *out_addr = 1;
+ return;
+}
+
+/* Register every path-management fault-injection tracepoint (debug builds). */
+void mulp_path_reg_tp_point(void)
+{
+ LVOS_TP_REG(MULP_PATH_UPDATE_VIEW_FAILED, "multipath update view failed", mulp_path_set_null_ptr);
+ LVOS_TP_REG(MULP_PATH_VIEW_FIND_VNODE_FAILED, "multipath view find vnode failed", mulp_path_set_null_ptr);
+ LVOS_TP_REG(MULP_PATH_DETECT_PING_TIMEOUT_FAIL, "multipath detect ping timeout fail", mulp_path_set_ping_timeout);
+ LVOS_TP_REG(MULP_PATH_DETECT_PING_FAIL, "multipath detect ping fail", mulp_path_set_int);
+ LVOS_TP_REG(MULP_PATH_DETECT_PING_CB_FAIL, "multipath detect ping cb fail", mulp_path_set_int1);
+ LVOS_TP_REG(MULP_PATH_MGMT_DESTROY_PATH_FAILED, "multipath mgmt destroy path failed", mulp_path_set_int);
+ LVOS_TP_REG(MULP_PATH_MGMT_CREATE_PATH_FAILED, "multipath mgmt create path failed", mulp_path_set_int);
+ LVOS_TP_REG(MULP_PATH_MGMT_SET_PATH_STATUS, "multipath mgmt set path status", mulp_path_set_int0);
+}
+
+/* Unregister everything registered by mulp_path_reg_tp_point(). */
+void mulp_path_unreg_tp_point(void)
+{
+ LVOS_TP_UNREG(MULP_PATH_UPDATE_VIEW_FAILED);
+ LVOS_TP_UNREG(MULP_PATH_VIEW_FIND_VNODE_FAILED);
+ LVOS_TP_UNREG(MULP_PATH_DETECT_PING_TIMEOUT_FAIL);
+ LVOS_TP_UNREG(MULP_PATH_DETECT_PING_FAIL);
+ LVOS_TP_UNREG(MULP_PATH_DETECT_PING_CB_FAIL);
+ LVOS_TP_UNREG(MULP_PATH_MGMT_DESTROY_PATH_FAILED);
+ LVOS_TP_UNREG(MULP_PATH_MGMT_CREATE_PATH_FAILED);
+ LVOS_TP_UNREG(MULP_PATH_MGMT_SET_PATH_STATUS);
+}
+
+/* Bring up the tracepoint subsystem and register the path tracepoints. */
+static int mulp_path_tp_point_init(void)
+{
+ int err = 0;
+ err = mulp_tracepoint_init();
+ if (err) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "tp point init failed");
+ return err;
+ }
+ mulp_path_reg_tp_point();
+ return 0;
+}
+
+/* Teardown counterpart of mulp_path_tp_point_init(). */
+static void mulp_path_tp_point_fini(void)
+{
+ mulp_path_unreg_tp_point();
+ mulp_tracepoint_exit();
+}
+#endif
+
+/* Ordered bring-up table: init runs top-down, teardown runs bottom-up.
+ * The tracepoint entry only exists in debug builds.
+ */
+static struct mulp_init_entry init_entry[] = {
+#ifdef DEBUG
+ {"mulp_path_tp_point_init_fini", mulp_path_tp_point_init, mulp_path_tp_point_fini},
+#endif
+ {"mulp_shard_view_init_fini", mulp_shard_view_init, mulp_shard_view_fini},
+ { "multipath dataset", mulp_dataset_init, NULL },
+ { "init manage link state ", mulp_path_detect_init, mulp_path_detect_fini},
+ { "multipath path mgmt", mulp_path_mgmt_init, mulp_path_mgmt_fini},
+};
+
+/* Initialize all multipath submodules; returns 0 or the first failure. */
+int mulp_init(void)
+{
+ return init_helper_init(init_entry, ARRAY_SIZE(init_entry));
+}
+
+/* Tear down all multipath submodules in reverse init order. */
+void mulp_fini(void)
+{
+ init_helper_finalize(init_entry, ARRAY_SIZE(init_entry));
+}
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/adapter/multipath_adapter/mulp_adapter.c b/fs/nfs/enfs/unify_multipath/multipath/adapter/multipath_adapter/mulp_adapter.c
new file mode 100644
index 0000000..73250af
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/adapter/multipath_adapter/mulp_adapter.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * This file mainly performs parameter validation and simple logic; the
+ * complex logic is implemented inside the multipath module.
+ */
+#include "multipath_api.h"
+#include "mulp_porting.h"
+#include "mulp_shard_view.h"
+#include "mulp_load_balance.h"
+#include "mulp_dataset.h"
+#include "mulp_path_mgmt.h"
+
+/* ****************************************************************************
+ * Create a multipath set and synchronously return its id. Link setup is
+ * asynchronous; the result is reported through a callback once it completes.
+ *
+ * app_id --- in, id of the calling application
+ * mp_id --- out, id of the created multipath set; later calls must pass it.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_create_mp(mulp_app_id_type app_id, mulp_create_mp_args *args, uint64_t *mp_id)
+{
+ if (app_id >= MULP_APP_MAX || args == NULL || mp_id == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong, app_id:%d.", app_id);
+ return -EINVAL;
+ }
+
+ /* Reject empty/over-limit ip pairs, unknown strategy, out-of-range
+ * nconnect, zero detect period, or unknown network type.
+ */
+ if (args->ip_pair_cnt == 0 || args->pair_arr == NULL || args->strategy >= MULP_STRATEGY_MAX ||
+ args->nconnect <= 0 || args->nconnect > MULP_MAX_NCONNECT || args->detect_period == 0 ||
+ args->network_type >= MULP_NETWORK_MAX) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong, app_id:%d, mulp_select_path_strategy:%d.",
+ app_id, args->strategy);
+ return -EINVAL;
+ }
+
+ return mulp_path_mgmt_create_mp(app_id, args, mp_id);
+}
+EXPORT_SYMBOL_GPL(mulp_create_mp);
+
+/* Forward a detect-period change to path management (no local validation). */
+int mulp_update_detect_period(uint64_t mp_id, uint32_t detect_period)
+{
+ return mulp_path_mgmt_update_detect_period(mp_id, detect_period);
+}
+EXPORT_SYMBOL_GPL(mulp_update_detect_period);
+
+/* Validate and forward an ip-pair update to path management. */
+int mulp_update_ip_pair(mulp_update_ip_pair_args *args)
+{
+ if (args == NULL || args->pair_arr == NULL || args->ip_pair_cnt == 0 || args->nconnect <= 0 ||
+ args->nconnect > MULP_MAX_NCONNECT) {
+ /* The ternaries keep the log safe when args itself is NULL. */
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "invalid argument: args %s pair array %s pair count %d nconnect %d",
+ ((args) ? "not null" : "null"), ((args && args->pair_arr) ? "not null" : "null"),
+ (args ? args->ip_pair_cnt : 0), (args ? args->nconnect : 0));
+ return -EINVAL;
+ }
+
+ return mulp_path_mgmt_update_ip_pair(args);
+}
+EXPORT_SYMBOL_GPL(mulp_update_ip_pair);
+
+/* Stub: IP-view updates are not implemented yet; always reports success.
+ * NOTE(review): unlike its siblings this function is not EXPORT_SYMBOL'd —
+ * presumably intentional while it remains a stub; confirm.
+ */
+int mulp_update_ip_view(mulp_update_ip_view_args *args)
+{
+ return 0;
+}
+
+/* Validate and forward a shard-view update to the shard_view module. */
+int mulp_update_shard_view(mulp_update_shard_view_args *args)
+{
+ if (args == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "mulp_update_shard_view_args is NULL");
+ return -EINVAL;
+ }
+ return mulp_shard_view_update_view(args);
+}
+EXPORT_SYMBOL_GPL(mulp_update_shard_view);
+
+/* ****************************************************************************
+ * Destroy a multipath set. After this call the id is no longer usable.
+ * Resource cleanup is asynchronous; the result is reported via callback.
+ *
+ * mp_id --- in, id of the multipath set.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_destroy_mp(uint64_t mp_id)
+{
+ if (mp_id == MULP_INVAILD_MP_ID) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong, mp_id:%llu.", mp_id);
+ return -EINVAL;
+ }
+ return mulp_path_mgmt_destroy_mp(mp_id);
+}
+EXPORT_SYMBOL_GPL(mulp_destroy_mp);
+
+/* ****************************************************************************
+ * Register the upper application's ops set; multipath calls back into it
+ * during its flows. Must be called before any other API.
+ *
+ * app_id --- in, id of the application.
+ * set --- the application's operation set
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_reg_app_ops(mulp_app_id_type app_id, mulp_app_ops_set *set)
+{
+ if (app_id >= MULP_APP_MAX || set == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "reg app_id:%d ops para wrong", app_id);
+ return -EINVAL;
+ }
+ mulp_dataset_reg_ops(app_id, set);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mulp_reg_app_ops);
+
+/* Unregister the ops set registered via mulp_reg_app_ops(). */
+int mulp_unreg_app_ops(mulp_app_id_type app_id)
+{
+ if (app_id >= MULP_APP_MAX) {
+ MULP_LOG(MULP_MODULE_API, MULP_WARNING, "unreg ops but no such app_id:%d", app_id);
+ return -EINVAL;
+ }
+ mulp_dataset_unreg_ops(app_id);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mulp_unreg_app_ops);
+
+/* Ping every path of the multipath set; callback reports the result.
+ * The B_FALSE argument marks this as a caller-initiated ping, not a
+ * periodic detect (see mulp_path_mgmt_ping_all_path).
+ */
+int mulp_ping_all_path(uint64_t mp_id, void *ctx, void (*callback)(int result, uint64_t mp_id, void *ctx))
+{
+ if (mp_id == MULP_INVAILD_MP_ID) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong, mp_id:%llu.", mp_id);
+ return -EINVAL;
+ }
+ return mulp_path_mgmt_ping_all_path(mp_id, ctx, callback, B_FALSE);
+}
+EXPORT_SYMBOL_GPL(mulp_ping_all_path);
+
+/* ****************************************************************************
+ * Get a pointer to the optimal path for a file handle (usually its uuid).
+ * Release the reference with mulp_io_put_path.
+ *
+ * mp_id --- in, id of the multipath set.
+ * uuid --- in, uuid of the file
+ * path_mgmt ---- in/out, returns the multipath path-management pointer; must not be NULL on input.
+ * path ---- in/out, returns the path pointer created by dpc/nfs; must not be NULL on input.
+ * is_direct_ctrl --- in/out, returns whether the chosen link is direct to the controller; must not be NULL
+ * return: 0 -- success, other -- failure, -EINVAL on NULL pointers
+ * *************************************************************************** */
+int mulp_io_get_optimal_path(uint64_t mp_id, mulp_file_info *file_info, uint64_t timestamp, void **path_mgmt,
+ void **path, uint32_t *is_direct_ctrl)
+{
+ int ret = 0;
+ if (path_mgmt == NULL || path == NULL || file_info == NULL || file_info->uuid == NULL || is_direct_ctrl == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong{NULL}.");
+ return -EINVAL;
+ }
+
+ ret = mulp_balance_get_optimal_path(mp_id, file_info, timestamp, path_mgmt, path, is_direct_ctrl);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mulp_io_get_optimal_path);
+
+/* ****************************************************************************
+ * Release the path reference taken by mulp_io_get_optimal_path.
+ *
+ * mp_id --- in, id of the multipath set.
+ * path_mgmt ---- in, path pointer; must not be NULL.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_io_put_path(uint64_t mp_id, void *path_mgmt)
+{
+ if (path_mgmt == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong{NULL}.");
+ return -EINVAL;
+ }
+
+ mulp_balance_put_path(path_mgmt);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mulp_io_put_path);
+
+/* Dump path state through the caller-supplied line callback; debug selects
+ * the verbose form.
+ */
+int mulp_dump_path_info(void (*callback)(const char *buffer), uint64_t mp_id, BOOLEAN_T debug)
+{
+ if (callback == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong{NULL}.");
+ return -EINVAL;
+ }
+ return mulp_path_mgmt_dump(callback, mp_id, debug);
+}
+EXPORT_SYMBOL_GPL(mulp_dump_path_info);
+
+/* Record a per-path statistic (type/value pair) on the given path.
+ * NOTE(review): path_info is not NULL-checked here, unlike
+ * mulp_path_notify_io_result below — confirm callers guarantee non-NULL.
+ */
+void mulp_path_count_stats(void *path_info, PATH_INFO_TYPE type, uint64_t value)
+{
+ mulp_path_set_path_info_attr(path_info, type, value);
+}
+EXPORT_SYMBOL_GPL(mulp_path_count_stats);
+
+/* Feed an I/O completion result back into path status tracking. */
+void mulp_path_notify_io_result(void *path_info, int io_result, uint64_t start_time)
+{
+ if (path_info == NULL) {
+ MULP_LOG(MULP_MODULE_API, MULP_ERR, "para wrong{NULL}.");
+ return;
+ }
+ mulp_path_update_status_by_io(path_info, io_result, start_time);
+}
+EXPORT_SYMBOL_GPL(mulp_path_notify_io_result);
+
+/* Look up the shard view for (wwn, cluster_id, pool_id). */
+int mulp_get_shard_view(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id, mulp_shard_view *shard_view)
+{
+ return mulp_shard_view_get_shard_view(wwn, cluster_id, pool_id, shard_view);
+}
+EXPORT_SYMBOL_GPL(mulp_get_shard_view);
+
+/* Drop the cached shard view; the result of the delete is ignored. */
+void mulp_destroy_shard_view(void)
+{
+ (void)mulp_shard_view_delete_view();
+}
+EXPORT_SYMBOL_GPL(mulp_destroy_shard_view);
+
+/* Clear read/write statistics for the multipath set named in data. */
+int mulp_path_clean_mp_rw_info(const char *data)
+{
+ return mulp_path_mgmt_clean_mp_rw_info(data);
+}
+EXPORT_SYMBOL_GPL(mulp_path_clean_mp_rw_info);
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_dataset.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_dataset.h
new file mode 100644
index 0000000..3333203
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_dataset.h
@@ -0,0 +1,104 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Platform dependent utilities
+ */
+#ifndef _MULTIP_DATASET_H_
+#define _MULTIP_DATASET_H_
+
+#include "mulp_porting.h"
+#include "multipath_api.h"
+
+#define MULP_DATASET_MAX_INSTANCE 512
+
+/* Versioned memory blob attached to a module or multipath-set slot. */
+typedef struct {
+ uint32_t version;
+ uint32_t len;
+ void *ptr;
+}mulp_mem_info;
+
+/* ****************************************************************************
+ * Allocate a multipath-set id slot for the application.
+ *
+ * mp_id --- out, id of the multipath set.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_zalloc_id(mulp_app_id_type app_id, uint64_t *mp_id);
+
+/* ****************************************************************************
+ * Only releases the mp_id slot; the caller must free the associated memory
+ * itself after calling this.
+ *
+ * mp_id --- in, id of the multipath set.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_destroy_id(uint64_t mp_id);
+
+/* ****************************************************************************
+ * Get a module's own global memory pointer by module id.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_get_module_global_mem(mulp_module_type module_id, mulp_mem_info *info);
+
+/* ****************************************************************************
+ * Set a module's own global memory pointer by module id.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_set_module_global_mem(mulp_module_type module_id, mulp_mem_info *info);
+
+/* ****************************************************************************
+ * Get the memory pointer stored under mp_id; when the reference is no longer
+ * needed, release it with mulp_dataset_put_module_mp_id_mem.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_get_module_mp_id_mem(uint64_t mp_id, mulp_mem_info *info);
+
+/* ****************************************************************************
+ * Set the memory pointer stored under mp_id.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_set_module_mp_id_mem(uint64_t mp_id, mulp_mem_info *info);
+
+/* ****************************************************************************
+ * Drop the reference to mp_id's memory pointer.
+ * *************************************************************************** */
+void mulp_dataset_put_module_mp_id_mem(uint64_t mp_id);
+
+/* ****************************************************************************
+ * Application registers its own path ops set.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+void mulp_dataset_reg_ops(mulp_app_id_type app_id, mulp_app_ops_set *ops);
+
+/* ****************************************************************************
+ * Application unregisters its own path ops set.
+ * *************************************************************************** */
+void mulp_dataset_unreg_ops(mulp_app_id_type app_id);
+
+/* ****************************************************************************
+ * Get the path ops set registered under app_id.
+*
+ * return: pointer to the ops set
+ * *************************************************************************** */
+mulp_app_ops_set *mulp_dataset_get_ops(mulp_app_id_type app_id);
+
+/* ****************************************************************************
+ * Initialize the dataset module.
+ *
+ * return: 0 -- success
+ * *************************************************************************** */
+int mulp_dataset_init(void);
+
+/* ****************************************************************************
+ * Retrieve the stored valid mp_ids.
+ *
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_dataset_get_all_mp_ids(uint64_t *mp_ids, uint32_t length, uint32_t *count);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_load_balance.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_load_balance.h
new file mode 100644
index 0000000..a41988a
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_load_balance.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Platform dependent utilities
+ */
+#ifndef _MULP_LOAD_BALANCE_H_
+#define _MULP_LOAD_BALANCE_H_
+
+#include "mulp_porting.h"
+#include "multipath_api.h"
+
+/* ****************************************************************************
+ * Get a pointer to the optimal path for a file handle (usually its uuid).
+ * Release the reference with mulp_balance_put_path.
+ *
+ * mp_id --- in, id of the multipath set.
+ * uuid --- in, uuid of the file
+ * path_mgmt ---- in/out, returns the multipath path-management pointer; must not be NULL on input.
+ * path ---- in/out, returns the path pointer created by dpc/nfs; must not be NULL on input.
+ * is_direct_ctrl --- in/out, returns whether the chosen link is direct to the controller; must not be NULL
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_balance_get_optimal_path(uint64_t mp_id, mulp_file_info *file_info, uint64_t timestamp, void **path_mgmt,
+ void **path, uint32_t *is_direct_ctrl);
+
+/* ****************************************************************************
+ * Release the path reference taken by mulp_io_get_optimal_path.
+ *
+ * path_mgmt ---- in, path-info pointer; must not be NULL.
+ * *************************************************************************** */
+void mulp_balance_put_path(void *path_mgmt);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_multipath_adapter.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_multipath_adapter.h
new file mode 100644
index 0000000..30e5223
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_multipath_adapter.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Multipath management header; exposes multipath create/delete/management to
+ * the nfs/dpc client. Externally visible structs and headers should keep a
+ * minimal-dependency principle: do not include internal headers and do not
+ * place internal struct functions here.
+ */
+/* Guard renamed from the misspelled _MULP_MULTIPAHT_APAPTERH_. */
+#ifndef _MULP_MULTIPATH_ADAPTER_H_
+#define _MULP_MULTIPATH_ADAPTER_H_
+
+#include "multipath_api.h"
+
+/*
+ * Upper layer changes the path-selection algorithm. Synchronous; returns 0
+ * on success or an error code.
+ * in: multipath set id, strategy
+ * -- not implemented yet
+ */
+int mulp_change_path_strategy(uint64_t mp_id, mulp_select_path_strategy strategy);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_detect.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_detect.h
new file mode 100644
index 0000000..554a359
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_detect.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Path-detection (link health probing) module entry points.
+ */
+
+#ifndef MULP_PATH_DETECT_H
+#define MULP_PATH_DETECT_H
+
+/* Initialize the path-detect submodule; returns 0 on success. */
+int mulp_path_detect_init(void);
+/* Teardown counterpart of mulp_path_detect_init(). */
+void mulp_path_detect_fini(void);
+
+#endif /* MULP_PATH_DETECT_H */
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_mgmt.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_mgmt.h
new file mode 100644
index 0000000..8feda68
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_path_mgmt.h
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Platform dependent utilities
+ */
+#ifndef _MULTI_PATH_MGMT_H_
+#define _MULTI_PATH_MGMT_H_
+
+#include "mulp_porting.h"
+#include "multipath_api.h"
+
+#define MULP_MAX_ONE_ZONE_NODE 64
+#define MULP_MAX_ONE_NODE_CPU 4
+#define MULP_MAX_PATH_PER_PATH_SET 4096
+
+/* Log a message prefixed with a textual dump of the given path, unthrottled. */
+#define MULP_PATH_INFO_LOG_NOLIMIT(mod, level, path, fmt, ...) \
+do { \
+ char buffer[256]; \
+ if (mulp_path_mgmt_dump_path_info((path), buffer, sizeof(buffer)) < 0) { \
+ buffer[0] = 0; \
+ } \
+ MULP_LOG_NOLIMIT(mod, level, "%s " fmt, buffer, ##__VA_ARGS__); \
+} while (0) // lint !e539
+
+/* Rate-limited variant: only logs when the interval/burst limiter allows. */
+#define MULP_PATH_INFO_LIMIT(mod, level, interval, burst, path, fmt, ...) \
+do { \
+ uint32_t can_print = 0; \
+ MULP_PRINT_LIMIT_FUNC(level, mod, interval, burst, can_print); \
+ if (can_print) { \
+ MULP_PATH_INFO_LOG_NOLIMIT(mod, level, path, fmt, ##__VA_ARGS__); \
+ } \
+} while (0) // lint !e539
+
+#define MULP_PATH_INFO_LOG(mod, level, path, fmt, ...) \
+ MULP_PATH_INFO_LIMIT(mod, level, MULP_LOG_INTERVAL, MULP_LOG_COUNT, path, fmt, ##__VA_ARGS__)
+
+#define MULP_PATH_INFO_LOG_LONG_INTERVAL(mod, level, path, fmt, ...) \
+ MULP_PATH_INFO_LIMIT(mod, level, MULP_LOG_LONG_INTERVAL, MULP_LOG_COUNT, path, fmt, ##__VA_ARGS__)
+
+/* Link state of a single path. */
+typedef enum {
+ MULP_PATH_STATUS_INIT = 0,
+ MULP_PATH_STATUS_STANDBY = 1,
+ MULP_PATH_STATUS_NORMAL = 2,
+ MULP_PATH_STATUS_ABNORMAL = 3,
+} mulp_path_link_status;
+
+/* Location of a vnode: zone, logical-system id, cpu. */
+typedef struct {
+ uint32_t zone_id;
+ uint32_t lsid;
+ uint32_t cpu_id;
+} mulp_path_vnode_info;
+
+/* Human-readable name for a link status; "unknown" for out-of-range values. */
+static inline __attribute__((always_inline)) const char* mulp_path_link_status_name(mulp_path_link_status status)
+{
+ static const char* name[] = {
+ [MULP_PATH_STATUS_INIT] = "init",
+ [MULP_PATH_STATUS_STANDBY] = "standby",
+ [MULP_PATH_STATUS_NORMAL] = "normal",
+ [MULP_PATH_STATUS_ABNORMAL] = "abnormal",
+ };
+ return ((uint32_t)status <= MULP_PATH_STATUS_ABNORMAL) ? name[(uint32_t)status] : "unknown";
+}
+
+// TODO: the dpc client should keep its own mp_id cache storing username and
+// password; path mgmt should not store them, since authentication is planned
+// to move into the dpc client.
+
+// TODO: add functions to app ops for creating and destroying user_path_private.
+
+/* ****************************************************************************
+ * Create a multipath set and synchronously return its id. Link setup is
+ * asynchronous; the result is reported through a callback once it completes.
+ *
+ * app_id --- in, id of the calling application
+ * mp_id --- out, id of the created multipath set; later calls must pass it.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_path_mgmt_create_mp(mulp_app_id_type app_id, mulp_create_mp_args *args, uint64_t *mp_id);
+
+/* ****************************************************************************
+ * Update the detection period of a multipath set.
+ *
+ * mp_id ---in, id of the multipath set.
+ * detect_period --- in, detection period of the multipath set
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_path_mgmt_update_detect_period(uint64_t mp_id, uint32_t detect_period);
+
+/* ****************************************************************************
+ * Destroy a multipath set. After this call the id is no longer usable.
+ * Resource cleanup is asynchronous; the result is reported via callback.
+ *
+ * mp_id --- in, id of the multipath set.
+ * return: 0 -- success, other -- failure
+ * *************************************************************************** */
+int mulp_path_mgmt_destroy_mp(uint64_t mp_id);
+
+int mulp_path_mgmt_ip_view(uint64_t mp_id);
+
+int mulp_path_mgmt_update_ip_pair(mulp_update_ip_pair_args *args);
+
+int mulp_path_mgmt_ping_all_path(uint64_t mp_id, void *ctx, void (*callback)(int result, uint64_t mp_id, void *ctx),
+ BOOLEAN_T is_detect);
+
+int mulp_path_mgmt_get_path(uint64_t mp_id, mulp_path_vnode_info *vnode_info, void **path_mgmt, void **path,
+ uint32_t *is_direct_ctrl);
+
+void mulp_path_mgmt_put_path(void *path_info);
+
+int mulp_path_mgmt_dump(void (*callback)(const char *buffer), uint64_t mp_id, BOOLEAN_T debug);
+
+// Returns B_FALSE when the status change is rejected; detect_timeout is only
+// valid when is_detect is B_TRUE.
+BOOLEAN_T mulp_path_update_status_start(void *path_set, void *path_info, mulp_app_ops_set *ops, BOOLEAN_T is_detect,
+ uint64_t detect_timeout);
+
+void mulp_path_update_status_end(void *path_info, int32_t ret, BOOLEAN_T is_detect);
+
+// io_result: 0 means success, non-zero means failure.
+void mulp_path_update_status_by_io(void *path_info, int io_result, uint64_t start_time);
+
+void mulp_path_set_path_info_attr(void *path_info, PATH_INFO_TYPE type, uint64_t value);
+
+uint64_t mulp_path_get_path_info_attr(void *path_info, PATH_INFO_TYPE type);
+
+int mulp_path_mgmt_clean_mp_rw_info(const char *data);
+
+int mulp_path_mgmt_init(void);
+
+void mulp_path_mgmt_fini(void);
+
+int32_t mulp_path_mgmt_dump_path_info(void *path_info, char *buffer, uint64_t size);
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/mulp_shard_view.h b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_shard_view.h
new file mode 100644
index 0000000..cf8e893
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/mulp_shard_view.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2024, Huawei Inc
+ *
+ * Multipath management header; exposes multipath create/delete/management to
+ * the nfs/dpc client. Externally visible structs and headers should keep a
+ * minimal-dependency principle: do not include internal headers and do not
+ * place internal struct functions here.
+ */
+#ifndef _MULP_SHARD_VIEW_H_
+#define _MULP_SHARD_VIEW_H_
+
+#include "multipath_api.h"
+#include "mulp_porting.h"
+/*
+ Arguments are validated by the external-interface adapter; this function
+ does not re-validate them.
+*/
+int mulp_shard_view_update_view(mulp_update_shard_view_args *args);
+
+
+int mulp_shard_view_delete_view(void);
+
+/* Resolve the owner (zone, lsid, cpu) of a file from the shard view. */
+int mulp_shard_view_get_file_owner(mulp_file_info *file_info, uint32_t *zone_id, uint32_t *lsid, uint32_t *cpu_id);
+
+int mulp_shard_view_init(void);
+void mulp_shard_view_fini(void);
+
+int mulp_shard_view_get_shard_view(uint64_t wwn, uint64_t cluster_id, uint64_t pool_id, mulp_shard_view *shard_view);
+
+#endif
\ No newline at end of file
diff --git a/fs/nfs/enfs/unify_multipath/multipath/include/multipath.h b/fs/nfs/enfs/unify_multipath/multipath/include/multipath.h
new file mode 100644
index 0000000..1135c44
--- /dev/null
+++ b/fs/nfs/enfs/unify_multipath/multipath/include/multipath.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. All rights reserved.
+ * Description: multipath normalization
+ * Author: l30060990
+ * Create:
+ */
+
+#include
+#include
+#include
+#include