[PATCH] libhns: Cleanup and Bugfixes

From: Xinghai Cen <cenxinghai@h-partners.com>

Cleanup and Bugfixes:
0053-libhns-Clean-up-data-type-issues.patch
0054-libhns-Fix-wrong-max-inline-data-value.patch
0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch

Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com>
---
 0053-libhns-Clean-up-data-type-issues.patch   | 152 ++++++++++++++++++
 ...bhns-Fix-wrong-max-inline-data-value.patch |  63 ++++++++
 ...ng-order-of-spin-unlock-in-modify-qp.patch |  42 +++++
 rdma-core.spec                                |  11 +-
 4 files changed, 267 insertions(+), 1 deletion(-)
 create mode 100644 0053-libhns-Clean-up-data-type-issues.patch
 create mode 100644 0054-libhns-Fix-wrong-max-inline-data-value.patch
 create mode 100644 0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch

diff --git a/0053-libhns-Clean-up-data-type-issues.patch b/0053-libhns-Clean-up-data-type-issues.patch
new file mode 100644
index 0000000..95a95cc
--- /dev/null
+++ b/0053-libhns-Clean-up-data-type-issues.patch
@@ -0,0 +1,152 @@
+From 8f95635c359ca3c36f5b1b48889719b6840c07cc Mon Sep 17 00:00:00 2001
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+Date: Thu, 13 Mar 2025 17:26:50 +0800
+Subject: [PATCH 53/55] libhns: Clean up data type issues
+
+mainline inclusion
+from mainline-v56.0-65
+commit fbe8827f270d0aff4a28bb645b826fa98fe00c9d
+category: bugfix
+bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/IC1V44
+CVE: NA
+
+Reference: https://github.com/linux-rdma/rdma-core/pull/1579/commits/fbe8827f270d0aff4a...
+
+---------------------------------------------------------------------
+
+Clean up mixed signed/unsigned type issues. Fix a wrong format
+character as well.
+
+Fixes: cf6d9149f8f5 ("libhns: Introduce hns direct verbs")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com>
+---
+ providers/hns/hns_roce_u.h       |  4 ++--
+ providers/hns/hns_roce_u_hw_v2.c | 15 ++++++++-------
+ providers/hns/hns_roce_u_verbs.c |  6 +++---
+ 3 files changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
+index 5eedb81..e7e3f01 100644
+--- a/providers/hns/hns_roce_u.h
++++ b/providers/hns/hns_roce_u.h
+@@ -356,7 +356,7 @@ struct hns_roce_wq {
+ 	unsigned long *wrid;
+ 	struct hns_roce_spinlock hr_lock;
+ 	unsigned int wqe_cnt;
+-	int max_post;
++	unsigned int max_post;
+ 	unsigned int head;
+ 	unsigned int tail;
+ 	unsigned int max_gs;
+@@ -392,7 +392,7 @@ struct hns_roce_qp {
+ 	struct verbs_qp verbs_qp;
+ 	struct hns_roce_buf buf;
+ 	struct hns_roce_dca_buf dca_wqe;
+-	int max_inline_data;
++	unsigned int max_inline_data;
+ 	unsigned int buf_size;
+ 	unsigned int sq_signal_bits;
+ 	struct hns_roce_wq sq;
+diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
+index 3137111..cea3043 100644
+--- a/providers/hns/hns_roce_u_hw_v2.c
++++ b/providers/hns/hns_roce_u_hw_v2.c
+@@ -173,7 +173,7 @@ static enum ibv_wc_status get_wc_status(uint8_t status)
+ 		{ HNS_ROCE_V2_CQE_XRC_VIOLATION_ERR, IBV_WC_REM_INV_RD_REQ_ERR },
+ 	};
+ 
+-	for (int i = 0; i < ARRAY_SIZE(map); i++) {
++	for (unsigned int i = 0; i < ARRAY_SIZE(map); i++) {
+ 		if (status == map[i].cqe_status)
+ 			return map[i].wc_status;
+ 	}
+@@ -1189,7 +1189,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ 	unsigned int sge_mask = qp->ex_sge.sge_cnt - 1;
+ 	void *dst_addr, *src_addr, *tail_bound_addr;
+ 	uint32_t src_len, tail_len;
+-	int i;
++	uint32_t i;
+ 
+ 	if (sge_info->total_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE)
+ 		return EINVAL;
+@@ -1259,7 +1259,7 @@ static void fill_ud_inn_inl_data(const struct ibv_send_wr *wr,
+ 
+ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ {
+-	int mtu = mtu_enum_to_int(qp->path_mtu);
++	unsigned int mtu = mtu_enum_to_int(qp->path_mtu);
+ 
+ 	return (len <= qp->max_inline_data && len <= mtu);
+ }
+@@ -1698,7 +1698,8 @@ static void fill_recv_sge_to_wqe(struct ibv_recv_wr *wr, void *wqe,
+ 				 unsigned int max_sge, bool rsv)
+ {
+ 	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+-	unsigned int i, cnt;
++	unsigned int cnt;
++	int i;
+ 
+ 	for (i = 0, cnt = 0; i < wr->num_sge; i++) {
+ 		/* Skip zero-length sge */
+@@ -1726,7 +1727,7 @@ static void fill_recv_inl_buf(struct hns_roce_rinl_buf *rinl_buf,
+ 			      unsigned int wqe_idx, struct ibv_recv_wr *wr)
+ {
+ 	struct ibv_sge *sge_list;
+-	unsigned int i;
++	int i;
+ 
+ 	if (!rinl_buf->wqe_cnt)
+ 		return;
+@@ -2053,7 +2054,7 @@ static int check_post_srq_valid(struct hns_roce_srq *srq,
+ static int get_wqe_idx(struct hns_roce_srq *srq, unsigned int *wqe_idx)
+ {
+ 	struct hns_roce_idx_que *idx_que = &srq->idx_que;
+-	int bit_num;
++	unsigned int bit_num;
+ 	int i;
+ 
+ 	/* bitmap[i] is set zero if all bits are allocated */
+@@ -2451,7 +2452,7 @@ static void set_sgl_rc(struct hns_roce_v2_wqe_data_seg *dseg,
+ 	unsigned int mask = qp->ex_sge.sge_cnt - 1;
+ 	unsigned int msg_len = 0;
+ 	unsigned int cnt = 0;
+-	int i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < num_sge; i++) {
+ 		if (!sge[i].length)
+diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
+index 848f836..f0098ed 100644
+--- a/providers/hns/hns_roce_u_verbs.c
++++ b/providers/hns/hns_roce_u_verbs.c
+@@ -422,7 +422,7 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr,
+ {
+ 	struct hns_roce_pad *pad = to_hr_pad(attr->parent_domain);
+ 
+-	if (!attr->cqe || attr->cqe > context->max_cqe) {
++	if (!attr->cqe || attr->cqe > (uint32_t)context->max_cqe) {
+ 		verbs_err(&context->ibv_ctx, "unsupported cq depth %u.\n",
+ 			  attr->cqe);
+ 		return EINVAL;
+@@ -1080,7 +1080,7 @@ static int check_hnsdv_qp_attr(struct hns_roce_context *ctx,
+ 		return 0;
+ 
+ 	if (!check_comp_mask(hns_attr->comp_mask, HNSDV_QP_SUP_COMP_MASK)) {
+-		verbs_err(&ctx->ibv_ctx, "invalid hnsdv comp_mask 0x%x.\n",
++		verbs_err(&ctx->ibv_ctx, "invalid hnsdv comp_mask 0x%llx.\n",
+ 			  hns_attr->comp_mask);
+ 		return EINVAL;
+ 	}
+@@ -1257,7 +1257,7 @@ static int alloc_recv_rinl_buf(uint32_t max_sge,
+ 			       struct hns_roce_rinl_buf *rinl_buf)
+ {
+ 	unsigned int cnt;
+-	int i;
++	unsigned int i;
+ 
+ 	cnt = rinl_buf->wqe_cnt;
+ 	rinl_buf->wqe_list = calloc(cnt, sizeof(struct hns_roce_rinl_wqe));
+-- 
+2.33.0
+
diff --git a/0054-libhns-Fix-wrong-max-inline-data-value.patch b/0054-libhns-Fix-wrong-max-inline-data-value.patch
new file mode 100644
index 0000000..4389023
--- /dev/null
+++ b/0054-libhns-Fix-wrong-max-inline-data-value.patch
@@ -0,0 +1,63 @@
+From 10534f0ef2ca73e8e59a38e51969cae864f9fbbf Mon Sep 17 00:00:00 2001
+From: wenglianfa <wenglianfa@huawei.com>
+Date: Thu, 13 Mar 2025 17:26:51 +0800
+Subject: [PATCH 54/55] libhns: Fix wrong max inline data value
+
+mainline inclusion
+from mainline-v56.0-65
+commit 8307b7c54ed81c343ec874e2066de79260b666d2
+category: bugfix
+bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/IC1V44
+CVE: NA
+
+Reference: https://github.com/linux-rdma/rdma-core/pull/1579/commits/8307b7c54ed81c343e...
+
+---------------------------------------------------------------------
+
+When cap.max_inline_data is 0, it will be modified to 1 since
+roundup_pow_of_two(0) == 1, which violates users' expectations.
+Here fix it.
+
+Fixes: 2aff0d55098c ("libhns: Fix the problem of sge nums")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com>
+---
+ providers/hns/hns_roce_u_verbs.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
+index f0098ed..5fe169e 100644
+--- a/providers/hns/hns_roce_u_verbs.c
++++ b/providers/hns/hns_roce_u_verbs.c
+@@ -1494,6 +1494,16 @@ static unsigned int get_sge_num_from_max_inl_data(bool is_ud,
+ 	return inline_sge;
+ }
+ 
++static uint32_t get_max_inline_data(struct hns_roce_context *ctx,
++				    struct ibv_qp_cap *cap)
++{
++	if (cap->max_inline_data)
++		return min_t(uint32_t, roundup_pow_of_two(cap->max_inline_data),
++			     ctx->max_inline_data);
++
++	return 0;
++}
++
+ static void set_ext_sge_param(struct hns_roce_context *ctx,
+ 			      struct ibv_qp_init_attr_ex *attr,
+ 			      struct hns_roce_qp *qp, unsigned int wr_cnt)
+@@ -1510,9 +1520,7 @@ static void set_ext_sge_param(struct hns_roce_context *ctx,
+ 					attr->cap.max_send_sge);
+ 
+ 	if (ctx->config & HNS_ROCE_RSP_EXSGE_FLAGS) {
+-		attr->cap.max_inline_data = min_t(uint32_t, roundup_pow_of_two(
+-				attr->cap.max_inline_data),
+-				ctx->max_inline_data);
++		attr->cap.max_inline_data = get_max_inline_data(ctx, &attr->cap);
+ 
+ 		inline_ext_sge = max(ext_wqe_sge_cnt,
+ 				     get_sge_num_from_max_inl_data(is_ud,
+-- 
+2.33.0
+
diff --git a/0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch b/0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch
new file mode 100644
index 0000000..b16fe12
--- /dev/null
+++ b/0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch
@@ -0,0 +1,42 @@
+From d1409106e1323c54fbbb0618c071efb024f58130 Mon Sep 17 00:00:00 2001
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+Date: Thu, 13 Mar 2025 17:26:52 +0800
+Subject: [PATCH 55/55] libhns: Fix wrong order of spin unlock in modify qp
+
+mainline inclusion
+from mainline-v56.0-65
+commit d2b41c86c49335b3c6ab638abb1c0e31f5ba0e8f
+category: bugfix
+bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/IC1V44
+CVE: NA
+
+Reference: https://github.com/linux-rdma/rdma-core/pull/1579/commits/d2b41c86c49335b3c6...
+
+---------------------------------------------------------------------
+
+The spin_unlock order should be the reverse of spin_lock order.
+
+Fixes: 179f015e090d ("libhns: Add support for lock-free QP")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Signed-off-by: Xinghai Cen <cenxinghai@h-partners.com>
+---
+ providers/hns/hns_roce_u_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
+index cea3043..3a1249f 100644
+--- a/providers/hns/hns_roce_u_hw_v2.c
++++ b/providers/hns/hns_roce_u_hw_v2.c
+@@ -1910,8 +1910,8 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ 	if (flag) {
+ 		if (!ret)
+ 			qp->state = IBV_QPS_ERR;
+-		hns_roce_spin_unlock(&hr_qp->sq.hr_lock);
+ 		hns_roce_spin_unlock(&hr_qp->rq.hr_lock);
++		hns_roce_spin_unlock(&hr_qp->sq.hr_lock);
+ 	}
+ 
+ 	if (ret)
+-- 
+2.33.0
+
diff --git a/rdma-core.spec b/rdma-core.spec
index 286b8dd..b49c6bf 100644
--- a/rdma-core.spec
+++ b/rdma-core.spec
@@ -1,6 +1,6 @@
 Name: rdma-core
 Version: 50.0
-Release: 25
+Release: 26
 Summary: RDMA core userspace libraries and daemons
 License: GPL-2.0-only OR BSD-2-Clause AND BSD-3-Clause
 Url: https://github.com/linux-rdma/rdma-core
@@ -58,6 +58,9 @@ patch49: 0049-libzrdma-Add-poll-cqe-error-to-Failed-status.patch
 patch50: 0050-libzrdma-Add-sq-rq-flush-cqe-and-log-optimization.patch
 patch51: 0051-libzrdma-Fix-capability-related-bugs.patch
 patch52: 0052-libxscale-Match-dev-by-vid-and-did.patch
+patch53: 0053-libhns-Clean-up-data-type-issues.patch
+patch54: 0054-libhns-Fix-wrong-max-inline-data-value.patch
+patch55: 0055-libhns-Fix-wrong-order-of-spin-unlock-in-modify-qp.patch
 
 BuildRequires: binutils cmake >= 2.8.11 gcc libudev-devel pkgconfig pkgconfig(libnl-3.0)
 BuildRequires: pkgconfig(libnl-route-3.0) systemd systemd-devel
@@ -637,6 +640,12 @@ fi
 %doc %{_docdir}/%{name}-%{version}/70-persistent-ipoib.rules
 
 %changelog
+* Thu Apr 17 2025 Xinghai Cen <cenxinghai@h-partners.com> - 50.0-26
+- Type: bugfix
+- ID: NA
+- SUG: NA
+- DESC: libhns: Cleanup and Bugfixes
+
 * Thu Mar 20 2025 Xin Tian <tianx@yunsilicon.com> - 50.0-25
 - Type: bugfix
- ID: NA
-- 
2.33.0
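
For reference, below is a minimal standalone sketch (not part of the patch series above) of the behaviour that 0054-libhns-Fix-wrong-max-inline-data-value.patch corrects: roundup_pow_of_two(0) evaluates to 1, so a requested max_inline_data of 0 must be returned unchanged instead of being rounded up. The helpers here are illustrative stand-ins only; they are not libhns symbols.

#include <stdint.h>
#include <stdio.h>

/* Kernel-style semantics: rounding 0 up still yields 1. */
static uint32_t roundup_pow_of_two(uint32_t n)
{
	uint32_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/*
 * Hypothetical stand-in for the new get_max_inline_data() helper:
 * a request of 0 stays 0; non-zero requests are rounded up to a
 * power of two and capped by the device limit.
 */
static uint32_t clamp_max_inline_data(uint32_t requested, uint32_t hw_limit)
{
	uint32_t rounded;

	if (!requested)
		return 0;

	rounded = roundup_pow_of_two(requested);
	return rounded < hw_limit ? rounded : hw_limit;
}

int main(void)
{
	printf("%u\n", clamp_max_inline_data(0, 1024));    /* 0, not 1 */
	printf("%u\n", clamp_max_inline_data(100, 1024));  /* 128 */
	printf("%u\n", clamp_max_inline_data(4096, 1024)); /* 1024 */
	return 0;
}

Built with any C99 compiler, this prints 0, 128 and 1024, matching the clamping the patch introduces.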