To commit: ?? ("RDMA/hns: Support mmapping reset state to userspace").
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 kernel-headers/rdma/hns-abi.h | 6 ++++++
 1 file changed, 6 insertions(+)
diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h
index 94e861870..065eb2e0a 100644
--- a/kernel-headers/rdma/hns-abi.h
+++ b/kernel-headers/rdma/hns-abi.h
@@ -136,6 +136,7 @@ struct hns_roce_ib_alloc_ucontext_resp {
 	__u32	max_inline_data;
 	__u8	congest_type;
 	__u8	reserved0[7];
+	__aligned_u64 reset_mmap_key;
 };
 
 struct hns_roce_ib_alloc_ucontext {
@@ -153,4 +154,9 @@ struct hns_roce_ib_create_ah_resp {
 	__u8	tc_mode;
 };
 
+struct hns_roce_reset_state {
+	__u32	hw_ready;
+	__u32	reserved;
+};
+
 #endif /* HNS_ABI_USER_H */
From: Chengchang Tang <tangchengchang@huawei.com>
Get the reset state from the kernel by mmap.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 providers/hns/hns_roce_u.c | 42 +++++++++++++++++++++++++++++++++-----
 providers/hns/hns_roce_u.h |  1 +
 2 files changed, 38 insertions(+), 5 deletions(-)
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 6492040fc..f3807192f 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -189,6 +189,41 @@ static void hns_roce_destroy_context_lock(struct hns_roce_context *context)
 	pthread_mutex_destroy(&context->db_list_mutex);
 }
+static int hns_roce_mmap(struct hns_roce_device *hr_dev,
+			 struct hns_roce_alloc_ucontext_resp *resp,
+			 struct hns_roce_context *context, int cmd_fd)
+{
+	uint64_t reset_mmap_key = resp->reset_mmap_key;
+
+	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
+			    MAP_SHARED, cmd_fd, 0);
+	if (context->uar == MAP_FAILED)
+		return ENOMEM;
+
+	/* Check whether kernel supports mmapping reset state */
+	if (!reset_mmap_key)
+		return 0;
+
+	context->reset_state = mmap(NULL, hr_dev->page_size, PROT_READ,
+				    MAP_SHARED, cmd_fd, reset_mmap_key);
+	if (context->reset_state == MAP_FAILED)
+		goto db_free;
+
+	return 0;
+
+db_free:
+	munmap(context->uar, hr_dev->page_size);
+	return ENOMEM;
+}
+
+static void hns_roce_munmap(struct hns_roce_device *hr_dev,
+			    struct hns_roce_context *context)
+{
+	munmap(context->uar, hr_dev->page_size);
+	if (context->reset_state)
+		munmap(context->reset_state, hr_dev->page_size);
+}
+
 static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
						     int cmd_fd,
						     void *private_data)
@@ -215,12 +250,9 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	if (set_context_attr(hr_dev, context, &resp))
 		goto err_set_attr;
-	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
-			    MAP_SHARED, cmd_fd, 0);
-	if (context->uar == MAP_FAILED)
+	if (hns_roce_mmap(hr_dev, &resp, context, cmd_fd))
 		goto err_set_attr;
-
 	verbs_set_ops(&context->ibv_ctx, &hns_common_ops);
 	verbs_set_ops(&context->ibv_ctx, &hr_dev->u_hw->hw_ops);
@@ -239,7 +271,7 @@ static void hns_roce_free_context(struct ibv_context *ibctx)
 	struct hns_roce_device *hr_dev = to_hr_dev(ibctx->device);
 	struct hns_roce_context *context = to_hr_ctx(ibctx);
 
-	munmap(context->uar, hr_dev->page_size);
+	hns_roce_munmap(hr_dev, context);
 	hns_roce_destroy_context_lock(context);
 	verbs_uninit_context(&context->ibv_ctx);
 	free(context);
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 51d927f2c..9352d8bcd 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -204,6 +204,7 @@ struct hns_roce_spinlock {
 struct hns_roce_context {
 	struct verbs_context	ibv_ctx;
 	void			*uar;
+	void			*reset_state;
 	pthread_spinlock_t	uar_lock;
 
 	struct {
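For illustration only (not part of the patch): a minimal sketch of how a consumer of the new ucontext response is meant to map and read the reset page. It assumes kernel-headers/rdma/hns-abi.h from the first patch is included, that cmd_fd and the device page size are available from the verbs context, and it simplifies error handling; the helper name hw_is_ready is hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Sketch: map the read-only reset page advertised via reset_mmap_key and
 * report whether the HW is still ready. A zero key means the kernel does
 * not support this feature, in which case the HW is assumed usable. */
static bool hw_is_ready(int cmd_fd, size_t page_size,
			const struct hns_roce_ib_alloc_ucontext_resp *resp)
{
	struct hns_roce_reset_state *state;
	bool ready;

	if (!resp->reset_mmap_key)
		return true;

	state = mmap(NULL, page_size, PROT_READ, MAP_SHARED, cmd_fd,
		     resp->reset_mmap_key);
	if (state == MAP_FAILED)
		return true;

	ready = !!state->hw_ready;
	munmap(state, page_size);
	return ready;
}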
From: Chengchang Tang <tangchengchang@huawei.com>
If the device has been reset, fail post send/recv/srq_recv and return an error to notify the user.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 providers/hns/hns_roce_u_hw_v2.c | 40 ++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index 88bf55fd1..4a24967cb 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -728,6 +728,13 @@ static int hns_roce_poll_one(struct hns_roce_context *ctx,
 	return hns_roce_flush_cqe(*cur_qp, status);
 }
+static bool hns_roce_is_reset(struct hns_roce_context *ctx)
+{
+	struct hns_roce_reset_state *state = ctx->reset_state;
+
+	return state && !state->hw_ready;
+}
+
 static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
 				 struct ibv_wc *wc)
 {
@@ -782,11 +789,16 @@ static int hns_roce_u_v2_arm_cq(struct ibv_cq *ibvcq, int solicited)
 static inline int check_qp_send(struct ibv_qp *qp)
 {
+	struct hns_roce_context *ctx = to_hr_ctx(qp->context);
+
 	if (unlikely(qp->state == IBV_QPS_RESET ||
 		     qp->state == IBV_QPS_INIT ||
 		     qp->state == IBV_QPS_RTR))
 		return EINVAL;
+	if (hns_roce_is_reset(ctx))
+		return EIO;
+
 	return 0;
 }
@@ -1357,9 +1369,14 @@ out:
 static inline int check_qp_recv(struct ibv_qp *qp)
 {
+	struct hns_roce_context *ctx = to_hr_ctx(qp->context);
+
 	if (qp->state == IBV_QPS_RESET)
 		return EINVAL;
+	if (hns_roce_is_reset(ctx))
+		return EIO;
+
 	return 0;
 }
@@ -1757,6 +1774,14 @@ static void update_srq_db(struct hns_roce_context *ctx, struct hns_roce_db *db,
 			  (__le32 *)db);
 }
+static int check_srq_recv(struct hns_roce_context *ctx)
+{
+	if (hns_roce_is_reset(ctx))
+		return EIO;
+
+	return 0;
+}
+
 static int hns_roce_u_v2_post_srq_recv(struct ibv_srq *ib_srq,
 				       struct ibv_recv_wr *wr,
 				       struct ibv_recv_wr **bad_wr)
@@ -1768,6 +1793,12 @@ static int hns_roce_u_v2_post_srq_recv(struct ibv_srq *ib_srq,
 	int ret = 0;
 	void *wqe;
+	ret = check_srq_recv(ctx);
+	if (ret) {
+		*bad_wr = wr;
+		return ret;
+	}
+
 	hns_roce_spin_lock(&srq->hr_lock);
 	max_sge = srq->max_gs - srq->rsv_sge;
@@ -2553,13 +2584,12 @@ static void wr_set_inline_data_list_ud(struct ibv_qp_ex *ibv_qp, size_t num_buf,
 static void wr_start(struct ibv_qp_ex *ibv_qp)
 {
 	struct hns_roce_qp *qp = to_hr_qp(&ibv_qp->qp_base);
-	enum ibv_qp_state state = ibv_qp->qp_base.state;
+	int ret;
 
-	if (state == IBV_QPS_RESET ||
-	    state == IBV_QPS_INIT ||
-	    state == IBV_QPS_RTR) {
+	ret = check_qp_send(&ibv_qp->qp_base);
+	if (ret) {
 		hns_roce_spin_lock(&qp->sq.hr_lock);
-		qp->err = EINVAL;
+		qp->err = ret;
 		return;
 	}
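For illustration only (not part of the patch): with this change, posting on a QP whose device is under reset fails with EIO instead of waiting for a CQE that will never arrive. A minimal sketch of application-side handling, assuming the QP and WR are set up elsewhere; the helper name post_send_checked is hypothetical.

#include <errno.h>
#include <stdio.h>
#include <infiniband/verbs.h>

/* Sketch: post a send and treat EIO as "device is resetting", which is
 * distinct from EINVAL (QP in a non-ready state). */
static int post_send_checked(struct ibv_qp *qp, struct ibv_send_wr *wr)
{
	struct ibv_send_wr *bad_wr = NULL;
	int ret = ibv_post_send(qp, wr, &bad_wr);

	if (ret == EIO)
		fprintf(stderr, "device reset detected, stop posting\n");
	else if (ret)
		fprintf(stderr, "post_send failed: %d\n", ret);

	return ret;
}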
From: Chengchang Tang <tangchengchang@huawei.com>
While the HW is being reset, or after it has been reset, it no longer generates CQEs, so users cannot poll all the work completions they expect.
This patch allows the userspace driver to compose the expected WCs in place of the HW in this case. All SQs, RQs and SRQs are linked to per-CQ lists. When the reset state is detected while polling a CQ, walk these lists and compose software WCs with error status IBV_WC_WR_FLUSH_ERR, up to the number of entries requested by the user.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 providers/hns/hns_roce_u.h       |   9 ++
 providers/hns/hns_roce_u_hw_v2.c | 196 ++++++++++++++++++++++++++++++-
 providers/hns/hns_roce_u_hw_v2.h |   2 +
 providers/hns/hns_roce_u_verbs.c |  91 ++++++++++++++
 4 files changed, 293 insertions(+), 5 deletions(-)
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 9352d8bcd..ac7153014 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -267,6 +267,11 @@ struct hns_roce_cq {
 	unsigned int			cqe_size;
 	struct hns_roce_v2_cqe		*cqe;
 	struct ibv_pd			*parent_domain;
+	struct list_head		list_sq;
+	struct list_head		list_rq;
+	struct list_head		list_srq;
+	struct list_head		list_xrc_srq;
+	struct hns_roce_v2_cqe		*sw_cqe;
 };
 struct hns_roce_idx_que {
@@ -302,6 +307,7 @@ struct hns_roce_srq {
 	unsigned int			*rdb;
 	unsigned int			cap_flags;
 	unsigned short			counter;
+	struct list_node		xrc_srcq_node;
 };
 struct hns_roce_wq {
@@ -362,6 +368,9 @@ struct hns_roce_qp {
 	void				*cur_wqe;
 	unsigned int			rb_sq_head; /* roll back sq head */
 	struct hns_roce_sge_info	sge_info;
+	struct list_node		rcq_node;
+	struct list_node		scq_node;
+	struct list_node		srcq_node;
 };
 struct hns_roce_av {
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index 4a24967cb..e3b0205a4 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -728,6 +728,170 @@ static int hns_roce_poll_one(struct hns_roce_context *ctx,
 	return hns_roce_flush_cqe(*cur_qp, status);
 }
+static void hns_roce_fill_swc(struct hns_roce_cq *cq, struct ibv_wc *wc,
+			      uint64_t wr_id, uint32_t qp_num)
+{
+	if (!wc) {
+		cq->verbs_cq.cq_ex.status = IBV_WC_WR_FLUSH_ERR;
+		cq->verbs_cq.cq_ex.wr_id = wr_id;
+		hr_reg_write(cq->sw_cqe, CQE_LCL_QPN, qp_num);
+		return;
+	}
+
+	wc->wr_id = wr_id;
+	wc->status = IBV_WC_WR_FLUSH_ERR;
+	wc->vendor_err = 0;
+	wc->qp_num = qp_num;
+}
+
+static int hns_roce_get_wq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+			       struct ibv_wc *wc, bool is_sq)
+{
+	struct hns_roce_wq *wq = is_sq ? &qp->sq : &qp->rq;
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	left_wr = wq->head - wq->tail;
+	if (left_wr == 0) {
+		if (is_sq)
+			list_del_init(&qp->scq_node);
+		else
+			list_del_init(&qp->rcq_node);
+
+		return ENOENT;
+	}
+
+	wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, qp->verbs_qp.qp.qp_num);
+	wq->tail++;
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_sq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_sq, qp, next, scq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, true) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_rq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_rq, qp, next, rcq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, false) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_get_srq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+				struct hns_roce_srq *srq, struct ibv_wc *wc)
+{
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	hns_roce_spin_lock(&srq->hr_lock);
+	left_wr = srq->idx_que.head - srq->idx_que.tail;
+	if (left_wr == 0) {
+		if (qp)
+			list_del_init(&qp->srcq_node);
+		else
+			list_del_init(&srq->xrc_srcq_node);
+
+		hns_roce_spin_unlock(&srq->hr_lock);
+		return ENOENT;
+	}
+
+	wr_id = srq->wrid[srq->idx_que.tail & (srq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, srq->srqn);
+	srq->idx_que.tail++;
+	hns_roce_spin_unlock(&srq->hr_lock);
+
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_common_srq_swc(struct hns_roce_cq *cq,
+				       struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+	struct hns_roce_srq *srq;
+
+	list_for_each_safe(&cq->list_srq, qp, next, srcq_node) {
+		srq = to_hr_srq(qp->verbs_qp.qp.srq);
+		if (hns_roce_get_srq_swc(cq, qp, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_xrc_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_srq *next, *srq = NULL;
+
+	list_for_each_safe(&cq->list_xrc_srq, srq, next, xrc_srcq_node) {
+		if (hns_roce_get_srq_swc(cq, NULL, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_common_srq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_xrc_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_one_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_sq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	err = hns_roce_gen_rq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_swc(struct hns_roce_cq *cq, int ne, struct ibv_wc *wc)
+{
+	int npolled;
+	int err;
+
+	for (npolled = 0; npolled < ne; npolled++) {
+		err = hns_roce_poll_one_swc(cq, wc + npolled);
+		if (err == V2_CQ_EMPTY)
+			break;
+	}
+
+	return npolled;
+}
+
 static bool hns_roce_is_reset(struct hns_roce_context *ctx)
 {
 	struct hns_roce_reset_state *state = ctx->reset_state;
@@ -746,6 +910,12 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
hns_roce_spin_lock(&cq->hr_lock);
+	if (unlikely(hns_roce_is_reset(ctx))) {
+		npolled = hns_roce_poll_swc(cq, ne, wc);
+		hns_roce_spin_unlock(&cq->hr_lock);
+		return npolled;
+	}
+
 	for (npolled = 0; npolled < ne; ++npolled) {
 		err = hns_roce_poll_one(ctx, &qp, cq, wc + npolled);
 		if (err != V2_CQ_OK)
@@ -1621,7 +1791,7 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
 	return ret;
 }
-static void hns_roce_lock_cqs(struct ibv_qp *qp)
+void hns_roce_lock_cqs(struct ibv_qp *qp)
 {
 	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
 	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
@@ -1643,7 +1813,7 @@ static void hns_roce_lock_cqs(struct ibv_qp *qp)
 	}
 }
-static void hns_roce_unlock_cqs(struct ibv_qp *qp)
+void hns_roce_unlock_cqs(struct ibv_qp *qp)
 {
 	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
 	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
@@ -1683,13 +1853,18 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp)
hns_roce_lock_cqs(ibqp);
-	if (ibqp->recv_cq)
+	if (ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), ibqp->qp_num,
 				       ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+		list_del(&qp->srcq_node);
+		list_del(&qp->rcq_node);
+	}
-	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq)
+	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), ibqp->qp_num,
 				       NULL);
+		list_del(&qp->scq_node);
+	}
hns_roce_unlock_cqs(ibqp);
@@ -1854,7 +2029,11 @@ static int wc_start_poll_cq(struct ibv_cq_ex *current,
hns_roce_spin_lock(&cq->hr_lock);
-	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+	if (unlikely(hns_roce_is_reset(ctx)))
+		err = hns_roce_poll_one_swc(cq, NULL);
+	else
+		err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+
 	if (err != V2_CQ_OK)
 		hns_roce_spin_unlock(&cq->hr_lock);
@@ -1868,6 +2047,9 @@ static int wc_next_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_qp *qp = NULL;
 	int err;
+	if (unlikely(hns_roce_is_reset(ctx)))
+		return hns_roce_poll_one_swc(cq, NULL);
+
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
 	if (err != V2_CQ_OK)
 		return err;
@@ -1885,11 +2067,15 @@ static void wc_end_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_cq *cq = to_hr_cq(ibv_cq_ex_to_cq(current));
 	struct hns_roce_context *ctx = to_hr_ctx(current->context);
+	if (unlikely(hns_roce_is_reset(ctx)))
+		goto end_poll_done;
+
 	if (cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)
 		*cq->db = cq->cons_index & RECORD_DB_CI_MASK;
 	else
 		update_cq_db(ctx, cq);
+end_poll_done:
 	hns_roce_spin_unlock(&cq->hr_lock);
 }
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index abf94673e..139ebfba0 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -344,5 +344,7 @@ void hns_roce_v2_clear_qp(struct hns_roce_context *ctx, struct hns_roce_qp *qp);
 void hns_roce_attach_cq_ex_ops(struct ibv_cq_ex *cq_ex, uint64_t wc_flags);
 int hns_roce_attach_qp_ex_ops(struct ibv_qp_init_attr_ex *attr,
 			      struct hns_roce_qp *qp);
+void hns_roce_lock_cqs(struct ibv_qp *qp);
+void hns_roce_unlock_cqs(struct ibv_qp *qp);
 
 #endif /* _HNS_ROCE_U_HW_V2_H */
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 493ff9ad6..ad42f5f6f 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -498,6 +498,32 @@ static int exec_cq_create_cmd(struct ibv_context *context,
 	return 0;
 }
+static int hns_roce_init_cq_swc(struct hns_roce_cq *cq,
+				struct ibv_cq_init_attr_ex *attr)
+{
+	list_head_init(&cq->list_sq);
+	list_head_init(&cq->list_rq);
+	list_head_init(&cq->list_srq);
+	list_head_init(&cq->list_xrc_srq);
+
+	if (!(attr->wc_flags & CREATE_CQ_SUPPORTED_WC_FLAGS))
+		return 0;
+
+	cq->sw_cqe = calloc(1, sizeof(struct hns_roce_v2_cqe));
+	if (!cq->sw_cqe)
+		return ENOMEM;
+
+	return 0;
+}
+
+static void hns_roce_uninit_cq_swc(struct hns_roce_cq *cq)
+{
+	if (cq->sw_cqe) {
+		free(cq->sw_cqe);
+		cq->sw_cqe = NULL;
+	}
+}
+
 static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 				   struct ibv_cq_init_attr_ex *attr)
 {
@@ -535,6 +561,10 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 		goto err_db;
 	}
+	ret = hns_roce_init_cq_swc(cq, attr);
+	if (ret)
+		goto err_swc;
+
 	ret = exec_cq_create_cmd(context, cq, attr);
 	if (ret)
 		goto err_cmd;
@@ -544,6 +574,8 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 	return &cq->verbs_cq.cq_ex;
 err_cmd:
+	hns_roce_uninit_cq_swc(cq);
+err_swc:
 	hns_roce_free_db(hr_ctx, cq->db, HNS_ROCE_CQ_TYPE_DB);
 err_db:
 	hns_roce_free_buf(&cq->buf);
@@ -608,6 +640,8 @@ int hns_roce_u_destroy_cq(struct ibv_cq *cq)
 	if (ret)
 		return ret;
+	hns_roce_uninit_cq_swc(hr_cq);
+
 	hns_roce_free_db(to_hr_ctx(cq->context), hr_cq->db,
 			 HNS_ROCE_CQ_TYPE_DB);
 	hns_roce_free_buf(&hr_cq->buf);
@@ -808,6 +842,22 @@ static int exec_srq_create_cmd(struct ibv_context *context,
 	return 0;
 }
+static void init_srq_cq_list(struct hns_roce_srq *srq,
+			     struct ibv_srq_init_attr_ex *init_attr)
+{
+	struct hns_roce_cq *srq_cq;
+
+	list_node_init(&srq->xrc_srcq_node);
+
+	if (!init_attr->cq)
+		return;
+
+	srq_cq = to_hr_cq(init_attr->cq);
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_add_tail(&srq_cq->list_xrc_srq, &srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 static struct ibv_srq *create_srq(struct ibv_context *context,
 				  struct ibv_srq_init_attr_ex *init_attr)
 {
@@ -852,6 +902,8 @@ static struct ibv_srq *create_srq(struct ibv_context *context,
 	init_attr->attr.max_sge =
 		min(init_attr->attr.max_sge - srq->rsv_sge, hr_ctx->max_srq_sge);
+	init_srq_cq_list(srq, init_attr);
+
 	return &srq->verbs_srq.srq;
 err_destroy_srq:
@@ -927,6 +979,18 @@ int hns_roce_u_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
 	return ret;
 }
+static void del_srq_from_cq_list(struct hns_roce_srq *srq)
+{
+	struct hns_roce_cq *srq_cq = to_hr_cq(srq->verbs_srq.cq);
+
+	if (!srq_cq)
+		return;
+
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_del(&srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 {
 	struct hns_roce_context *ctx = to_hr_ctx(ibv_srq->context);
@@ -934,6 +998,8 @@ int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 	struct hns_roce_srq *srq = to_hr_srq(ibv_srq);
 	int ret;
+	del_srq_from_cq_list(srq);
+
 	ret = ibv_cmd_destroy_srq(ibv_srq);
 	if (ret)
 		return ret;
@@ -1517,6 +1583,30 @@ static int mmap_dwqe(struct ibv_context *ibv_ctx, struct hns_roce_qp *qp,
 	return 0;
 }
+static void add_qp_to_cq_list(struct ibv_qp_init_attr_ex *attr,
+			      struct hns_roce_qp *qp)
+{
+	struct hns_roce_cq *send_cq, *recv_cq;
+
+	send_cq = attr->send_cq ? to_hr_cq(attr->send_cq) : NULL;
+	recv_cq = attr->recv_cq ? to_hr_cq(attr->recv_cq) : NULL;
+
+	list_node_init(&qp->scq_node);
+	list_node_init(&qp->rcq_node);
+	list_node_init(&qp->srcq_node);
+
+	hns_roce_lock_cqs(&qp->verbs_qp.qp);
+	if (send_cq)
+		list_add_tail(&send_cq->list_sq, &qp->scq_node);
+	if (recv_cq) {
+		if (attr->srq)
+			list_add_tail(&recv_cq->list_srq, &qp->srcq_node);
+		else
+			list_add_tail(&recv_cq->list_rq, &qp->rcq_node);
+	}
+	hns_roce_unlock_cqs(&qp->verbs_qp.qp);
+}
+
 static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 				struct ibv_qp_init_attr_ex *attr,
 				struct hnsdv_qp_init_attr *hns_attr)
@@ -1569,6 +1659,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 	}
 	qp_setup_config(attr, qp, context);
+	add_qp_to_cq_list(attr, qp);
 
return &qp->verbs_qp.qp;
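For illustration only (not part of the patch): once the reset state is detected, ibv_poll_cq() returns the software-composed completions described above. A minimal sketch of how an application might drain them, assuming the CQ and its outstanding-WR accounting live elsewhere; the helper name drain_flushed_wcs is hypothetical.

#include <infiniband/verbs.h>

/* Sketch: poll until the provider has flushed every outstanding WR; each
 * software-composed WC is expected to carry IBV_WC_WR_FLUSH_ERR. */
static int drain_flushed_wcs(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int flushed = 0;
	int n;

	do {
		n = ibv_poll_cq(cq, 16, wc);
		if (n < 0)
			return n;

		for (int i = 0; i < n; i++)
			if (wc[i].status == IBV_WC_WR_FLUSH_ERR)
				flushed++;
	} while (n > 0);

	return flushed;
}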