driver inclusion
category: feature
bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/I83BP0
----------------------------------------------------------
When the HW is resetting, not all of the expected work completions can be polled back, because the HW no longer generates CQEs.
This patch allows the driver to compose the expected WCs in software during the resetting stage, instead of relying on the HW. Once the hardware has finished resetting, the CQ can be polled from the hardware again.
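For illustration, this is what a verbs consumer sees with this change: during a reset, ibv_poll_cq() keeps returning completions, each flushed with IBV_WC_WR_FLUSH_ERR, until every outstanding WR on the CQ's QPs and SRQs has been drained. A minimal consumer-side sketch (not part of this patch; "cq" is assumed to be a previously created CQ with outstanding work requests):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Drain a CQ whose device may be resetting: the remaining WRs are
     * reported as driver-composed flush completions instead of the poll
     * returning empty forever. */
    static void drain_cq(struct ibv_cq *cq)
    {
            struct ibv_wc wc[16];
            int i, n;

            while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
                    for (i = 0; i < n; i++)
                            if (wc[i].status == IBV_WC_WR_FLUSH_ERR)
                                    fprintf(stderr, "wr_id %llu flushed on qp 0x%x\n",
                                            (unsigned long long)wc[i].wr_id,
                                            wc[i].qp_num);
            }
    }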
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 providers/hns/hns_roce_u.h       |  12 ++
 providers/hns/hns_roce_u_hw_v2.c | 217 +++++++++++++++++++++++++++++--
 providers/hns/hns_roce_u_hw_v2.h |   2 +
 providers/hns/hns_roce_u_verbs.c |  91 +++++++++++++
 4 files changed, 310 insertions(+), 12 deletions(-)
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index a89c7b6..2fe7796 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -238,6 +238,8 @@ struct hns_roce_context {
 	unsigned int cqe_size;
 	uint32_t config;
 	unsigned int max_inline_data;
+
+	bool reseted;
 };
 
 struct hns_roce_td {
@@ -271,6 +273,11 @@ struct hns_roce_cq {
 	unsigned int cqe_size;
 	struct hns_roce_v2_cqe *cqe;
 	struct ibv_pd *parent_domain;
+	struct list_head list_sq;
+	struct list_head list_rq;
+	struct list_head list_srq;
+	struct list_head list_xrc_srq;
+	struct hns_roce_v2_cqe *sw_cqe;
 };
 
 struct hns_roce_idx_que {
@@ -308,6 +315,7 @@ struct hns_roce_srq {
 	unsigned int cap_flags;
 	unsigned short counter;
 	struct ibv_pd *parent_domain;
+	struct list_node xrc_srcq_node;
 };
 
 struct hns_roce_wq {
@@ -369,6 +377,10 @@ struct hns_roce_qp {
 	void *cur_wqe;
 	unsigned int rb_sq_head; /* roll back sq head */
 	struct hns_roce_sge_info sge_info;
+
+	struct list_node rcq_node;
+	struct list_node scq_node;
+	struct list_node srcq_node;
 };
 
 struct hns_roce_av {
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index 9a2f72a..55ee8cf 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -723,6 +723,180 @@ static int hns_roce_poll_one(struct hns_roce_context *ctx,
 	return hns_roce_flush_cqe(*cur_qp, status);
 }
 
+static void hns_roce_fill_swc(struct hns_roce_cq *cq, struct ibv_wc *wc,
+			      uint64_t wr_id, uint32_t qp_num)
+{
+	if (!wc) {
+		cq->verbs_cq.cq_ex.status = IBV_WC_WR_FLUSH_ERR;
+		cq->verbs_cq.cq_ex.wr_id = wr_id;
+		hr_reg_write(cq->sw_cqe, CQE_LCL_QPN, qp_num);
+		return;
+	}
+
+	wc->wr_id = wr_id;
+	wc->status = IBV_WC_WR_FLUSH_ERR;
+	wc->vendor_err = 0;
+	wc->qp_num = qp_num;
+}
+
+static int hns_roce_get_wq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+			       struct ibv_wc *wc, bool is_sq)
+{
+	struct hns_roce_wq *wq = is_sq ? &qp->sq : &qp->rq;
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	left_wr = wq->head - wq->tail;
+	if (left_wr == 0) {
+		if (is_sq)
+			list_del_init(&qp->scq_node);
+		else
+			list_del_init(&qp->rcq_node);
+
+		return ENOENT;
+	}
+
+	wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, qp->verbs_qp.qp.qp_num);
+	wq->tail++;
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_sq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_sq, qp, next, scq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, true) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return wc ? V2_CQ_EMPTY : ENOENT;
+}
+
+static int hns_roce_gen_rq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_rq, qp, next, rcq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, false) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return wc ? V2_CQ_EMPTY : ENOENT;
+}
+
+static int hns_roce_get_srq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+				struct hns_roce_srq *srq, struct ibv_wc *wc)
+{
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	hns_roce_spin_lock(&srq->hr_lock);
+	left_wr = srq->idx_que.head - srq->idx_que.tail;
+	if (left_wr == 0) {
+		if (qp)
+			list_del_init(&qp->srcq_node);
+		else
+			list_del_init(&srq->xrc_srcq_node);
+
+		hns_roce_spin_unlock(&srq->hr_lock);
+		return ENOENT;
+	}
+
+	wr_id = srq->wrid[srq->idx_que.tail & (srq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, srq->srqn);
+	srq->idx_que.tail++;
+	hns_roce_spin_unlock(&srq->hr_lock);
+
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_common_srq_swc(struct hns_roce_cq *cq,
+				       struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+	struct hns_roce_srq *srq;
+
+	list_for_each_safe(&cq->list_srq, qp, next, srcq_node) {
+		srq = to_hr_srq(qp->verbs_qp.qp.srq);
+		if (hns_roce_get_srq_swc(cq, qp, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return wc ? V2_CQ_EMPTY : ENOENT;
+}
+
+static int hns_roce_gen_xrc_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_srq *next, *srq = NULL;
+
+	list_for_each_safe(&cq->list_xrc_srq, srq, next, xrc_srcq_node) {
+		if (hns_roce_get_srq_swc(cq, NULL, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return wc ? V2_CQ_EMPTY : ENOENT;
+}
+
+static int hns_roce_gen_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_common_srq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_xrc_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_one_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_sq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	err = hns_roce_gen_rq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_swc(struct hns_roce_cq *cq, int ne, struct ibv_wc *wc)
+{
+	int npolled;
+	int err;
+
+	for (npolled = 0; npolled < ne; npolled++) {
+		err = hns_roce_poll_one_swc(cq, wc + npolled);
+		if (err == V2_CQ_EMPTY)
+			break;
+	}
+
+	return npolled;
+}
+
+static bool hns_roce_reseted(struct hns_roce_context *ctx)
+{
+	struct hns_roce_v2_reset_state *state = ctx->reset_state;
+
+	if (state && state->is_reset)
+		ctx->reseted = true;
+
+	return ctx->reseted;
+}
+
 static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
 				 struct ibv_wc *wc)
 {
@@ -734,6 +908,12 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
 
 	hns_roce_spin_lock(&cq->hr_lock);
 
+	if (unlikely(hns_roce_reseted(ctx))) {
+		npolled = hns_roce_poll_swc(cq, ne, wc);
+		hns_roce_spin_unlock(&cq->hr_lock);
+		return npolled;
+	}
+
 	for (npolled = 0; npolled < ne; ++npolled) {
 		err = hns_roce_poll_one(ctx, &qp, cq, wc + npolled);
 		if (err != V2_CQ_OK)
@@ -1602,11 +1782,8 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
 	return ret;
 }
 
-static void hns_roce_lock_cqs(struct ibv_qp *qp)
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 {
-	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
-	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
-
 	if (send_cq && recv_cq) {
 		if (send_cq == recv_cq) {
 			hns_roce_spin_lock(&send_cq->hr_lock);
@@ -1624,11 +1801,8 @@ static void hns_roce_lock_cqs(struct ibv_qp *qp)
 	}
 }
 
-static void hns_roce_unlock_cqs(struct ibv_qp *qp)
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 {
-	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
-	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
-
 	if (send_cq && recv_cq) {
 		if (send_cq == recv_cq) {
 			hns_roce_spin_unlock(&send_cq->hr_lock);
@@ -1662,17 +1836,22 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp)
 
 	hns_roce_v2_clear_qp(ctx, qp);
 
-	hns_roce_lock_cqs(ibqp);
+	hns_roce_lock_cqs(to_hr_cq(ibqp->send_cq), to_hr_cq(ibqp->recv_cq));
 
-	if (ibqp->recv_cq)
+	if (ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), ibqp->qp_num,
 				       ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+		list_del(&qp->srcq_node);
+		list_del(&qp->rcq_node);
+	}
 
-	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq)
+	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), ibqp->qp_num,
 				       NULL);
+		list_del(&qp->scq_node);
+	}
 
-	hns_roce_unlock_cqs(ibqp);
+	hns_roce_unlock_cqs(to_hr_cq(ibqp->send_cq), to_hr_cq(ibqp->recv_cq));
 
 	hns_roce_free_qp_buf(qp, ctx);
@@ -1822,7 +2001,14 @@ static int wc_start_poll_cq(struct ibv_cq_ex *current,
 
 	hns_roce_spin_lock(&cq->hr_lock);
 
+	if (unlikely(hns_roce_reseted(ctx))) {
+		err = hns_roce_poll_one_swc(cq, NULL);
+		goto start_poll_done;
+	}
+
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+
+start_poll_done:
 	if (err != V2_CQ_OK)
 		hns_roce_spin_unlock(&cq->hr_lock);
@@ -1836,6 +2022,9 @@ static int wc_next_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_qp *qp = NULL;
 	int err;
 
+	if (unlikely(hns_roce_reseted(ctx)))
+		return hns_roce_poll_one_swc(cq, NULL);
+
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
 	if (err != V2_CQ_OK)
 		return err;
@@ -1853,11 +2042,15 @@ static void wc_end_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_cq *cq = to_hr_cq(ibv_cq_ex_to_cq(current));
 	struct hns_roce_context *ctx = to_hr_ctx(current->context);
 
+	if (unlikely(hns_roce_reseted(ctx)))
+		goto end_poll_done;
+
 	if (cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)
 		*cq->db = cq->cons_index & RECORD_DB_CI_MASK;
 	else
 		update_cq_db(ctx, cq);
 
+end_poll_done:
 	hns_roce_spin_unlock(&cq->hr_lock);
 }
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index abf9467..1a7b828 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -344,5 +344,7 @@ void hns_roce_v2_clear_qp(struct hns_roce_context *ctx,
 			  struct hns_roce_qp *qp);
 void hns_roce_attach_cq_ex_ops(struct ibv_cq_ex *cq_ex, uint64_t wc_flags);
 int hns_roce_attach_qp_ex_ops(struct ibv_qp_init_attr_ex *attr,
 			      struct hns_roce_qp *qp);
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq);
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq);
 
 #endif /* _HNS_ROCE_U_HW_V2_H */
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 4aa5a3c..7d59b8a 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -512,6 +512,32 @@ static int exec_cq_create_cmd(struct ibv_context *context,
 	return 0;
 }
 
+static int hns_roce_init_cq_swc(struct hns_roce_cq *cq,
+				struct ibv_cq_init_attr_ex *attr)
+{
+	list_head_init(&cq->list_sq);
+	list_head_init(&cq->list_rq);
+	list_head_init(&cq->list_srq);
+	list_head_init(&cq->list_xrc_srq);
+
+	if (!(attr->wc_flags & CREATE_CQ_SUPPORTED_WC_FLAGS))
+		return 0;
+
+	cq->sw_cqe = calloc(1, sizeof(struct hns_roce_v2_cqe));
+	if (!cq->sw_cqe)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void hns_roce_uninit_cq_swc(struct hns_roce_cq *cq)
+{
+	if (cq->sw_cqe) {
+		free(cq->sw_cqe);
+		cq->sw_cqe = NULL;
+	}
+}
+
 static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 				   struct ibv_cq_init_attr_ex *attr)
 {
@@ -548,6 +574,10 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 
 	*cq->db = 0;
 
+	ret = hns_roce_init_cq_swc(cq, attr);
+	if (ret)
+		goto err_swc;
+
 	ret = exec_cq_create_cmd(context, cq, attr);
 	if (ret)
 		goto err_cmd;
@@ -557,6 +587,8 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 
 	return &cq->verbs_cq.cq_ex;
 
 err_cmd:
+	hns_roce_uninit_cq_swc(cq);
+err_swc:
 	hns_roce_free_db(hr_ctx, cq->db, HNS_ROCE_CQ_TYPE_DB);
 err_db:
 	hns_roce_free_buf(&cq->buf);
@@ -621,6 +653,8 @@ int hns_roce_u_destroy_cq(struct ibv_cq *cq)
 	if (ret)
 		return ret;
 
+	hns_roce_uninit_cq_swc(to_hr_cq(cq));
+
 	hns_roce_free_db(to_hr_ctx(cq->context), hr_cq->db, HNS_ROCE_CQ_TYPE_DB);
 	hns_roce_free_buf(&hr_cq->buf);
@@ -835,6 +869,22 @@ static int exec_srq_create_cmd(struct ibv_context *context,
 	return 0;
 }
 
+static void init_srq_cq_list(struct hns_roce_srq *srq,
+			     struct ibv_srq_init_attr_ex *init_attr)
+{
+	struct hns_roce_cq *srq_cq;
+
+	list_node_init(&srq->xrc_srcq_node);
+
+	if (!init_attr->cq)
+		return;
+
+	srq_cq = to_hr_cq(init_attr->cq);
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_add_tail(&srq_cq->list_xrc_srq, &srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 static struct ibv_srq *create_srq(struct ibv_context *context,
 				  struct ibv_srq_init_attr_ex *init_attr)
 {
@@ -881,6 +931,8 @@ static struct ibv_srq *create_srq(struct ibv_context *context,
 	init_attr->attr.max_sge =
 		min(init_attr->attr.max_sge - srq->rsv_sge, hr_ctx->max_srq_sge);
 
+	init_srq_cq_list(srq, init_attr);
+
 	return &srq->verbs_srq.srq;
 
 err_destroy_srq:
@@ -956,6 +1008,18 @@ int hns_roce_u_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
 	return ret;
 }
 
+static void del_srq_from_cq_list(struct hns_roce_srq *srq)
+{
+	struct hns_roce_cq *srq_cq = to_hr_cq(srq->verbs_srq.cq);
+
+	if (!srq_cq)
+		return;
+
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_del(&srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 {
 	struct hns_roce_context *ctx = to_hr_ctx(ibv_srq->context);
@@ -963,6 +1027,8 @@ int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 	struct hns_roce_pad *pad = to_hr_pad(srq->parent_domain);
 	int ret;
 
+	del_srq_from_cq_list(srq);
+
 	ret = ibv_cmd_destroy_srq(ibv_srq);
 	if (ret)
 		return ret;
@@ -1596,6 +1662,30 @@ static int mmap_dwqe(struct ibv_context *ibv_ctx, struct hns_roce_qp *qp,
 	return 0;
 }
 
+static void add_qp_to_cq_list(struct ibv_qp_init_attr_ex *attr,
+			      struct hns_roce_qp *qp)
+{
+	struct hns_roce_cq *send_cq, *recv_cq;
+
+	send_cq = attr->send_cq ? to_hr_cq(attr->send_cq) : NULL;
+	recv_cq = attr->recv_cq ? to_hr_cq(attr->recv_cq) : NULL;
+
+	list_node_init(&qp->scq_node);
+	list_node_init(&qp->rcq_node);
+	list_node_init(&qp->srcq_node);
+
+	hns_roce_lock_cqs(send_cq, recv_cq);
+	if (send_cq)
+		list_add_tail(&send_cq->list_sq, &qp->scq_node);
+	if (recv_cq) {
+		if (attr->srq)
+			list_add_tail(&recv_cq->list_srq, &qp->srcq_node);
+		else
+			list_add_tail(&recv_cq->list_rq, &qp->rcq_node);
+	}
+	hns_roce_unlock_cqs(send_cq, recv_cq);
+}
+
 static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 				struct ibv_qp_init_attr_ex *attr,
 				struct hnsdv_qp_init_attr *hns_attr)
@@ -1648,6 +1738,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 	}
 
 	qp_setup_config(attr, qp, context);
+	add_qp_to_cq_list(attr, qp);
 
 	return &qp->verbs_qp.qp;