From: Chengchang Tang <tangchengchang@huawei.com>
When the HW is being reset or has been reset, it no longer generates CQEs, so users cannot poll all the work completions they expect.
This patch allows the userspace driver to compose the expected WCs in software in this case. Each CQ keeps separate lists of the SQs, RQs and SRQs associated with it. When the reset state is detected while polling a CQ, these lists are walked and software WCs with error status IBV_WC_WR_FLUSH_ERR are composed, up to the number of entries requested by the user.
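For example, after a device reset an application draining such a CQ will now observe flushed completions instead of ibv_poll_cq() returning 0 forever. A minimal sketch of the expected behaviour (the CQ handle 'cq' and the array size here are placeholders, and the QPs attached to the CQ are assumed to still have outstanding WRs):

	struct ibv_wc wc[32];
	int i, n;

	/* Reset detected: the provider composes software WCs itself. */
	n = ibv_poll_cq(cq, 32, wc);
	for (i = 0; i < n; i++)
		/* every outstanding WR completes with a flush error */
		assert(wc[i].status == IBV_WC_WR_FLUSH_ERR);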
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 providers/hns/hns_roce_u.h       |   9 ++
 providers/hns/hns_roce_u_hw_v2.c | 196 ++++++++++++++++++++++++++++++-
 providers/hns/hns_roce_u_hw_v2.h |   2 +
 providers/hns/hns_roce_u_verbs.c |  91 ++++++++++++++
 4 files changed, 293 insertions(+), 5 deletions(-)
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 9352d8bcd..ac7153014 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -267,6 +267,11 @@ struct hns_roce_cq {
 	unsigned int cqe_size;
 	struct hns_roce_v2_cqe *cqe;
 	struct ibv_pd *parent_domain;
+	struct list_head list_sq;
+	struct list_head list_rq;
+	struct list_head list_srq;
+	struct list_head list_xrc_srq;
+	struct hns_roce_v2_cqe *sw_cqe;
 };
 
 struct hns_roce_idx_que {
@@ -302,6 +307,7 @@ struct hns_roce_srq {
 	unsigned int *rdb;
 	unsigned int cap_flags;
 	unsigned short counter;
+	struct list_node xrc_srcq_node;
 };
 
 struct hns_roce_wq {
@@ -362,6 +368,9 @@ struct hns_roce_qp {
 	void *cur_wqe;
 	unsigned int rb_sq_head; /* roll back sq head */
 	struct hns_roce_sge_info sge_info;
+	struct list_node rcq_node;
+	struct list_node scq_node;
+	struct list_node srcq_node;
 };
 
 struct hns_roce_av {

diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index 4a24967cb..e3b0205a4 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -728,6 +728,170 @@ static int hns_roce_poll_one(struct hns_roce_context *ctx,
 	return hns_roce_flush_cqe(*cur_qp, status);
 }
 
+static void hns_roce_fill_swc(struct hns_roce_cq *cq, struct ibv_wc *wc,
+			      uint64_t wr_id, uint32_t qp_num)
+{
+	if (!wc) {
+		cq->verbs_cq.cq_ex.status = IBV_WC_WR_FLUSH_ERR;
+		cq->verbs_cq.cq_ex.wr_id = wr_id;
+		hr_reg_write(cq->sw_cqe, CQE_LCL_QPN, qp_num);
+		return;
+	}
+
+	wc->wr_id = wr_id;
+	wc->status = IBV_WC_WR_FLUSH_ERR;
+	wc->vendor_err = 0;
+	wc->qp_num = qp_num;
+}
+
+static int hns_roce_get_wq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+			       struct ibv_wc *wc, bool is_sq)
+{
+	struct hns_roce_wq *wq = is_sq ? &qp->sq : &qp->rq;
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	left_wr = wq->head - wq->tail;
+	if (left_wr == 0) {
+		if (is_sq)
+			list_del_init(&qp->scq_node);
+		else
+			list_del_init(&qp->rcq_node);
+
+		return ENOENT;
+	}
+
+	wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, qp->verbs_qp.qp.qp_num);
+	wq->tail++;
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_sq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_sq, qp, next, scq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, true) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_rq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+
+	list_for_each_safe(&cq->list_rq, qp, next, rcq_node) {
+		if (hns_roce_get_wq_swc(cq, qp, wc, false) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_get_srq_swc(struct hns_roce_cq *cq, struct hns_roce_qp *qp,
+				struct hns_roce_srq *srq, struct ibv_wc *wc)
+{
+	unsigned int left_wr;
+	uint64_t wr_id;
+
+	hns_roce_spin_lock(&srq->hr_lock);
+	left_wr = srq->idx_que.head - srq->idx_que.tail;
+	if (left_wr == 0) {
+		if (qp)
+			list_del_init(&qp->srcq_node);
+		else
+			list_del_init(&srq->xrc_srcq_node);
+
+		hns_roce_spin_unlock(&srq->hr_lock);
+		return ENOENT;
+	}
+
+	wr_id = srq->wrid[srq->idx_que.tail & (srq->wqe_cnt - 1)];
+	hns_roce_fill_swc(cq, wc, wr_id, srq->srqn);
+	srq->idx_que.tail++;
+	hns_roce_spin_unlock(&srq->hr_lock);
+
+	return V2_CQ_OK;
+}
+
+static int hns_roce_gen_common_srq_swc(struct hns_roce_cq *cq,
+				       struct ibv_wc *wc)
+{
+	struct hns_roce_qp *next, *qp = NULL;
+	struct hns_roce_srq *srq;
+
+	list_for_each_safe(&cq->list_srq, qp, next, srcq_node) {
+		srq = to_hr_srq(qp->verbs_qp.qp.srq);
+		if (hns_roce_get_srq_swc(cq, qp, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_xrc_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	struct hns_roce_srq *next, *srq = NULL;
+
+	list_for_each_safe(&cq->list_xrc_srq, srq, next, xrc_srcq_node) {
+		if (hns_roce_get_srq_swc(cq, NULL, srq, wc) == ENOENT)
+			continue;
+
+		return V2_CQ_OK;
+	}
+
+	return !wc ? ENOENT : V2_CQ_EMPTY;
+}
+
+static int hns_roce_gen_srq_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_common_srq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_xrc_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_one_swc(struct hns_roce_cq *cq, struct ibv_wc *wc)
+{
+	int err;
+
+	err = hns_roce_gen_sq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	err = hns_roce_gen_rq_swc(cq, wc);
+	if (err == V2_CQ_OK)
+		return err;
+
+	return hns_roce_gen_srq_swc(cq, wc);
+}
+
+static int hns_roce_poll_swc(struct hns_roce_cq *cq, int ne, struct ibv_wc *wc)
+{
+	int npolled;
+	int err;
+
+	for (npolled = 0; npolled < ne; npolled++) {
+		err = hns_roce_poll_one_swc(cq, wc + npolled);
+		if (err == V2_CQ_EMPTY)
+			break;
+	}
+
+	return npolled;
+}
+
 static bool hns_roce_is_reset(struct hns_roce_context *ctx)
 {
 	struct hns_roce_reset_state *state = ctx->reset_state;
@@ -746,6 +910,12 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
 	hns_roce_spin_lock(&cq->hr_lock);
 
+	if (unlikely(hns_roce_is_reset(ctx))) {
+		npolled = hns_roce_poll_swc(cq, ne, wc);
+		hns_roce_spin_unlock(&cq->hr_lock);
+		return npolled;
+	}
+
 	for (npolled = 0; npolled < ne; ++npolled) {
 		err = hns_roce_poll_one(ctx, &qp, cq, wc + npolled);
 		if (err != V2_CQ_OK)
@@ -1621,7 +1791,7 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
 	return ret;
 }
 
-static void hns_roce_lock_cqs(struct ibv_qp *qp)
+void hns_roce_lock_cqs(struct ibv_qp *qp)
 {
 	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
 	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
@@ -1643,7 +1813,7 @@ static void hns_roce_lock_cqs(struct ibv_qp *qp)
 	}
 }
 
-static void hns_roce_unlock_cqs(struct ibv_qp *qp)
+void hns_roce_unlock_cqs(struct ibv_qp *qp)
 {
 	struct hns_roce_cq *send_cq = to_hr_cq(qp->send_cq);
 	struct hns_roce_cq *recv_cq = to_hr_cq(qp->recv_cq);
@@ -1683,13 +1853,18 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp)
 	hns_roce_lock_cqs(ibqp);
 
-	if (ibqp->recv_cq)
+	if (ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), ibqp->qp_num,
 				       ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+		list_del(&qp->srcq_node);
+		list_del(&qp->rcq_node);
+	}
 
-	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq)
+	if (ibqp->send_cq && ibqp->send_cq != ibqp->recv_cq) {
 		__hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), ibqp->qp_num,
 				       NULL);
+		list_del(&qp->scq_node);
+	}
 
 	hns_roce_unlock_cqs(ibqp);
 
@@ -1854,7 +2029,11 @@ static int wc_start_poll_cq(struct ibv_cq_ex *current,
 	hns_roce_spin_lock(&cq->hr_lock);
 
-	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+	if (unlikely(hns_roce_is_reset(ctx)))
+		err = hns_roce_poll_one_swc(cq, NULL);
+	else
+		err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+
 	if (err != V2_CQ_OK)
 		hns_roce_spin_unlock(&cq->hr_lock);
 
@@ -1868,6 +2047,9 @@ static int wc_next_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_qp *qp = NULL;
 	int err;
 
+	if (unlikely(hns_roce_is_reset(ctx)))
+		return hns_roce_poll_one_swc(cq, NULL);
+
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
 	if (err != V2_CQ_OK)
 		return err;
@@ -1885,11 +2067,15 @@ static void wc_end_poll_cq(struct ibv_cq_ex *current)
 	struct hns_roce_cq *cq = to_hr_cq(ibv_cq_ex_to_cq(current));
 	struct hns_roce_context *ctx = to_hr_ctx(current->context);
 
+	if (unlikely(hns_roce_is_reset(ctx)))
+		goto end_poll_done;
+
 	if (cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)
 		*cq->db = cq->cons_index & RECORD_DB_CI_MASK;
 	else
 		update_cq_db(ctx, cq);
 
+end_poll_done:
 	hns_roce_spin_unlock(&cq->hr_lock);
 }
 
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index abf94673e..139ebfba0 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -344,5 +344,7 @@ void hns_roce_v2_clear_qp(struct hns_roce_context *ctx,
 			  struct hns_roce_qp *qp);
 void hns_roce_attach_cq_ex_ops(struct ibv_cq_ex *cq_ex, uint64_t wc_flags);
 int hns_roce_attach_qp_ex_ops(struct ibv_qp_init_attr_ex *attr, struct hns_roce_qp *qp);
+void hns_roce_lock_cqs(struct ibv_qp *qp);
+void hns_roce_unlock_cqs(struct ibv_qp *qp);
 
 #endif /* _HNS_ROCE_U_HW_V2_H */

diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 493ff9ad6..ad42f5f6f 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -498,6 +498,32 @@ static int exec_cq_create_cmd(struct ibv_context *context,
 	return 0;
 }
 
+static int hns_roce_init_cq_swc(struct hns_roce_cq *cq,
+				struct ibv_cq_init_attr_ex *attr)
+{
+	list_head_init(&cq->list_sq);
+	list_head_init(&cq->list_rq);
+	list_head_init(&cq->list_srq);
+	list_head_init(&cq->list_xrc_srq);
+
+	if (!(attr->wc_flags & CREATE_CQ_SUPPORTED_WC_FLAGS))
+		return 0;
+
+	cq->sw_cqe = calloc(1, sizeof(struct hns_roce_v2_cqe));
+	if (!cq->sw_cqe)
+		return ENOMEM;
+
+	return 0;
+}
+
+static void hns_roce_uninit_cq_swc(struct hns_roce_cq *cq)
+{
+	if (cq->sw_cqe) {
+		free(cq->sw_cqe);
+		cq->sw_cqe = NULL;
+	}
+}
+
 static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 				   struct ibv_cq_init_attr_ex *attr)
 {
@@ -535,6 +561,10 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 		goto err_db;
 	}
 
+	ret = hns_roce_init_cq_swc(cq, attr);
+	if (ret)
+		goto err_swc;
+
 	ret = exec_cq_create_cmd(context, cq, attr);
 	if (ret)
 		goto err_cmd;
@@ -544,6 +574,8 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 
 	return &cq->verbs_cq.cq_ex;
 
 err_cmd:
+	hns_roce_uninit_cq_swc(cq);
+err_swc:
 	hns_roce_free_db(hr_ctx, cq->db, HNS_ROCE_CQ_TYPE_DB);
 err_db:
 	hns_roce_free_buf(&cq->buf);
@@ -608,6 +640,8 @@ int hns_roce_u_destroy_cq(struct ibv_cq *cq)
 	if (ret)
 		return ret;
 
+	hns_roce_uninit_cq_swc(hr_cq);
+
 	hns_roce_free_db(to_hr_ctx(cq->context), hr_cq->db,
 			 HNS_ROCE_CQ_TYPE_DB);
 	hns_roce_free_buf(&hr_cq->buf);
@@ -808,6 +842,22 @@ static int exec_srq_create_cmd(struct ibv_context *context,
 	return 0;
 }
 
+static void init_srq_cq_list(struct hns_roce_srq *srq,
+			     struct ibv_srq_init_attr_ex *init_attr)
+{
+	struct hns_roce_cq *srq_cq;
+
+	list_node_init(&srq->xrc_srcq_node);
+
+	if (!init_attr->cq)
+		return;
+
+	srq_cq = to_hr_cq(init_attr->cq);
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_add_tail(&srq_cq->list_xrc_srq, &srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 static struct ibv_srq *create_srq(struct ibv_context *context,
 				  struct ibv_srq_init_attr_ex *init_attr)
 {
@@ -852,6 +902,8 @@ static struct ibv_srq *create_srq(struct ibv_context *context,
 	init_attr->attr.max_sge = min(init_attr->attr.max_sge - srq->rsv_sge,
 				      hr_ctx->max_srq_sge);
 
+	init_srq_cq_list(srq, init_attr);
+
 	return &srq->verbs_srq.srq;
 
 err_destroy_srq:
@@ -927,6 +979,18 @@ int hns_roce_u_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
 	return ret;
 }
 
+static void del_srq_from_cq_list(struct hns_roce_srq *srq)
+{
+	struct hns_roce_cq *srq_cq = to_hr_cq(srq->verbs_srq.cq);
+
+	if (!srq_cq)
+		return;
+
+	hns_roce_spin_lock(&srq_cq->hr_lock);
+	list_del(&srq->xrc_srcq_node);
+	hns_roce_spin_unlock(&srq_cq->hr_lock);
+}
+
 int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 {
 	struct hns_roce_context *ctx = to_hr_ctx(ibv_srq->context);
@@ -934,6 +998,8 @@ int hns_roce_u_destroy_srq(struct ibv_srq *ibv_srq)
 	struct hns_roce_srq *srq = to_hr_srq(ibv_srq);
 	int ret;
 
+	del_srq_from_cq_list(srq);
+
 	ret = ibv_cmd_destroy_srq(ibv_srq);
 	if (ret)
 		return ret;
@@ -1517,6 +1583,30 @@ static int mmap_dwqe(struct ibv_context *ibv_ctx, struct hns_roce_qp *qp,
 	return 0;
 }
 
+static void add_qp_to_cq_list(struct ibv_qp_init_attr_ex *attr,
+			      struct hns_roce_qp *qp)
+{
+	struct hns_roce_cq *send_cq, *recv_cq;
+
+	send_cq = attr->send_cq ? to_hr_cq(attr->send_cq) : NULL;
+	recv_cq = attr->recv_cq ? to_hr_cq(attr->recv_cq) : NULL;
+
+	list_node_init(&qp->scq_node);
+	list_node_init(&qp->rcq_node);
+	list_node_init(&qp->srcq_node);
+
+	hns_roce_lock_cqs(&qp->verbs_qp.qp);
+	if (send_cq)
+		list_add_tail(&send_cq->list_sq, &qp->scq_node);
+	if (recv_cq) {
+		if (attr->srq)
+			list_add_tail(&recv_cq->list_srq, &qp->srcq_node);
+		else
+			list_add_tail(&recv_cq->list_rq, &qp->rcq_node);
+	}
+	hns_roce_unlock_cqs(&qp->verbs_qp.qp);
+}
+
 static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 				struct ibv_qp_init_attr_ex *attr,
 				struct hnsdv_qp_init_attr *hns_attr)
@@ -1569,6 +1659,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 	}
 
 	qp_setup_config(attr, qp, context);
+	add_qp_to_cq_list(attr, qp);
 
 	return &qp->verbs_qp.qp;