
MW is no longer supported in hns. Delete the relevant code.

Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
---
 providers/hns/hns_roce_u.c       |  3 --
 providers/hns/hns_roce_u.h       |  5 ---
 providers/hns/hns_roce_u_hw_v2.c | 32 ----------------
 providers/hns/hns_roce_u_hw_v2.h |  7 ----
 providers/hns/hns_roce_u_verbs.c | 63 --------------------------------
 5 files changed, 110 deletions(-)

diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 63a1ac551..21c5f51e7 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -58,15 +58,12 @@ static const struct verbs_match_ent hca_table[] = {
 };
 
 static const struct verbs_context_ops hns_common_ops = {
-	.alloc_mw = hns_roce_u_alloc_mw,
 	.alloc_pd = hns_roce_u_alloc_pd,
-	.bind_mw = hns_roce_u_bind_mw,
 	.cq_event = hns_roce_u_cq_event,
 	.create_cq = hns_roce_u_create_cq,
 	.create_cq_ex = hns_roce_u_create_cq_ex,
 	.create_qp = hns_roce_u_create_qp,
 	.create_qp_ex = hns_roce_u_create_qp_ex,
-	.dealloc_mw = hns_roce_u_dealloc_mw,
 	.dealloc_pd = hns_roce_u_dealloc_pd,
 	.dereg_mr = hns_roce_u_dereg_mr,
 	.destroy_cq = hns_roce_u_destroy_cq,
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 614fed992..1cf3c7cb5 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -508,11 +508,6 @@ int hns_roce_u_rereg_mr(struct verbs_mr *vmr, int flags, struct ibv_pd *pd,
 			void *addr, size_t length, int access);
 int hns_roce_u_dereg_mr(struct verbs_mr *vmr);
 
-struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
-int hns_roce_u_dealloc_mw(struct ibv_mw *mw);
-int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
-		       struct ibv_mw_bind *mw_bind);
-
 struct ibv_cq *hns_roce_u_create_cq(struct ibv_context *context, int cqe,
 				    struct ibv_comp_channel *channel,
 				    int comp_vector);
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index d24cad5bf..784841f43 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -51,7 +51,6 @@ static const uint32_t hns_roce_opcode[] = {
 	HR_IBV_OPC_MAP(RDMA_READ, RDMA_READ),
 	HR_IBV_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOMIC_COM_AND_SWAP),
 	HR_IBV_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOMIC_FETCH_AND_ADD),
-	HR_IBV_OPC_MAP(BIND_MW, BIND_MW_TYPE),
 	HR_IBV_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
 };
 
@@ -386,7 +385,6 @@ static const unsigned int wc_send_op_map[] = {
 	[HNS_ROCE_SQ_OP_RDMA_READ] = IBV_WC_RDMA_READ,
 	[HNS_ROCE_SQ_OP_ATOMIC_COMP_AND_SWAP] = IBV_WC_COMP_SWAP,
 	[HNS_ROCE_SQ_OP_ATOMIC_FETCH_AND_ADD] = IBV_WC_FETCH_ADD,
-	[HNS_ROCE_SQ_OP_BIND_MW] = IBV_WC_BIND_MW,
 };
 
 static const unsigned int wc_rcv_op_map[] = {
@@ -568,7 +566,6 @@ static void parse_cqe_for_req(struct hns_roce_v2_cqe *cqe, struct ibv_wc *wc,
 	case HNS_ROCE_SQ_OP_SEND:
 	case HNS_ROCE_SQ_OP_SEND_WITH_INV:
 	case HNS_ROCE_SQ_OP_RDMA_WRITE:
-	case HNS_ROCE_SQ_OP_BIND_MW:
 		wc->wc_flags = 0;
 		break;
 	case HNS_ROCE_SQ_OP_SEND_WITH_IMM:
@@ -1251,28 +1248,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ibv_send_wr *wr,
 	return 0;
 }
 
-static void set_bind_mw_seg(struct hns_roce_rc_sq_wqe *wqe,
-			    const struct ibv_send_wr *wr)
-{
-	unsigned int access = wr->bind_mw.bind_info.mw_access_flags;
-
-	hr_reg_write_bool(wqe, RCWQE_MW_TYPE, wr->bind_mw.mw->type - 1);
-	hr_reg_write_bool(wqe, RCWQE_MW_RA_EN,
-			  !!(access & IBV_ACCESS_REMOTE_ATOMIC));
-	hr_reg_write_bool(wqe, RCWQE_MW_RR_EN,
-			  !!(access & IBV_ACCESS_REMOTE_READ));
-	hr_reg_write_bool(wqe, RCWQE_MW_RW_EN,
-			  !!(access & IBV_ACCESS_REMOTE_WRITE));
-
-	wqe->new_rkey = htole32(wr->bind_mw.rkey);
-	wqe->byte_16 = htole32(wr->bind_mw.bind_info.length &
-			       HNS_ROCE_ADDRESS_MASK);
-	wqe->byte_20 = htole32(wr->bind_mw.bind_info.length >>
-			       HNS_ROCE_ADDRESS_SHIFT);
-	wqe->rkey = htole32(wr->bind_mw.bind_info.mr->rkey);
-	wqe->va = htole64(wr->bind_mw.bind_info.addr);
-}
-
 static int check_rc_opcode(struct hns_roce_rc_sq_wqe *wqe,
 			   const struct ibv_send_wr *wr)
 {
@@ -1298,9 +1273,6 @@ static int check_rc_opcode(struct hns_roce_rc_sq_wqe *wqe,
 	case IBV_WR_SEND_WITH_INV:
 		wqe->inv_key = htole32(wr->invalidate_rkey);
 		break;
-	case IBV_WR_BIND_MW:
-		set_bind_mw_seg(wqe, wr);
-		break;
 	default:
 		ret = EINVAL;
 		break;
@@ -1334,9 +1306,6 @@ static int set_rc_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
 	hr_reg_write(rc_sq_wqe, RCWQE_MSG_START_SGE_IDX,
 		     sge_info->start_idx & (qp->ex_sge.sge_cnt - 1));
 
-	if (wr->opcode == IBV_WR_BIND_MW)
-		goto wqe_valid;
-
 	wqe += sizeof(struct hns_roce_rc_sq_wqe);
 	dseg = wqe;
 
@@ -1357,7 +1326,6 @@ static int set_rc_wqe(void *wqe, struct hns_roce_qp *qp, struct ibv_send_wr *wr,
 	if (ret)
 		return ret;
 
-wqe_valid:
 	enable_wqe(qp, rc_sq_wqe, qp->sq.head + nreq);
 
 	return 0;
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index abf94673e..af061399c 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -60,7 +60,6 @@ enum {
 	HNS_ROCE_WQE_OP_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
 	HNS_ROCE_WQE_OP_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
 	HNS_ROCE_WQE_OP_FAST_REG_PMR = 0xa,
-	HNS_ROCE_WQE_OP_BIND_MW_TYPE = 0xc,
 	HNS_ROCE_WQE_OP_MASK = 0x1f
 };
 
@@ -84,7 +83,6 @@ enum {
 	HNS_ROCE_SQ_OP_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
 	HNS_ROCE_SQ_OP_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
 	HNS_ROCE_SQ_OP_FAST_REG_PMR = 0xa,
-	HNS_ROCE_SQ_OP_BIND_MW = 0xc,
 };
 
 enum {
@@ -232,11 +230,6 @@ struct hns_roce_rc_sq_wqe {
 #define RCWQE_VA1_L RCWQE_FIELD_LOC(479, 448)
 #define RCWQE_VA1_H RCWQE_FIELD_LOC(511, 480)
 
-#define RCWQE_MW_TYPE RCWQE_FIELD_LOC(256, 256)
-#define RCWQE_MW_RA_EN RCWQE_FIELD_LOC(258, 258)
-#define RCWQE_MW_RR_EN RCWQE_FIELD_LOC(259, 259)
-#define RCWQE_MW_RW_EN RCWQE_FIELD_LOC(260, 260)
-
 struct hns_roce_v2_wqe_data_seg {
 	__le32 len;
 	__le32 lkey;
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index a906e8d58..10fb474af 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -346,69 +346,6 @@ int hns_roce_u_dereg_mr(struct verbs_mr *vmr)
 	return ret;
 }
 
-int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
-		       struct ibv_mw_bind *mw_bind)
-{
-	struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
-	struct ibv_send_wr *bad_wr = NULL;
-	struct ibv_send_wr wr = {};
-	int ret;
-
-	if (bind_info->mw_access_flags & ~(IBV_ACCESS_REMOTE_WRITE |
-	    IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_ATOMIC))
-		return EINVAL;
-
-	wr.opcode = IBV_WR_BIND_MW;
-	wr.next = NULL;
-
-	wr.wr_id = mw_bind->wr_id;
-	wr.send_flags = mw_bind->send_flags;
-
-	wr.bind_mw.mw = mw;
-	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
-	wr.bind_mw.bind_info = mw_bind->bind_info;
-
-	ret = hns_roce_u_v2_post_send(qp, &wr, &bad_wr);
-	if (ret)
-		return ret;
-
-	mw->rkey = wr.bind_mw.rkey;
-
-	return 0;
-}
-
-struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
-{
-	struct ibv_mw *mw;
-	struct ibv_alloc_mw cmd = {};
-	struct ib_uverbs_alloc_mw_resp resp = {};
-
-	mw = malloc(sizeof(*mw));
-	if (!mw)
-		return NULL;
-
-	if (ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd),
-			     &resp, sizeof(resp))) {
-		free(mw);
-		return NULL;
-	}
-
-	return mw;
-}
-
-int hns_roce_u_dealloc_mw(struct ibv_mw *mw)
-{
-	int ret;
-
-	ret = ibv_cmd_dealloc_mw(mw);
-	if (ret)
-		return ret;
-
-	free(mw);
-
-	return 0;
-}
-
 enum {
 	CREATE_CQ_SUPPORTED_COMP_MASK = IBV_CQ_INIT_ATTR_MASK_FLAGS |
 					IBV_CQ_INIT_ATTR_MASK_PD,
-- 
2.33.0
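
With .alloc_mw, .bind_mw and .dealloc_mw dropped from hns_common_ops, libibverbs falls back to its generic handling for these verbs, so ibv_alloc_mw() on an hns device is expected to fail, typically with errno set to EOPNOTSUPP. A minimal application-side probe, assuming that fallback behaviour; the helper name mw_supported() is illustrative only and not part of this patch:

#include <errno.h>
#include <stdbool.h>
#include <infiniband/verbs.h>

/* Probe whether the provider still implements memory windows by trying to
 * allocate, and immediately releasing, a type-1 MW on the given PD. */
static bool mw_supported(struct ibv_pd *pd)
{
	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_1);

	if (!mw)
		return false;	/* expected on hns after this patch, e.g. errno == EOPNOTSUPP */

	ibv_dealloc_mw(mw);
	return true;
}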