Add a new mmap mode support for hns. In the new mmap mode, the userspace driver uses an mmap offset (key) provided by the kernel driver to complete the mmap, instead of a hard-coded offset.
Signed-off-by: Chengchang Tang tangchengchang@huawei.com --- kernel-headers/rdma/hns-abi.h | 21 ++++++++++- providers/hns/hns_roce_u.c | 86 ++++++++++++++++++++++++++++++------------ providers/hns/hns_roce_u.h | 1 + providers/hns/hns_roce_u_abi.h | 2 +- 4 files changed, 84 insertions(+), 26 deletions(-)
diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h index 42b1776..eabdfd4 100644 --- a/kernel-headers/rdma/hns-abi.h +++ b/kernel-headers/rdma/hns-abi.h @@ -83,11 +83,30 @@ struct hns_roce_ib_create_qp_resp { __aligned_u64 cap_flags; };
+enum { + HNS_ROCE_ALLOC_UCTX_COMP_CONFIG = 1 << 0, +}; + +enum { + HNS_ROCE_UCTX_CONFIG_MMAP_KEY = 1 << 0, +}; + +enum { + HNS_ROCE_UCTX_SET_MMAP_KEY = 1 << 0, +}; + +struct hns_roce_ib_alloc_ucontext { + __u32 comp; + __u32 config; +}; + struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 cqe_size; __u32 srq_tab_size; - __u32 reserved; + __u8 config; + __u8 rsv[3]; + __aligned_u64 db_mmap_key; };
struct hns_roce_ib_alloc_pd_resp { diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c index 3b31ad3..bdf2fff 100644 --- a/providers/hns/hns_roce_u.c +++ b/providers/hns/hns_roce_u.c @@ -95,16 +95,67 @@ static const struct verbs_context_ops hns_common_ops = { .get_srq_num = hns_roce_u_get_srq_num, };
+static int hns_roce_mmap(struct hns_roce_device *hr_dev, + struct hns_roce_context *context, int cmd_fd, + uint64_t db_mmap_key) +{ + int page_size = hr_dev->page_size; + + context->uar = mmap(NULL, page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, cmd_fd, db_mmap_key); + if (context->uar == MAP_FAILED) + return -EINVAL; + + return 0; +} + +static int hns_roce_legacy_mmap(struct hns_roce_device *hr_dev, + struct hns_roce_context *context, int cmd_fd) +{ + int offset = 0; + + context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, cmd_fd, offset); + if (context->uar == MAP_FAILED) + return -EINVAL; + + offset += hr_dev->page_size; + + if (hr_dev->hw_version == HNS_ROCE_HW_VER1) { + /* + * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ, + * a pointer of CQ need 2B size + */ + context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE, + PROT_READ | PROT_WRITE, MAP_SHARED, + cmd_fd, offset); + if (context->cq_tptr_base == MAP_FAILED) + goto db_free; + } + return 0; + +db_free: + munmap(context->uar, hr_dev->page_size); + context->uar = NULL; + return -EINVAL; +} + +static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd) +{ + cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_CONFIG; + cmd->config = HNS_ROCE_UCTX_SET_MMAP_KEY; +} + static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, int cmd_fd, void *private_data) { struct hns_roce_device *hr_dev = to_hr_dev(ibdev); struct hns_roce_alloc_ucontext_resp resp = {}; + struct hns_roce_alloc_ucontext cmd = {}; struct ibv_device_attr dev_attrs; struct hns_roce_context *context; - struct ibv_get_context cmd; - int offset = 0; + int ret; int i;
context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx, @@ -112,10 +163,13 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, if (!context) return NULL;
- if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof(cmd), + ucontext_set_cmd(&cmd); + if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd), &resp.ibv_resp, sizeof(resp))) goto err_free;
+ context->mmap_key_support = resp.config & HNS_ROCE_UCTX_CONFIG_MMAP_KEY; + if (!resp.cqe_size) context->cqe_size = HNS_ROCE_CQE_SIZE; else if (resp.cqe_size <= HNS_ROCE_V3_CQE_SIZE) @@ -153,26 +207,14 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, context->max_cqe = dev_attrs.max_cqe; context->max_srq_wr = dev_attrs.max_srq_wr; context->max_srq_sge = dev_attrs.max_srq_sge; + if (context->mmap_key_support) + ret = hns_roce_mmap(hr_dev, context, cmd_fd, resp.db_mmap_key); + else + ret = hns_roce_legacy_mmap(hr_dev, context, cmd_fd);
- context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE, - MAP_SHARED, cmd_fd, offset); - if (context->uar == MAP_FAILED) + if (ret) goto err_free;
- offset += hr_dev->page_size; - - if (hr_dev->hw_version == HNS_ROCE_HW_VER1) { - /* - * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ, - * a pointer of CQ need 2B size - */ - context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE, - PROT_READ | PROT_WRITE, MAP_SHARED, - cmd_fd, offset); - if (context->cq_tptr_base == MAP_FAILED) - goto db_free; - } - pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
verbs_set_ops(&context->ibv_ctx, &hns_common_ops); @@ -180,10 +222,6 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
return &context->ibv_ctx;
-db_free: - munmap(context->uar, hr_dev->page_size); - context->uar = NULL; - err_free: verbs_uninit_context(&context->ibv_ctx); free(context); diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index 0d7abd8..126c9d3 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -180,6 +180,7 @@ struct hns_roce_context { unsigned int max_srq_sge; int max_cqe; unsigned int cqe_size; + bool mmap_key_support; };
struct hns_roce_pd { diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h index e56f9d3..295808b 100644 --- a/providers/hns/hns_roce_u_abi.h +++ b/providers/hns/hns_roce_u_abi.h @@ -42,7 +42,7 @@ DECLARE_DRV_CMD(hns_roce_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD, DECLARE_DRV_CMD(hns_roce_create_cq, IB_USER_VERBS_CMD_CREATE_CQ, hns_roce_ib_create_cq, hns_roce_ib_create_cq_resp); DECLARE_DRV_CMD(hns_roce_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT, - empty, hns_roce_ib_alloc_ucontext_resp); + hns_roce_ib_alloc_ucontext, hns_roce_ib_alloc_ucontext_resp);
DECLARE_DRV_CMD(hns_roce_create_qp, IB_USER_VERBS_CMD_CREATE_QP, hns_roce_ib_create_qp, hns_roce_ib_create_qp_resp);