driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I98HIN
--------------------------------------------------------------------------
Currently, the driver always allocates 4K pages for the user-space WQE buffer, even on a 64K-page system. This means HW reads WQEs at a granularity of 4K even on a 64K system. Since we support 1024-byte inline, in the SQ inline scenario HW has to switch pages every 4 WQEs. Each page switch introduces a delay of about 400ns, i.e. an average delay of 100ns per packet.
To improve performance, allow the user-mode driver to use a more flexible WQE buffer page size allocation strategy: the user-mode driver can configure the WQE buffer with any page size between 4K and the system PAGESIZE.
This feature must be used together with a matching user-mode driver. To ensure forward compatibility, if the user-mode driver does not support this feature, the kernel continues to use a fixed 4K page size.
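For illustration only (not part of this patch): a minimal sketch of how a user-space provider might opt in, using the HNS_ROCE_UCTX_DYN_QP_PGSZ flag and pageshift field added to the uapi below. The command structs and helper are simplified stand-ins for the real libhns plumbing.

  /*
   * Illustrative sketch only: opt in to dynamic WQE buffer page size.
   * HNS_ROCE_UCTX_DYN_QP_PGSZ and pageshift match the uapi additions
   * below; everything else here is a hypothetical stand-in.
   */
  #include <stdint.h>

  #define HNS_ROCE_UCTX_DYN_QP_PGSZ	(1 << 4)
  #define HNS_HW_PAGE_SHIFT		12	/* 4K, the minimum */

  struct ucontext_cmd {		/* stand-in for the alloc_ucontext cmd */
  	uint32_t config;
  };

  struct create_qp_cmd {		/* stand-in for hns_roce_ib_create_qp */
  	uint64_t buf_addr;
  	uint8_t pageshift;	/* log2 of the WQE buffer page size */
  };

  static void request_dyn_wqe_pgsz(struct ucontext_cmd *uctx_cmd,
  				 struct create_qp_cmd *qp_cmd,
  				 unsigned int sys_page_shift)
  {
  	/* Opt in when the context is allocated... */
  	uctx_cmd->config |= HNS_ROCE_UCTX_DYN_QP_PGSZ;

  	/*
  	 * ...then request any page shift between 4K and the system
  	 * page size; the kernel rejects anything else with -EOPNOTSUPP.
  	 */
  	qp_cmd->pageshift = sys_page_shift;
  }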
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_main.c |  5 ++++
 drivers/infiniband/hw/hns/hns_roce_qp.c   | 32 ++++++++++++++---------
 include/uapi/rdma/hns-abi.h               |  5 +++-
 3 files changed, 29 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3d057222..260052fcb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -473,6 +473,11 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
 		resp.congest_type = hr_dev->caps.cong_cap;

+	if (ucmd.config & HNS_ROCE_UCTX_DYN_QP_PGSZ) {
+		context->config |= HNS_ROCE_UCTX_DYN_QP_PGSZ;
+		resp.config |= HNS_ROCE_UCTX_RSP_DYN_QP_PGSZ;
+	}
+
 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
 	if (ret)
 		goto error_out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 0b7064b0a..33c1a5cef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -641,18 +641,21 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
 }

 static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
-			    struct hns_roce_qp *hr_qp,
+			    struct hns_roce_qp *hr_qp, u8 page_shift,
 			    struct hns_roce_buf_attr *buf_attr)
 {
+	unsigned int page_size = BIT(page_shift);
 	int buf_size;
 	int idx = 0;

 	hr_qp->buff_size = 0;

+	if (page_shift > PAGE_SHIFT || page_shift < HNS_HW_PAGE_SHIFT)
+		return -EOPNOTSUPP;
+
 	/* SQ WQE */
 	hr_qp->sq.offset = 0;
-	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
-					  hr_qp->sq.wqe_shift);
+	buf_size = ALIGN(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
 		buf_attr->region[idx].size = buf_size;
 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
@@ -662,8 +665,7 @@ static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,

 	/* extend SGE WQE in SQ */
 	hr_qp->sge.offset = hr_qp->buff_size;
-	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
-					  hr_qp->sge.sge_shift);
+	buf_size = ALIGN(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift, page_size);
 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
 		buf_attr->region[idx].size = buf_size;
 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
@@ -673,8 +675,7 @@ static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,

 	/* RQ WQE */
 	hr_qp->rq.offset = hr_qp->buff_size;
-	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
-					  hr_qp->rq.wqe_shift);
+	buf_size = ALIGN(hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift, page_size);
 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
 		buf_attr->region[idx].size = buf_size;
 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
@@ -685,8 +686,8 @@ static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
 	if (hr_qp->buff_size < 1)
 		return -EINVAL;

-	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
 	buf_attr->region_count = idx;
+	buf_attr->page_shift = page_shift;

 	return 0;
 }
@@ -742,20 +743,27 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)

 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			struct ib_qp_init_attr *init_attr,
-			struct ib_udata *udata, unsigned long addr)
+			struct ib_udata *udata,
+			struct hns_roce_ib_create_qp *ucmd)
 {
+	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+		struct hns_roce_ucontext, ibucontext);
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
+	u8 page_shift = HNS_HW_PAGE_SHIFT;
 	int ret;

-	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
+	if (uctx && (uctx->config & HNS_ROCE_UCTX_DYN_QP_PGSZ))
+		page_shift = ucmd->pageshift;
+
+	ret = set_wqe_buf_attr(hr_dev, hr_qp, page_shift, &buf_attr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
 		goto err_inline;
 	}
 	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
 				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
-				  udata, addr);
+				  udata, ucmd->buf_addr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
 		goto err_inline;
@@ -1151,7 +1159,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}

-	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
+	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, &ucmd);
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
 		goto err_buf;
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 94e861870..c24af6b68 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -90,7 +90,8 @@ struct hns_roce_ib_create_qp {
 	__u8	log_sq_bb_count;
 	__u8	log_sq_stride;
 	__u8	sq_no_prefetch;
-	__u8	reserved[5];
+	__u8	pageshift;
+	__u8	reserved[4];
 	__aligned_u64 sdb_addr;
 	__aligned_u64 comp_mask; /* Use enum hns_roce_create_qp_comp_mask */
 	__aligned_u64 create_flags;
@@ -119,12 +120,14 @@ enum {
 	HNS_ROCE_EXSGE_FLAGS = 1 << 0,
 	HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
 	HNS_ROCE_CQE_INLINE_FLAGS = 1 << 2,
+	HNS_ROCE_UCTX_DYN_QP_PGSZ = 1 << 4,
 };

 enum {
 	HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0,
 	HNS_ROCE_RSP_RQ_INLINE_FLAGS = 1 << 1,
 	HNS_ROCE_RSP_CQE_INLINE_FLAGS = 1 << 2,
+	HNS_ROCE_UCTX_RSP_DYN_QP_PGSZ = HNS_ROCE_UCTX_DYN_QP_PGSZ,
 };

 struct hns_roce_ib_alloc_ucontext_resp {
From: Yixing Liu <liuyixing1@huawei.com>
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I98HQV
--------------------------------------------------------------------------
In the reset scenario, when the kernel receives the reset signal, it needs to notify user space to stop ringing doorbells.
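For illustration only (not part of this patch), a user-space provider could consume the notification roughly as sketched below, assuming the reset page is exposed through the reset_mmap_key added to the ucontext response in this patch; the fd/offset plumbing is simplified.

  /*
   * Illustrative sketch only: map the kernel's reset page read-only
   * and poll it before ringing a doorbell. The struct mirrors
   * hns_roce_v2_reset_state; reset_mmap_key comes from the
   * alloc_ucontext response added below.
   */
  #include <stdint.h>
  #include <sys/mman.h>

  #define HNS_ROCE_IS_RESETTING	1

  struct hns_roce_v2_reset_state {
  	uint32_t reset_state;	/* set to HNS_ROCE_IS_RESETTING on reset */
  };

  static const volatile struct hns_roce_v2_reset_state *
  map_reset_state(int cmd_fd, uint64_t reset_mmap_key, size_t page_size)
  {
  	/* PROT_READ only: the kernel rejects VM_WRITE/VM_EXEC mappings */
  	void *addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
  			  cmd_fd, (off_t)reset_mmap_key);

  	return addr == MAP_FAILED ? NULL : addr;
  }

Before each doorbell the provider would then check reset_state and stop ringing once it reads HNS_ROCE_IS_RESETTING.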
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  5 ++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 55 ++++++++++++++++++++-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  4 ++
 drivers/infiniband/hw/hns/hns_roce_main.c   | 44 +++++++++++++++++
 include/uapi/rdma/hns-abi.h                 |  2 +
 5 files changed, 108 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c08d02c14..7e65c05cb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -61,6 +61,7 @@

 #define HNS_ROCE_CEQ			0
 #define HNS_ROCE_AEQ			1
+#define HNS_ROCE_IS_RESETTING		1

 #define HNS_ROCE_CEQE_SIZE 0x4
 #define HNS_ROCE_AEQE_SIZE 0x10
@@ -197,6 +198,7 @@ struct hns_roce_uar {
 enum hns_roce_mmap_type {
 	HNS_ROCE_MMAP_TYPE_DB = 1,
 	HNS_ROCE_MMAP_TYPE_DWQE,
+	HNS_ROCE_MMAP_TYPE_RESET = 4,
 };

 struct hns_user_mmap_entry {
@@ -211,6 +213,7 @@ struct hns_roce_ucontext {
 	struct list_head	page_list;
 	struct mutex		page_mutex;
 	struct hns_user_mmap_entry *db_mmap_entry;
+	struct hns_user_mmap_entry *reset_mmap_entry;
 	u32			config;
 };

@@ -1041,6 +1044,8 @@ struct hns_roce_dev {
 	int			loop_idc;
 	u32			sdb_offset;
 	u32			odb_offset;
+	struct page		*reset_page; /* store reset state */
+	void			*reset_kaddr; /* addr of reset page */
 	const struct hns_roce_hw *hw;
 	void			*priv;
 	struct workqueue_struct *irq_workq;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 988b4aeda..4936ba287 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2607,6 +2607,31 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)
 	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
 }

+static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
+{
+	hr_dev->reset_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!hr_dev->reset_page)
+		return -ENOMEM;
+
+	hr_dev->reset_kaddr = vmap(&hr_dev->reset_page, 1, VM_MAP, PAGE_KERNEL);
+	if (!hr_dev->reset_kaddr)
+		goto err_with_vmap;
+
+	return 0;
+
+err_with_vmap:
+	put_page(hr_dev->reset_page);
+	return -ENOMEM;
+}
+
+static void hns_roce_v2_put_reset_page(struct hns_roce_dev *hr_dev)
+{
+	vunmap(hr_dev->reset_kaddr);
+	hr_dev->reset_kaddr = NULL;
+	put_page(hr_dev->reset_page);
+	hr_dev->reset_page = NULL;
+}
+
 static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_v2_priv *priv = hr_dev->priv;
@@ -2971,14 +2996,21 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;

+	ret = hns_roce_v2_get_reset_page(hr_dev);
+	if (ret) {
+		dev_err(hr_dev->dev,
+			"reset state init failed, ret = %d.\n", ret);
+		return ret;
+	}
+
 	/* The hns ROCEE requires the extdb info to be cleared before using */
 	ret = hns_roce_clear_extdb_list_info(hr_dev);
 	if (ret)
-		return ret;
+		goto err_clear_extdb_failed;

 	ret = get_hem_table(hr_dev);
 	if (ret)
-		return ret;
+		goto err_clear_extdb_failed;

 	if (hr_dev->is_vf)
 		return 0;
@@ -2993,6 +3025,8 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)

 err_llm_init_failed:
 	put_hem_table(hr_dev);
+err_clear_extdb_failed:
+	hns_roce_v2_put_reset_page(hr_dev);

 	return ret;
 }
@@ -3004,6 +3038,8 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 	if (!hr_dev->is_vf)
 		hns_roce_free_link_table(hr_dev);

+	hns_roce_v2_put_reset_page(hr_dev);
+
 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
 		free_dip_list(hr_dev);
 }
@@ -7166,6 +7202,18 @@ int hns_roce_bond_uninit_client(struct hns_roce_bond_group *bond_grp,

 	return 0;
 }
+
+static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v2_reset_state *state;
+
+	state = (struct hns_roce_v2_reset_state *)hr_dev->reset_kaddr;
+
+	state->reset_state = HNS_ROCE_IS_RESETTING;
+	/* Ensure reset state was flushed in memory */
+	wmb();
+}
+
 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
 {
 	struct hns_roce_dev *hr_dev;
@@ -7184,6 +7232,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)

 	hr_dev->active = false;
 	hr_dev->dis_db = true;
+
+	hns_roce_v2_reset_notify_user(hr_dev);
+
 	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index dd64e0b95..81a21d96e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1313,6 +1313,10 @@ struct hns_roce_link_table {
 #define HNS_ROCE_EXT_LLM_ENTRY(addr, id) (((id) << (64 - 12)) | ((addr) >> 12))
 #define HNS_ROCE_EXT_LLM_MIN_PAGES(que_num) ((que_num) * 4 + 2)

+struct hns_roce_v2_reset_state {
+	u32 reset_state; /* stored to use in user space */
+};
+
 struct hns_roce_v2_free_mr {
 	struct hns_roce_qp *rsv_qp[HNS_ROCE_FREE_MR_USED_QP_NUM];
 	struct hns_roce_cq *rsv_cq;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 260052fcb..43512aa39 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -392,6 +392,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 						  ucontext, &entry->rdma_entry, length, 0);
 		break;
 	case HNS_ROCE_MMAP_TYPE_DWQE:
+	case HNS_ROCE_MMAP_TYPE_RESET:
 		ret = rdma_user_mmap_entry_insert_range(
 				ucontext, &entry->rdma_entry, length, 1,
 				U32_MAX);
@@ -430,6 +431,26 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
 	return 0;
 }

+static void hns_roce_dealloc_reset_entry(struct hns_roce_ucontext *context)
+{
+	if (context->reset_mmap_entry)
+		rdma_user_mmap_entry_remove(&context->reset_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_reset_entry(struct ib_ucontext *uctx)
+{
+	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+
+	context->reset_mmap_entry = hns_roce_user_mmap_entry_insert(uctx,
+		(u64)hr_dev->reset_kaddr, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_RESET);
+
+	if (!context->reset_mmap_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 				   struct ib_udata *udata)
 {
@@ -437,6 +458,7 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
 	struct hns_roce_ib_alloc_ucontext_resp resp = {};
 	struct hns_roce_ib_alloc_ucontext ucmd = {};
+	struct rdma_user_mmap_entry *rdma_entry;
 	int ret = -EAGAIN;

 	if (!hr_dev->active)
@@ -492,6 +514,15 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 		mutex_init(&context->page_mutex);
 	}

+	ret = hns_roce_alloc_reset_entry(uctx);
+	if (ret)
+		goto error_fail_reset_entry;
+
+	if (context->reset_mmap_entry) {
+		rdma_entry = &context->reset_mmap_entry->rdma_entry;
+		resp.reset_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+	}
+
 	resp.cqe_size = hr_dev->caps.cqe_sz;

 	ret = ib_copy_to_udata(udata, &resp,
@@ -502,6 +533,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	return 0;

 error_fail_copy_to_udata:
+	hns_roce_dealloc_reset_entry(context);
+
+error_fail_reset_entry:
 	hns_roce_dealloc_uar_entry(context);

 error_fail_uar_entry:
@@ -519,6 +553,7 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

 	hns_roce_dealloc_uar_entry(context);
+	hns_roce_dealloc_reset_entry(context);

 	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
@@ -546,6 +581,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 	case HNS_ROCE_MMAP_TYPE_DWQE:
 		prot = pgprot_device(vma->vm_page_prot);
 		break;
+	case HNS_ROCE_MMAP_TYPE_RESET:
+		if (vma->vm_flags & (VM_WRITE | VM_EXEC)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		prot = vma->vm_page_prot;
+		pfn = page_to_pfn(hr_dev->reset_page);
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index c24af6b68..1d5161201 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -139,6 +139,8 @@ struct hns_roce_ib_alloc_ucontext_resp {
 	__u32	max_inline_data;
 	__u8	congest_type;
 	__u8	reserved0[7];
+	__aligned_u64 rsv_for_dca[2];
+	__aligned_u64 reset_mmap_key;
 };

 struct hns_roce_ib_alloc_ucontext {
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I98HQV
--------------------------------------------------------------------------
Currently, the driver notifies user space that a reset has occurred by setting a shared variable to a non-zero value. However, if user space fails to read this variable in time, the notification is lost, because the variable is cleared once the reset completes.
This patch adds a new reset variable, hw_ready, to ensure that user space can determine at any time whether the driver has been reset. A non-zero value is assigned to this variable at device initialization, and it permanently becomes 0 once a reset occurs: during the reset the driver writes 0 to it, and after the reset the mapping is backed by a page of all zeros. The userspace driver can thus judge whether the driver has been reset by checking whether this variable is 0.
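For illustration only (not part of this patch), the user-space check this enables might look like the sketch below; hw_ready mirrors the field added to hns_roce_v2_reset_state, and the helper name is hypothetical.

  /*
   * Illustrative sketch only. hw_ready is non-zero from device init
   * until the first reset and reads as 0 ever after (during reset it
   * is cleared; after reset the mapping is backed by a zeroed page),
   * so a late reader cannot miss it, unlike the transient
   * reset_state flag.
   */
  #include <stdint.h>

  struct hns_roce_v2_reset_state {
  	uint32_t reset_state;	/* transient: set while resetting */
  	uint32_t hw_ready;	/* non-zero until the first reset */
  };

  static inline int
  reset_occurred(const volatile struct hns_roce_v2_reset_state *st)
  {
  	return st->hw_ready == 0;
  }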
Fixes: e8b1fec497a0 ("RDMA/hns: Kernel notify usr space to stop ring db")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6 ++++++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1 +
 2 files changed, 7 insertions(+)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 4936ba287..57ded25df 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2609,6 +2609,8 @@ static void free_dip_list(struct hns_roce_dev *hr_dev)

 static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
 {
+	struct hns_roce_v2_reset_state *state;
+
 	hr_dev->reset_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!hr_dev->reset_page)
 		return -ENOMEM;
@@ -2617,6 +2619,9 @@ static int hns_roce_v2_get_reset_page(struct hns_roce_dev *hr_dev)
 	if (!hr_dev->reset_kaddr)
 		goto err_with_vmap;

+	state = hr_dev->reset_kaddr;
+	state->hw_ready = ~state->hw_ready;
+
 	return 0;

 err_with_vmap:
@@ -7210,6 +7215,7 @@ static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev)
 	state = (struct hns_roce_v2_reset_state *)hr_dev->reset_kaddr;

 	state->reset_state = HNS_ROCE_IS_RESETTING;
+	state->hw_ready = 0;
 	/* Ensure reset state was flushed in memory */
 	wmb();
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 81a21d96e..2b4a1f788 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1315,6 +1315,7 @@ struct hns_roce_link_table {

 struct hns_roce_v2_reset_state {
 	u32 reset_state; /* stored to use in user space */
+	u32 hw_ready;
 };

 struct hns_roce_v2_free_mr {
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I98RAX
--------------------------------------------------------------------------
The hns_roce_alloc_ucontext() function contains too much configuration code. Extract it into a new function, hns_roce_get_uctx_config(), to improve readability.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Juan Zhou <zhoujuan51@h-partners.com>
---
 drivers/infiniband/hw/hns/hns_roce_main.c | 56 +++++++++++++----------
 1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 43512aa39..e65310f13 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -451,6 +451,37 @@ static int hns_roce_alloc_reset_entry(struct ib_ucontext *uctx)
 	return 0;
 }

+static void hns_roce_get_uctx_config(struct hns_roce_dev *hr_dev,
+				     struct hns_roce_ucontext *context,
+				     struct hns_roce_ib_alloc_ucontext *ucmd,
+				     struct hns_roce_ib_alloc_ucontext_resp *resp)
+{
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+		context->config = ucmd->config & HNS_ROCE_EXSGE_FLAGS;
+
+	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
+		resp->config |= HNS_ROCE_RSP_EXSGE_FLAGS;
+		resp->max_inline_data = hr_dev->caps.max_sq_inline;
+	}
+
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+		context->config |= ucmd->config & HNS_ROCE_RQ_INLINE_FLAGS;
+		if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
+			resp->config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
+	}
+
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
+		context->config |= ucmd->config & HNS_ROCE_CQE_INLINE_FLAGS;
+		if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
+			resp->config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
+	}
+
+	if (ucmd->config & HNS_ROCE_UCTX_DYN_QP_PGSZ) {
+		context->config |= HNS_ROCE_UCTX_DYN_QP_PGSZ;
+		resp->config |= HNS_ROCE_UCTX_RSP_DYN_QP_PGSZ;
+	}
+}
+
 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 				   struct ib_udata *udata)
 {
@@ -472,33 +503,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	if (ret)
 		goto error_out;

-	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
-		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
-
-	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
-		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
-		resp.max_inline_data = hr_dev->caps.max_sq_inline;
-	}
-
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
-		context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
-		if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
-			resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
-	}
-
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
-		context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
-		if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
-			resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
-	}
-
 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
 		resp.congest_type = hr_dev->caps.cong_cap;

-	if (ucmd.config & HNS_ROCE_UCTX_DYN_QP_PGSZ) {
-		context->config |= HNS_ROCE_UCTX_DYN_QP_PGSZ;
-		resp.config |= HNS_ROCE_UCTX_RSP_DYN_QP_PGSZ;
-	}
+	hns_roce_get_uctx_config(hr_dev, context, &ucmd, &resp);

 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
 	if (ret)