driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7DCX0
---------------------------------------------------------------------------
Currently, the affinity between the QP cache and the CQ cache is not considered when assigning a QPN, which degrades the HW message rate.
Allocate the QPN from the QP cache with better CQ affinity to get better performance.
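
To make the selection rule easier to review, below is a minimal user-space sketch of the bank mapping introduced by this patch. The constants mirror CQ_BANKID_MASK and the driver's eight QP banks; the standalone main() and its example CQN are illustrative only, not driver code:

  /* Illustration only: which QP banks are candidates for a given
   * send CQ, assuming 8 QP banks and 4 CQ banks as in this driver.
   */
  #include <stdio.h>

  #define QP_BANK_NUM    8     /* HNS_ROCE_QP_BANK_NUM in the driver */
  #define CQ_BANKID_MASK 0x3   /* GENMASK(1, 0) */

  /* QP banks 2k and 2k+1 share affinity with CQ bank k. */
  static unsigned char get_affinity_cq_bank(unsigned char qp_bank)
  {
          return (qp_bank >> 1) & CQ_BANKID_MASK;
  }

  int main(void)
  {
          unsigned long cqn = 5;  /* example send CQN: CQ bank = 5 & 0x3 = 1 */
          unsigned char i;

          for (i = 0; i < QP_BANK_NUM; i++)
                  if (get_affinity_cq_bank(i) == (cqn & CQ_BANKID_MASK))
                          printf("QP bank %u is a candidate\n", i);

          return 0;
  }

With these values each send CQ maps to exactly two candidate QP banks, and the allocator then picks the least loaded of those candidates instead of scanning all eight banks.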
Fixes: c746005b2800 ("RDMA/hns: Create QP with selected QPN for bank load balance")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  1 +
 drivers/infiniband/hw/hns/hns_roce_qp.c     | 29 ++++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 01c16a4d97c5..9694cef03adb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -101,6 +101,7 @@
 #define HNS_ROCE_CQ_BANK_NUM 4
 
 #define CQ_BANKID_SHIFT 2
+#define CQ_BANKID_MASK GENMASK(1, 0)
 
 enum {
 	SERV_TYPE_RC,
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f64464fbad41..2c11b1a0b0c7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -171,14 +171,29 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
 	}
 }
 
-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
+static u8 get_affinity_cq_bank(u8 qp_bank)
 {
-	u32 least_load = bank[0].inuse;
+	return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+				       struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+	struct ib_cq *scq = init_attr->send_cq;
+	u32 least_load = INVALID_LOAD_QPNUM;
+	unsigned long cqn = 0;
 	u8 bankid = 0;
 	u32 bankcnt;
 	u8 i;
 
-	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
+	if (scq)
+		cqn = to_hr_cq(scq)->cqn;
+
+	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+			continue;
+
 		bankcnt = bank[i].inuse;
 		if (bankcnt < least_load) {
 			least_load = bankcnt;
@@ -210,7 +225,9 @@ static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
 
 	return 0;
 }
-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+		     struct ib_qp_init_attr *init_attr)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 	unsigned long num = 0;
@@ -222,7 +239,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		hr_qp->doorbell_qpn = 1;
 	} else {
 		mutex_lock(&qp_table->bank_mutex);
-		bankid = get_least_load_bankid_for_qp(qp_table->bank);
+		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
 
 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
 					    &num);
@@ -1218,7 +1235,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 	}
 
-	ret = alloc_qpn(hr_dev, hr_qp);
+	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
 		goto err_qpn;