From: Yixian Liu <liuyixian@huawei.com>
mainline inclusion
from mainline-v5.5
commit ec6adad0a1e3ef3064c12146b00c2bd1e6835b0c
category: bugfix
bugzilla: NA
CVE: NA
There is no need to define max_post in hns_roce_wq, as it always holds the same value as wqe_cnt.
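With max_post gone, the overflow test in hns_roce_wq_overflow() compares the number of outstanding WQEs directly against wqe_cnt. The following standalone sketch (hypothetical names, not the driver code) illustrates the ring accounting this relies on: head and tail are free-running u32 counters, so "head - tail" yields the number of outstanding WQEs even across wraparound, and the queue is considered full once that count plus the new requests reaches wqe_cnt.

/*
 * Standalone illustration of the head/tail ring accounting used by
 * hns_roce_wq_overflow(); names here are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_wq {
	uint32_t head;    /* bumped by the producer when posting a WR */
	uint32_t tail;    /* bumped by the consumer on completion */
	uint32_t wqe_cnt; /* total WQE slots in the ring */
};

static bool demo_wq_overflow(const struct demo_wq *wq, uint32_t nreq)
{
	/* unsigned subtraction handles counter wraparound */
	uint32_t cur = wq->head - wq->tail; /* outstanding WQEs */

	return cur + nreq >= wq->wqe_cnt;
}

int main(void)
{
	/* counters near wraparound: 3 WQEs outstanding in an 8-slot ring */
	struct demo_wq wq = {
		.head = 1,
		.tail = UINT32_MAX - 1,
		.wqe_cnt = 8,
	};

	printf("post 4 more: overflow=%d\n", demo_wq_overflow(&wq, 4)); /* 0 */
	printf("post 5 more: overflow=%d\n", demo_wq_overflow(&wq, 5)); /* 1 */
	return 0;
}

Because the occupancy is derived from free-running unsigned counters, no extra capacity field is needed; wqe_cnt alone bounds the ring.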
Link: https://lore.kernel.org/r/1572952082-6681-2-git-send-email-liweihang@hisilic...
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Shunfeng Yang <yangshunfeng2@huawei.com>
Reviewed-by: chunzhi hu <huchunzhi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 1 -
 drivers/infiniband/hw/hns/hns_roce_qp.c     | 8 ++++----
 2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c5edeaa6ee8e8..a1f1bd855d921 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -491,7 +491,6 @@ struct hns_roce_wq {
 	u64		*wrid;     /* Work request ID */
 	spinlock_t	lock;
 	int		wqe_cnt;  /* WQE num */
-	u32		max_post;
 	int		max_gs;
 	u32		rsv_sge;
 	int		offset;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 387f475b4c537..153f42a4e3c8b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -421,7 +421,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 					    * hr_qp->rq.max_gs);
 	}
 
-	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+	cap->max_recv_wr = hr_qp->rq.wqe_cnt;
 	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
 
 	return 0;
@@ -736,7 +736,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->buff_size = size;
 
 	/* Get wr and sge number which send */
-	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+	cap->max_send_wr = hr_qp->sq.wqe_cnt;
 	cap->max_send_sge = hr_qp->sq.max_gs;
 
 	/* We don't support inline sends for kernel QPs (yet) */
@@ -1522,7 +1522,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 	u32 cur;
 
 	cur = hr_wq->head - hr_wq->tail;
-	if (likely(cur + nreq < hr_wq->max_post))
+	if (likely(cur + nreq < hr_wq->wqe_cnt))
 		return false;
 
 	hr_cq = to_hr_cq(ib_cq);
@@ -1530,7 +1530,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 	cur = hr_wq->head - hr_wq->tail;
 	spin_unlock(&hr_cq->lock);
 
-	return cur + nreq >= hr_wq->max_post;
+	return cur + nreq >= hr_wq->wqe_cnt;
 }
 EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);