
From: Zhu Yanjun <yanjun.zhu@linux.dev>

stable inclusion
from stable-v5.10.137
commit 3ef491b26c720a87fcfbd78b7dc8eb83d9753fe6
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60PLB
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...

--------------------------------

[ Upstream commit fd5382c5805c4bcb50fd25b7246247d3f7114733 ]

In rxe_create_qp(), rxe_qp_from_init() is called to initialize the qp,
but internal state such as the spinlocks is not set up until
rxe_qp_init_req(). If an error occurs before that point, the unwind
calls rxe_cleanup() and eventually rxe_qp_do_cleanup()/rxe_cleanup_task(),
which oops when they try to take the uninitialized spinlocks.

Move the spinlock initializations earlier, before any failure can occur.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20220731063621.298405-1-yanjun.zhu@linux.dev
Reported-by: syzbot+833061116fa28df97f3b@syzkaller.appspotmail.com
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
---
 drivers/infiniband/sw/rxe/rxe_qp.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index a1b79015e6f2..2847ab4d9a5f 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -184,6 +184,14 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 	spin_lock_init(&qp->grp_lock);
 	spin_lock_init(&qp->state_lock);
 
+	spin_lock_init(&qp->req.task.state_lock);
+	spin_lock_init(&qp->resp.task.state_lock);
+	spin_lock_init(&qp->comp.task.state_lock);
+
+	spin_lock_init(&qp->sq.sq_lock);
+	spin_lock_init(&qp->rq.producer_lock);
+	spin_lock_init(&qp->rq.consumer_lock);
+
 	atomic_set(&qp->ssn, 0);
 	atomic_set(&qp->skb_out, 0);
 }
@@ -239,7 +247,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->req.opcode		= -1;
 	qp->comp.opcode		= -1;
 
-	spin_lock_init(&qp->sq.sq_lock);
 	skb_queue_head_init(&qp->req_pkts);
 
 	rxe_init_task(rxe, &qp->req.task, qp,
@@ -289,9 +296,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		}
 	}
 
-	spin_lock_init(&qp->rq.producer_lock);
-	spin_lock_init(&qp->rq.consumer_lock);
-
 	skb_queue_head_init(&qp->resp_pkts);
 
 	rxe_init_task(rxe, &qp->resp.task, qp,
-- 
2.20.1
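
For context (not part of the patch): below is a minimal userspace analogue in plain C with pthreads of the bug described above. The names obj, obj_init() and obj_cleanup() are illustrative and not taken from the rxe driver; the point is that a shared cleanup path which unconditionally takes a lock is only safe if that lock is initialized before any failure can reach the cleanup, which is what the hunks above do for the qp spinlocks.

/* Illustrative sketch only, not rxe code. Compile with: cc -pthread demo.c */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;	/* analogue of qp->sq.sq_lock etc. */
	int *buf;
};

static int obj_init(struct obj *o, int fail_early)
{
	/* Fix pattern: set up the lock before anything that can fail,
	 * so obj_cleanup() can always take it safely.
	 */
	pthread_mutex_init(&o->lock, NULL);

	if (fail_early)
		return -1;	/* early failure; the lock is already valid */

	o->buf = malloc(64);
	return o->buf ? 0 : -1;
}

static void obj_cleanup(struct obj *o)
{
	/* Shared unwind path, like rxe_qp_do_cleanup(): it assumes the
	 * lock exists. With the old ordering this ran against an
	 * uninitialized lock whenever init failed early.
	 */
	pthread_mutex_lock(&o->lock);
	free(o->buf);
	pthread_mutex_unlock(&o->lock);
	pthread_mutex_destroy(&o->lock);
}

int main(void)
{
	struct obj o = { .buf = NULL };

	if (obj_init(&o, 1))
		obj_cleanup(&o);	/* safe only because the lock was initialized first */
	return 0;
}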