From: Jens Axboe <axboe@kernel.dk>
mainline inclusion
from mainline-5.5-rc3
commit 0b416c3e1345fd696db4c422643468d844410877
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
If we have to punt the recvmsg to async context, we copy all the context. But since the iovec used can be either on-stack (if small) or dynamically allocated, if it's on-stack, then we need to ensure we reset the iov pointer. If we don't, then we're reusing old stack data, and that can lead to -EFAULTs if things get overwritten.
Ensure we retain the right pointers for the iov, and free it as well if we end up having to go beyond UIO_FASTIOV number of vectors.
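To make the life cycle concrete, the pattern at play is the usual fast_iov one: the async context carries a small inline iovec array plus an iov pointer that aims either at that array or at a kmalloc'd one once the message needs more than UIO_FASTIOV vectors. The sketch below is only a plain userspace illustration of that pattern, not the io_uring code itself; struct async_msg, punt_copy() and async_msg_cleanup() are made-up names.

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define FAST_IOV 8	/* stand-in for UIO_FASTIOV, illustrative only */

/*
 * Simplified analog of the async msghdr state: a small inline iovec
 * array and a pointer that refers either to that array or to a
 * heap-allocated one when more vectors are needed.
 */
struct async_msg {
	struct iovec fast_iov[FAST_IOV];
	struct iovec *iov;	/* == fast_iov, or a heap allocation */
	size_t nr_vecs;
};

/*
 * Copy a short-lived async_msg into a long-lived one before punting.
 * The crucial step is re-seating ->iov on the destination's own
 * fast_iov; copying the pointer verbatim would leave it aimed at the
 * caller's (possibly reused) stack, which is the stale-data bug here.
 */
static void punt_copy(struct async_msg *dst, const struct async_msg *src)
{
	memcpy(dst, src, sizeof(*dst));
	if (src->iov == src->fast_iov)
		dst->iov = dst->fast_iov;	/* reset, don't reuse old stack */
	/* else: iov points at a heap allocation that stays valid */
}

/* Completion path: only the heap-allocated case needs freeing. */
static void async_msg_cleanup(struct async_msg *msg)
{
	if (msg->iov != msg->fast_iov)
		free(msg->iov);
}

When the pointer aims at the inline array it has to be re-seated on every copy, and freed only when it does not, which is exactly what the hunks below do for the sendmsg/recvmsg paths.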
Fixes: 03b1230ca12a ("io_uring: ensure async punted sendmsg/recvmsg requests copy data")
Reported-by: 李通洲 <carter.li@eoitek.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 40 ++++++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7c388b4331a3..2dd3f52614ea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2040,6 +2040,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		      struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+	struct io_async_msghdr *kmsg = NULL;
 	struct socket *sock;
 	int ret;
 
@@ -2050,7 +2051,6 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (sock) {
 		struct io_async_ctx io, *copy;
 		struct sockaddr_storage addr;
-		struct msghdr *kmsg;
 		unsigned flags;
 
 		flags = READ_ONCE(sqe->msg_flags);
@@ -2060,17 +2060,21 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			flags |= MSG_DONTWAIT;
 
 		if (req->io) {
-			kmsg = &req->io->msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &req->io->msg;
+			kmsg->msg.msg_name = &addr;
+			/* if iov is set, it's allocated already */
+			if (!kmsg->iov)
+				kmsg->iov = kmsg->fast_iov;
+			kmsg->msg.msg_iter.iov = kmsg->iov;
 		} else {
-			kmsg = &io.msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &io.msg;
+			kmsg->msg.msg_name = &addr;
 			ret = io_sendmsg_prep(req, &io);
 			if (ret)
 				goto out;
 		}
 
-		ret = __sys_sendmsg_sock(sock, kmsg, flags);
+		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 		if (force_nonblock && ret == -EAGAIN) {
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
@@ -2081,13 +2085,15 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->io = copy;
 			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
 			req->sqe = &req->io->sqe;
-			return ret;
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 	}
 
 out:
+	if (kmsg && kmsg->iov != kmsg->fast_iov)
+		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
 		req_set_fail_links(req);
@@ -2119,6 +2125,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		      struct io_kiocb **nxt, bool force_nonblock)
 {
 #if defined(CONFIG_NET)
+	struct io_async_msghdr *kmsg = NULL;
 	struct socket *sock;
 	int ret;
 
@@ -2130,7 +2137,6 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		struct user_msghdr __user *msg;
 		struct io_async_ctx io, *copy;
 		struct sockaddr_storage addr;
-		struct msghdr *kmsg;
 		unsigned flags;
 
 		flags = READ_ONCE(sqe->msg_flags);
@@ -2142,17 +2148,21 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		msg = (struct user_msghdr __user *) (unsigned long)
 			READ_ONCE(sqe->addr);
 		if (req->io) {
-			kmsg = &req->io->msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &req->io->msg;
+			kmsg->msg.msg_name = &addr;
+			/* if iov is set, it's allocated already */
+			if (!kmsg->iov)
+				kmsg->iov = kmsg->fast_iov;
+			kmsg->msg.msg_iter.iov = kmsg->iov;
 		} else {
-			kmsg = &io.msg.msg;
-			kmsg->msg_name = &addr;
+			kmsg = &io.msg;
+			kmsg->msg.msg_name = &addr;
 			ret = io_recvmsg_prep(req, &io);
 			if (ret)
 				goto out;
 		}
 
-		ret = __sys_recvmsg_sock(sock, kmsg, msg, io.msg.uaddr, flags);
+		ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
 		if (force_nonblock && ret == -EAGAIN) {
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
@@ -2163,13 +2173,15 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->io = copy;
 			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
 			req->sqe = &req->io->sqe;
-			return ret;
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 	}
 
 out:
+	if (kmsg && kmsg->iov != kmsg->fast_iov)
+		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
 		req_set_fail_links(req);