From: Jens Axboe <axboe@kernel.dk>
stable inclusion
from stable-v5.10.214
commit a6771f343af90a25f3a14911634562bb5621df02
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9P06W
CVE: CVE-2023-52656
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Commit 6e5e6d274956305f1fc0340522b38f5f5be74bdb upstream.
This is dead code after we dropped support for passing io_uring fds over SCM_RIGHTS, get rid of it.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Conflicts:
	io_uring/io_uring.c
	fs/io_uring.c
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/io_uring.c | 200 +-------------------------------------------------
 1 file changed, 1 insertion(+), 199 deletions(-)
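For review convenience: with the CONFIG_UNIX branches gone, the file-table teardown paths below reduce to plain fput() handling. A minimal sketch reconstructed from the retained hunk context (not a verbatim copy of the post-patch tree; helper names such as io_file_from_index() are taken from the diff context):

static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	/* No ring socket / SCM_RIGHTS bookkeeping any more: just drop each file. */
	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(ctx, i);

		if (file)
			fput(file);
	}
}

static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
{
	/* Previously this had to walk the ring socket's receive queue. */
	fput(file);
}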
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 11954855aba2..853a3ed349e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -64,7 +64,6 @@
 #include <linux/net.h>
 #include <net/sock.h>
 #include <net/af_unix.h>
-#include <net/scm.h>
 #include <linux/anon_inodes.h>
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
@@ -302,10 +301,6 @@ struct io_ring_ctx {
 	/* if all else fails... */
 	struct io_kiocb		*fallback_req;
 
-#if defined(CONFIG_UNIX)
-	struct socket		*ring_sock;
-#endif
-
 	struct idr		io_buffer_idr;
 
 	struct idr		personality_idr;
@@ -6732,15 +6727,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
-#if defined(CONFIG_UNIX)
-	if (ctx->ring_sock) {
-		struct sock *sock = ctx->ring_sock->sk;
-		struct sk_buff *skb;
-
-		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
-			kfree_skb(skb);
-	}
-#else
 	int i;
 
 	for (i = 0; i < ctx->nr_user_files; i++) {
@@ -6750,7 +6736,6 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 		if (file)
 			fput(file);
 	}
-#endif
 }
 
 static void io_file_ref_kill(struct percpu_ref *ref)
 {
@@ -6819,104 +6804,6 @@ static void io_finish_async(struct io_ring_ctx *ctx)
 	}
 }
-#if defined(CONFIG_UNIX)
-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * loops in the file referencing.
- */
-static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
-{
-	struct sock *sk = ctx->ring_sock->sk;
-	struct scm_fp_list *fpl;
-	struct sk_buff *skb;
-	int i, nr_files;
-
-	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
-	if (!fpl)
-		return -ENOMEM;
-
-	skb = alloc_skb(0, GFP_KERNEL);
-	if (!skb) {
-		kfree(fpl);
-		return -ENOMEM;
-	}
-
-	skb->sk = sk;
-	skb->scm_io_uring = 1;
-
-	nr_files = 0;
-	fpl->user = get_uid(ctx->user);
-	for (i = 0; i < nr; i++) {
-		struct file *file = io_file_from_index(ctx, i + offset);
-
-		if (!file)
-			continue;
-		fpl->fp[nr_files] = get_file(file);
-		unix_inflight(fpl->user, fpl->fp[nr_files]);
-		nr_files++;
-	}
-
-	if (nr_files) {
-		fpl->max = SCM_MAX_FD;
-		fpl->count = nr_files;
-		UNIXCB(skb).fp = fpl;
-		skb->destructor = unix_destruct_scm;
-		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
-		skb_queue_head(&sk->sk_receive_queue, skb);
-
-		for (i = 0; i < nr_files; i++)
-			fput(fpl->fp[i]);
-	} else {
-		kfree_skb(skb);
-		kfree(fpl);
-	}
-
-	return 0;
-}
-
-/*
- * If UNIX sockets are enabled, fd passing can cause a reference cycle which
- * causes regular reference counting to break down. We rely on the UNIX
- * garbage collection to take care of this problem for us.
- */
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
-	unsigned left, total;
-	int ret = 0;
-
-	total = 0;
-	left = ctx->nr_user_files;
-	while (left) {
-		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
-
-		ret = __io_sqe_files_scm(ctx, this_files, total);
-		if (ret)
-			break;
-		left -= this_files;
-		total += this_files;
-	}
-
-	if (!ret)
-		return 0;
-
-	while (total < ctx->nr_user_files) {
-		struct file *file = io_file_from_index(ctx, total);
-
-		if (file)
-			fput(file);
-		total++;
-	}
-
-	return ret;
-}
-#else
-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
-{
-	return 0;
-}
-#endif
-
 static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
 				    unsigned nr_tables, unsigned nr_files)
 {
@@ -6946,64 +6833,7 @@ static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
 static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
 {
-#if defined(CONFIG_UNIX)
-	struct sock *sock = ctx->ring_sock->sk;
-	struct sk_buff_head list, *head = &sock->sk_receive_queue;
-	struct sk_buff *skb;
-	int i;
-
-	__skb_queue_head_init(&list);
-
-	/*
-	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
-	 * remove this entry and rearrange the file array.
-	 */
-	skb = skb_dequeue(head);
-	while (skb) {
-		struct scm_fp_list *fp;
-
-		fp = UNIXCB(skb).fp;
-		for (i = 0; i < fp->count; i++) {
-			int left;
-
-			if (fp->fp[i] != file)
-				continue;
-
-			unix_notinflight(fp->user, fp->fp[i]);
-			left = fp->count - 1 - i;
-			if (left) {
-				memmove(&fp->fp[i], &fp->fp[i + 1],
-					left * sizeof(struct file *));
-			}
-			fp->count--;
-			if (!fp->count) {
-				kfree_skb(skb);
-				skb = NULL;
-			} else {
-				__skb_queue_tail(&list, skb);
-			}
-			fput(file);
-			file = NULL;
-			break;
-		}
-
-		if (!file)
-			break;
-
-		__skb_queue_tail(&list, skb);
-
-		skb = skb_dequeue(head);
-	}
-
-	if (skb_peek(&list)) {
-		spin_lock_irq(&head->lock);
-		while ((skb = __skb_dequeue(&list)) != NULL)
-			__skb_queue_tail(head, skb);
-		spin_unlock_irq(&head->lock);
-	}
-#else
 	fput(file);
-#endif
 }
 struct io_file_put {
@@ -7180,12 +7010,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		table->files[index] = file;
 	}
 
-	ret = io_sqe_files_scm(ctx);
-	if (ret) {
-		io_sqe_files_unregister(ctx);
-		return ret;
-	}
-
 	ref_node = alloc_fixed_file_ref_node(ctx);
 	if (IS_ERR(ref_node)) {
 		io_sqe_files_unregister(ctx);
@@ -7810,13 +7634,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_destroy_buffers(ctx);
 	idr_destroy(&ctx->personality_idr);
 
-#if defined(CONFIG_UNIX)
-	if (ctx->ring_sock) {
-		ctx->ring_sock->file = NULL; /* so that iput() is called */
-		sock_release(ctx->ring_sock);
-	}
-#endif
-
 	io_mem_free(ctx->rings);
 	io_mem_free(ctx->sq_sqes);
@@ -8437,21 +8254,13 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 /*
  * Allocate an anonymous fd, this is what constitutes the application
  * visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
+ * fd to gain access to the SQ/CQ ring details.
  */
 static int io_uring_get_fd(struct io_ring_ctx *ctx)
 {
 	struct file *file;
 	int ret;
 
-#if defined(CONFIG_UNIX)
-	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
-				&ctx->ring_sock);
-	if (ret)
-		return ret;
-#endif
-
 	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
 	if (ret < 0)
 		goto err;
@@ -8464,16 +8273,9 @@ static int io_uring_get_fd(struct io_ring_ctx *ctx)
 		goto err;
 	}
 
-#if defined(CONFIG_UNIX)
-	ctx->ring_sock->file = file;
-#endif
 	fd_install(ret, file);
 	return ret;
 err:
-#if defined(CONFIG_UNIX)
-	sock_release(ctx->ring_sock);
-	ctx->ring_sock = NULL;
-#endif
 	return ret;
 }