From: Jens Axboe <axboe@kernel.dk>
mainline inclusion from mainline-5.6-rc1 commit f86cd20c9454847a524ddbdcdec32c0380ed7c9b category: feature bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27 CVE: NA ---------------------------
We're not consistent in how the file table is grabbed and assigned if we have a command linked that requires the use of it.
Add ->file_table to the io_op_defs[] array, and use that to determine when to grab the table instead of having the handlers set it if they need to defer. This also means we can kill the IO_WQ_WORK_NEEDS_FILES flag. We always initialize work->files, so io-wq can just check for that.
Signed-off-by: Jens Axboe <axboe@kernel.dk> Conflicts: fs/io_uring.c [commit cebdb98617ae ("io_uring: add support for IORING_OP_OPENAT2") is not applied] Signed-off-by: yangerkun <yangerkun@huawei.com> Reviewed-by: zhangyi (F) <yi.zhang@huawei.com> Signed-off-by: Cheng Jian <cj.chengjian@huawei.com> --- fs/io-wq.c | 3 +-- fs/io-wq.h | 1 - fs/io_uring.c | 30 +++++++++++++++++++----------- 3 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c index 852bedffd508..41ce88543b81 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -477,8 +477,7 @@ static void io_worker_handle_work(struct io_worker *worker) if (work->flags & IO_WQ_WORK_CB) work->func(&work);
- if ((work->flags & IO_WQ_WORK_NEEDS_FILES) && - current->files != work->files) { + if (work->files && current->files != work->files) { task_lock(current); current->files = work->files; task_unlock(current); diff --git a/fs/io-wq.h b/fs/io-wq.h index c42602c58c56..50b3378febf2 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -7,7 +7,6 @@ enum { IO_WQ_WORK_CANCEL = 1, IO_WQ_WORK_HAS_MM = 2, IO_WQ_WORK_HASHED = 4, - IO_WQ_WORK_NEEDS_FILES = 16, IO_WQ_WORK_UNBOUND = 32, IO_WQ_WORK_INTERNAL = 64, IO_WQ_WORK_CB = 128, diff --git a/fs/io_uring.c b/fs/io_uring.c index 3c4ddee91f69..f92e1f261dea 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -604,6 +604,8 @@ struct io_op_def { unsigned unbound_nonreg_file : 1; /* opcode is not supported by this kernel */ unsigned not_supported : 1; + /* needs file table */ + unsigned file_table : 1; };
static const struct io_op_def io_op_defs[] = { @@ -662,6 +664,7 @@ static const struct io_op_def io_op_defs[] = { .needs_mm = 1, .needs_file = 1, .unbound_nonreg_file = 1, + .file_table = 1, }, [IORING_OP_ASYNC_CANCEL] = {}, [IORING_OP_LINK_TIMEOUT] = { @@ -680,12 +683,15 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_OPENAT] = { .needs_file = 1, .fd_non_neg = 1, + .file_table = 1, }, [IORING_OP_CLOSE] = { .needs_file = 1, + .file_table = 1, }, [IORING_OP_FILES_UPDATE] = { .needs_mm = 1, + .file_table = 1, }, [IORING_OP_STATX] = { .needs_mm = 1, @@ -729,6 +735,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req); static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_files_update *ip, unsigned nr_args); +static int io_grab_files(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -2528,10 +2535,8 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt, struct file *file; int ret;
- if (force_nonblock) { - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + if (force_nonblock) return -EAGAIN; - }
ret = build_open_flags(req->open.flags, req->open.mode, &op); if (ret) @@ -2750,10 +2755,8 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt, return ret;
/* if the file has a flush method, be safe and punt to async */ - if (req->close.put_file->f_op->flush && !io_wq_current_is_worker()) { - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + if (req->close.put_file->f_op->flush && !io_wq_current_is_worker()) goto eagain; - }
/* * No ->flush(), safely close from here and just punt the @@ -3197,7 +3200,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt, ret = __io_accept(req, nxt, force_nonblock); if (ret == -EAGAIN && force_nonblock) { req->work.func = io_accept_finish; - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; io_put_req(req); return -EAGAIN; } @@ -3920,10 +3922,8 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock) struct io_uring_files_update up; int ret;
- if (force_nonblock) { - req->work.flags |= IO_WQ_WORK_NEEDS_FILES; + if (force_nonblock) return -EAGAIN; - }
up.offset = req->files_update.offset; up.fds = req->files_update.arg; @@ -3944,6 +3944,12 @@ static int io_req_defer_prep(struct io_kiocb *req, { ssize_t ret = 0;
+ if (io_op_defs[req->opcode].file_table) { + ret = io_grab_files(req); + if (unlikely(ret)) + return ret; + } + io_req_work_grab_env(req, &io_op_defs[req->opcode]);
switch (req->opcode) { @@ -4366,6 +4372,8 @@ static int io_grab_files(struct io_kiocb *req) int ret = -EBADF; struct io_ring_ctx *ctx = req->ctx;
+ if (req->work.files) + return 0; if (!ctx->ring_file) return -EBADF;
@@ -4484,7 +4492,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || (req->flags & REQ_F_MUST_PUNT))) { punt: - if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) { + if (io_op_defs[req->opcode].file_table) { ret = io_grab_files(req); if (ret) goto err;