From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-5.8-rc1
commit 0cdaf760f42eb8e8a714c1cc017423e5da6d4936
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
A submission is "async" iff it's done by the SQPOLL thread. Instead of passing an @async flag into io_submit_sqes(), deduce it from ctx->flags.
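For readers skimming the diff below, the whole change boils down to the new io_async_submit() helper: the @async bool that used to be threaded through io_init_req() and io_submit_sqes() is now derived from the ring's setup flags. A minimal user-space sketch of that idea follows; the struct here is a simplified stand-in for the kernel's io_ring_ctx, and only the flag value mirrors the uapi header.

#include <stdbool.h>
#include <stdio.h>

#define IORING_SETUP_SQPOLL	(1U << 1)	/* ring is driven by a kernel SQ thread */

struct io_ring_ctx {
	unsigned int flags;			/* simplified stand-in for the real context */
};

/* A submission is async exactly when the ring was set up with SQPOLL. */
static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

int main(void)
{
	struct io_ring_ctx sqpoll_ring = { .flags = IORING_SETUP_SQPOLL };
	struct io_ring_ctx plain_ring = { .flags = 0 };

	/* SQPOLL ring: SQEs are picked up by the SQ thread -> async */
	printf("sqpoll ring: async=%d\n", io_async_submit(&sqpoll_ring));
	/* plain ring: SQEs are submitted from io_uring_enter() -> sync */
	printf("plain ring:  async=%d\n", io_async_submit(&plain_ring));
	return 0;
}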
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a37d14aed0a1..dbb75517ef37 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -627,7 +627,6 @@ struct io_kiocb {
 	struct io_async_ctx		*io;
 	int				cflags;
-	bool				needs_fixed_file;
 	u8				opcode;
 
 	u16				buf_index;
@@ -890,6 +889,11 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static void io_file_put_work(struct work_struct *work);
 
+static inline bool io_async_submit(struct io_ring_ctx *ctx)
+{
+	return ctx->flags & IORING_SETUP_SQPOLL;
+}
+
 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -5421,7 +5425,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
 	bool fixed;
 
 	fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
-	if (unlikely(!fixed && req->needs_fixed_file))
+	if (unlikely(!fixed && io_async_submit(req->ctx)))
 		return -EBADF;
 
 	return io_file_get(state, req, fd, &req->file, fixed);
@@ -5800,7 +5804,7 @@ static inline void io_consume_sqe(struct io_ring_ctx *ctx)
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		       const struct io_uring_sqe *sqe,
-		       struct io_submit_state *state, bool async)
+		       struct io_submit_state *state)
 {
 	unsigned int sqe_flags;
 	int id;
@@ -5821,7 +5825,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	refcount_set(&req->refs, 2);
 	req->task = NULL;
 	req->result = 0;
-	req->needs_fixed_file = async;
 	INIT_IO_WORK(&req->work, io_wq_submit_work);
 
 	if (unlikely(req->opcode >= IORING_OP_LAST))
@@ -5862,7 +5865,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  struct file *ring_file, int ring_fd, bool async)
+			  struct file *ring_file, int ring_fd)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
@@ -5906,7 +5909,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 
-		err = io_init_req(ctx, req, sqe, statep, async);
+		err = io_init_req(ctx, req, sqe, statep);
 		io_consume_sqe(ctx);
 		/* will complete beyond this point, count as submitted */
 		submitted++;
@@ -5919,7 +5922,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		}
 
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
-					  true, async);
+					  true, io_async_submit(ctx));
 		err = io_submit_sqe(req, sqe, &link);
 		if (err)
 			goto fail_req;
@@ -6059,7 +6062,7 @@ static int io_sq_thread(void *data)
 		}
 
 		mutex_lock(&ctx->uring_lock);
-		ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
+		ret = io_submit_sqes(ctx, to_submit, NULL, -1);
 		mutex_unlock(&ctx->uring_lock);
 		timeout = jiffies + ctx->sq_thread_idle;
 	}
@@ -7567,7 +7570,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		submitted = to_submit;
 	} else if (to_submit) {
 		mutex_lock(&ctx->uring_lock);
-		submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
+		submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
 		mutex_unlock(&ctx->uring_lock);
 
 		if (submitted != to_submit)