From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion from mainline-5.9-rc1 commit 0be0b0e33b0bfd08264b108512e44b3907fe987b category: feature bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27 CVE: NA ---------------------------
Greatly simplify io_async_task_func() removing duplicated functionality of __io_req_task_submit(). This does one extra spin lock/unlock for the cancelled poll case, but that shouldn't happen often.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts: fs/io_uring.c [807abcb08834 ("io_uring: ensure double poll additions work with both request types") and 28cea78af449 ("io_uring: allow non-fixed files with SQPOLL") include first]
Signed-off-by: yangerkun <yangerkun@huawei.com> Reviewed-by: zhangyi (F) <yi.zhang@huawei.com> Signed-off-by: Cheng Jian <cj.chengjian@huawei.com> --- fs/io_uring.c | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c index af1a4dc6c9c8..14dcba1da6be 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1767,6 +1767,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
__set_current_state(TASK_RUNNING); if (!__io_sq_thread_acquire_mm(ctx)) { + __io_sq_thread_acquire_files(ctx); mutex_lock(&ctx->uring_lock); __io_queue_sqe(req, NULL, NULL); mutex_unlock(&ctx->uring_lock); @@ -4517,7 +4518,6 @@ static void io_async_task_func(struct callback_head *cb) struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); struct async_poll *apoll = req->apoll; struct io_ring_ctx *ctx = req->ctx; - bool canceled = false;
trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
@@ -4528,15 +4528,8 @@ static void io_async_task_func(struct callback_head *cb) }
/* If req is still hashed, it cannot have been canceled. Don't check. */ - if (hash_hashed(&req->hash_node)) { + if (hash_hashed(&req->hash_node)) hash_del(&req->hash_node); - } else { - canceled = READ_ONCE(apoll->poll.canceled); - if (canceled) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(ctx); - } - }
io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); @@ -4548,22 +4541,10 @@ static void io_async_task_func(struct callback_head *cb) kfree(apoll->double_poll); kfree(apoll);
- if (!canceled) { - __set_current_state(TASK_RUNNING); - if (__io_sq_thread_acquire_mm(ctx)) { - io_cqring_add_event(req, -EFAULT, 0); - goto end_req; - } - __io_sq_thread_acquire_files(ctx); - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(req, NULL, NULL); - mutex_unlock(&ctx->uring_lock); - } else { - io_cqring_ev_posted(ctx); -end_req: - req_set_fail_links(req); - io_double_put_req(req); - } + if (!READ_ONCE(apoll->poll.canceled)) + __io_req_task_submit(req); + else + __io_req_task_cancel(req, -ECANCELED); }
static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,