From: Jens Axboe <axboe@kernel.dk>

mainline inclusion
from mainline-5.9-rc1
commit c40f63790ec957e9449056fb78d8c2523eff96b5
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
Currently links are always done in an async fashion, unless we catch them inline after we successfully complete a request without having to resort to blocking. This isn't necessarily the most efficient approach; it'd be better if we could just use the task_work handling for this.
Outside of saving an async jump, we can also do less prep work for these kinds of requests.
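For context, the dependent-link pattern this change targets looks roughly like the following from userspace. This is an illustrative sketch only, not part of this patch and not the literal examples/link-cp code (which uses readv/writev); queue_linked_rw is a made-up helper and error handling is omitted:

#include <liburing.h>

static int queue_linked_rw(struct io_uring *ring, int infd, int outfd,
			   void *buf, unsigned int len, off_t off)
{
	struct io_uring_sqe *sqe;

	/* read from infd into buf ... */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, infd, buf, len, off);
	/* ... and link it, so the write below only runs once the read completes */
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, outfd, buf, len, off);

	return io_uring_submit(ring);
}

Before this patch, the dependent write in such a chain is punted to the async offload threads once the read completes (unless it can be completed inline); with this patch, it is run from task_work in the submitting task instead.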
Running dependent links from the task_work handler yields some nice performance benefits. As an example, examples/link-cp from the liburing repository uses read+write links to implement a copy operation. Without this patch, a cache-cold 4G file read from a VM runs in about 3 seconds:
$ time examples/link-cp /data/file /dev/null
real	0m2.986s
user	0m0.051s
sys	0m2.843s
and a subsequent cache hot run looks like this:
$ time examples/link-cp /data/file /dev/null
real	0m0.898s
user	0m0.069s
sys	0m0.797s
With this patch in place, the cold case takes about 2.4 seconds:
$ time examples/link-cp /data/file /dev/null
real	0m2.400s
user	0m0.020s
sys	0m2.366s
and the cache hot case looks like this:
$ time examples/link-cp /data/file /dev/null
real	0m0.676s
user	0m0.010s
sys	0m0.665s
As expected, the (mostly) cache hot case yields the biggest improvement, running about 25% faster with this change, while the cache cold case yields about a 20% increase in performance. Outside of the performance increase, we're using less CPU as well, as we're not using the async offload threads at all for this anymore.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	fs/io_uring.c
[we backported 6d816e088c35 ("io_uring: hold 'ctx' reference around
task_work queue + execute") early, so the parts of that change relevant
to this patch are included here]
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 285 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 177 insertions(+), 108 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 108fb65aeb80..046017a5eeae 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -905,6 +905,7 @@ enum io_mem_account {
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
+static void io_double_put_req(struct io_kiocb *req);
 static void __io_double_put_req(struct io_kiocb *req);
 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
@@ -953,6 +954,100 @@ static void __io_put_req_task(struct io_kiocb *req)
 		put_task_struct(req->task);
 }
+static void io_sq_thread_drop_mm_files(void)
+{
+	struct files_struct *files = current->files;
+	struct mm_struct *mm = current->mm;
+
+	if (mm) {
+		unuse_mm(mm);
+		mmput(mm);
+		current->mm = NULL;
+	}
+	if (files) {
+		struct nsproxy *nsproxy = current->nsproxy;
+
+		task_lock(current);
+		current->files = NULL;
+		current->nsproxy = NULL;
+		task_unlock(current);
+		put_files_struct(files);
+		put_nsproxy(nsproxy);
+	}
+}
+
+static void __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
+{
+	if (!current->files) {
+		struct files_struct *files;
+		struct nsproxy *nsproxy;
+
+		task_lock(ctx->sqo_task);
+		files = ctx->sqo_task->files;
+		if (!files) {
+			task_unlock(ctx->sqo_task);
+			return;
+		}
+		atomic_inc(&files->count);
+		get_nsproxy(ctx->sqo_task->nsproxy);
+		nsproxy = ctx->sqo_task->nsproxy;
+		task_unlock(ctx->sqo_task);
+
+		task_lock(current);
+		current->files = files;
+		current->nsproxy = nsproxy;
+		task_unlock(current);
+	}
+}
+
+static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
+{
+	struct mm_struct *mm;
+
+	if (current->mm)
+		return 0;
+
+	/* Should never happen */
+	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
+		return -EFAULT;
+
+	task_lock(ctx->sqo_task);
+	mm = ctx->sqo_task->mm;
+	if (unlikely(!mm || !mmget_not_zero(mm)))
+		mm = NULL;
+	task_unlock(ctx->sqo_task);
+
+	if (mm) {
+		use_mm(mm);
+		return 0;
+	}
+
+	return -EFAULT;
+}
+
+static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+					 struct io_kiocb *req)
+{
+	const struct io_op_def *def = &io_op_defs[req->opcode];
+
+	if (def->needs_mm) {
+		int ret = __io_sq_thread_acquire_mm(ctx);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (def->needs_file || def->file_table)
+		__io_sq_thread_acquire_files(ctx);
+
+	return 0;
+}
+
+static inline void req_set_fail_links(struct io_kiocb *req)
+{
+	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
+		req->flags |= REQ_F_FAIL_LINK;
+}
+
 static void io_file_put_work(struct work_struct *work);
 /*
@@ -1645,14 +1740,79 @@ static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 		io_req_link_next(req, nxt);
 }
+static void __io_req_task_cancel(struct io_kiocb *req, int error)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	spin_lock_irq(&ctx->completion_lock);
+	io_cqring_fill_event(req, error);
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	io_cqring_ev_posted(ctx);
+	req_set_fail_links(req);
+	io_double_put_req(req);
+}
+
+static void io_req_task_cancel(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+
+	__io_req_task_cancel(req, -ECANCELED);
+}
+
+static void __io_req_task_submit(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	__set_current_state(TASK_RUNNING);
+	if (!__io_sq_thread_acquire_mm(ctx)) {
+		mutex_lock(&ctx->uring_lock);
+		__io_queue_sqe(req, NULL, NULL);
+		mutex_unlock(&ctx->uring_lock);
+	} else {
+		__io_req_task_cancel(req, -EFAULT);
+	}
+}
+
+static void io_req_task_submit(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
+
+	__io_req_task_submit(req);
+	percpu_ref_put(&ctx->refs);
+}
+
+static void io_req_task_queue(struct io_kiocb *req)
+{
+	struct task_struct *tsk = req->task;
+	int ret;
+
+	init_task_work(&req->task_work, io_req_task_submit);
+	percpu_ref_get(&req->ctx->refs);
+
+	ret = task_work_add(tsk, &req->task_work, true);
+	if (unlikely(ret)) {
+		init_task_work(&req->task_work, io_req_task_cancel);
+		tsk = io_wq_get_task(req->ctx->io_wq);
+		task_work_add(tsk, &req->task_work, true);
+	}
+	wake_up_process(tsk);
+}
+
 static void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = NULL;
 	io_req_find_next(req, &nxt);
-	if (nxt)
-		io_queue_async_work(nxt);
+	if (nxt) {
+		if (nxt->flags & REQ_F_WORK_INITIALIZED)
+			io_queue_async_work(nxt);
+		else
+			io_req_task_queue(nxt);
+	}
 }
 static void io_free_req(struct io_kiocb *req)
@@ -2023,12 +2183,6 @@ static void kiocb_end_write(struct io_kiocb *req)
 	file_end_write(req->file);
 }
-static inline void req_set_fail_links(struct io_kiocb *req)
-{
-	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
-		req->flags |= REQ_F_FAIL_LINK;
-}
-
 static void io_complete_rw_common(struct kiocb *kiocb, long res,
 				  struct io_comp_state *cs)
 {
@@ -4362,94 +4516,6 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
-static void io_sq_thread_drop_mm_files(void)
-{
-	struct files_struct *files = current->files;
-	struct mm_struct *mm = current->mm;
-
-	if (mm) {
-		unuse_mm(mm);
-		mmput(mm);
-		current->mm = NULL;
-	}
-	if (files) {
-		struct nsproxy *nsproxy = current->nsproxy;
-
-		task_lock(current);
-		current->files = NULL;
-		current->nsproxy = NULL;
-		task_unlock(current);
-		put_files_struct(files);
-		put_nsproxy(nsproxy);
-	}
-}
-
-static void __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
-{
-	if (!current->files) {
-		struct files_struct *files;
-		struct nsproxy *nsproxy;
-
-		task_lock(ctx->sqo_task);
-		files = ctx->sqo_task->files;
-		if (!files) {
-			task_unlock(ctx->sqo_task);
-			return;
-		}
-		atomic_inc(&files->count);
-		get_nsproxy(ctx->sqo_task->nsproxy);
-		nsproxy = ctx->sqo_task->nsproxy;
-		task_unlock(ctx->sqo_task);
-
-		task_lock(current);
-		current->files = files;
-		current->nsproxy = nsproxy;
-		task_unlock(current);
-	}
-}
-
-static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
-{
-	struct mm_struct *mm;
-
-	if (current->mm)
-		return 0;
-
-	/* Should never happen */
-	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-		return -EFAULT;
-
-	task_lock(ctx->sqo_task);
-	mm = ctx->sqo_task->mm;
-	if (unlikely(!mm || !mmget_not_zero(mm)))
-		mm = NULL;
-	task_unlock(ctx->sqo_task);
-
-	if (mm) {
-		use_mm(mm);
-		return 0;
-	}
-
-	return -EFAULT;
-}
-
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
-					 struct io_kiocb *req)
-{
-	const struct io_op_def *def = &io_op_defs[req->opcode];
-
-	if (def->needs_mm) {
-		int ret = __io_sq_thread_acquire_mm(ctx);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (def->needs_file || def->file_table)
-		__io_sq_thread_acquire_files(ctx);
-
-	return 0;
-}
-
 static void io_async_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -5112,22 +5178,24 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock,
 }
 static int io_req_defer_prep(struct io_kiocb *req,
-			     const struct io_uring_sqe *sqe)
+			     const struct io_uring_sqe *sqe, bool for_async)
 {
 	ssize_t ret = 0;
 	if (!sqe)
 		return 0;
-	io_req_init_async(req);
+	if (for_async || (req->flags & REQ_F_WORK_INITIALIZED)) {
+		io_req_init_async(req);
-	if (io_op_defs[req->opcode].file_table) {
-		ret = io_grab_files(req);
-		if (unlikely(ret))
-			return ret;
-	}
+		if (io_op_defs[req->opcode].file_table) {
+			ret = io_grab_files(req);
+			if (unlikely(ret))
+				return ret;
+		}
-	io_req_work_grab_env(req, &io_op_defs[req->opcode]);
+		io_req_work_grab_env(req, &io_op_defs[req->opcode]);
+	}
 	switch (req->opcode) {
 	case IORING_OP_NOP:
@@ -5238,7 +5306,7 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req->io) {
 		if (io_alloc_async_ctx(req))
 			return -EAGAIN;
-		ret = io_req_defer_prep(req, sqe);
+		ret = io_req_defer_prep(req, sqe, true);
 		if (ret < 0)
 			return ret;
 	}
@@ -5841,7 +5909,7 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			ret = -EAGAIN;
 			if (io_alloc_async_ctx(req))
 				goto fail_req;
-			ret = io_req_defer_prep(req, sqe);
+			ret = io_req_defer_prep(req, sqe, true);
 			if (unlikely(ret < 0))
 				goto fail_req;
 		}
@@ -5898,13 +5966,14 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		if (io_alloc_async_ctx(req))
 			return -EAGAIN;
-		ret = io_req_defer_prep(req, sqe);
+		ret = io_req_defer_prep(req, sqe, false);
 		if (ret) {
 			/* fail even hard links since we don't submit */
 			head->flags |= REQ_F_FAIL_LINK;
 			return ret;
 		}
 		trace_io_uring_link(ctx, req, head);
+		io_get_req_task(req);
 		list_add_tail(&req->link_list, &head->link_list);
 		/* last request of a link, enqueue the link */
@@ -5924,7 +5993,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (io_alloc_async_ctx(req))
 				return -EAGAIN;
-			ret = io_req_defer_prep(req, sqe);
+			ret = io_req_defer_prep(req, sqe, true);
 			if (ret)
 				req->flags |= REQ_F_FAIL_LINK;
 			*link = req;