From: Jens Axboe <axboe@kernel.dk>
mainline inclusion
from mainline-5.9-rc1
commit a1d7c393c4711a9ce6c239c3ab053a50dc96505a
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
A bit more surgery required here, as completions are generally done through the kiocb->ki_complete() callback, even if they complete inline. This enables the regular read/write path to use the io_comp_state logic to batch inline completions.
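For illustration only, the following is a minimal standalone sketch of the batching idea described above; the names (comp_state, post_cqe, complete_rw) and the userspace setting are invented for the example and are not the io_uring implementation. With a completion state passed down, inline completions are queued and posted together on a flush; with no state (NULL), each completion is posted immediately, as in the callers in this patch that pass NULL.

/* Toy userspace model of batched inline completions; names are illustrative. */
#include <stdio.h>

#define MAX_BATCH 32

struct comp_state {
	int nr;
	long results[MAX_BATCH];	/* results queued for a batched flush */
};

static void post_cqe(long res)
{
	printf("post completion: res=%ld\n", res);	/* stands in for one CQE */
}

static void flush_comp_state(struct comp_state *cs)
{
	for (int i = 0; i < cs->nr; i++)	/* post everything gathered so far */
		post_cqe(cs->results[i]);
	cs->nr = 0;
}

static void complete_rw(long res, struct comp_state *cs)
{
	if (!cs) {			/* no batching context: post right away */
		post_cqe(res);
		return;
	}
	if (cs->nr == MAX_BATCH)	/* batch full: drain before queueing more */
		flush_comp_state(cs);
	cs->results[cs->nr++] = res;	/* defer, posted with the whole batch */
}

int main(void)
{
	struct comp_state cs = { 0 };

	complete_rw(512, &cs);		/* two reads that completed inline ... */
	complete_rw(4096, &cs);
	flush_comp_state(&cs);		/* ... are posted in one flush */
	complete_rw(-11, NULL);		/* no state available: posted immediately */
	return 0;
}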
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	fs/io_uring.c
[include merge conflict 2237d76530eb]
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cdb068e6e642..81ddaafca443 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -912,7 +912,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 				 struct io_uring_files_update *ip,
 				 unsigned nr_args);
 static int io_grab_files(struct io_kiocb *req);
-static void io_complete_rw_common(struct kiocb *kiocb, long res);
+static void io_complete_rw_common(struct kiocb *kiocb, long res,
+				  struct io_comp_state *cs);
 static void io_cleanup_req(struct io_kiocb *req);
 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
 		       int fd, struct file **out_file, bool fixed);
@@ -1828,7 +1829,7 @@ static void io_iopoll_queue(struct list_head *again)
 
 		/* shouldn't happen unless io_uring is dying, cancel reqs */
 		if (unlikely(!current->mm)) {
-			io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
+			io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
 			io_put_req(req);
 			continue;
 		}
@@ -2047,7 +2048,8 @@ static inline void req_set_fail_links(struct io_kiocb *req)
 		req->flags |= REQ_F_FAIL_LINK;
 }
 
-static void io_complete_rw_common(struct kiocb *kiocb, long res)
+static void io_complete_rw_common(struct kiocb *kiocb, long res,
+				  struct io_comp_state *cs)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 	int cflags = 0;
@@ -2059,15 +2061,20 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 		req_set_fail_links(req);
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_kbuf(req);
-	io_cqring_add_event(req, res, cflags);
+	__io_req_complete(req, res, cflags, cs);
+}
+
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+			     struct io_comp_state *cs)
+{
+	io_complete_rw_common(&req->rw.kiocb, res, cs);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-	io_complete_rw_common(kiocb, res);
-	io_put_req(req);
+	__io_complete_rw(req, res, res2, NULL);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2278,14 +2285,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 	}
 }
 
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
+		       struct io_comp_state *cs)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
 	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
-		io_complete_rw(kiocb, ret, 0);
+		__io_complete_rw(req, ret, 0, cs);
 	else
 		io_rw_done(kiocb, ret);
 }
@@ -2709,7 +2717,8 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return io_rw_prep_async(req, READ, force_nonblock);
 }
 
-static int io_read(struct io_kiocb *req, bool force_nonblock)
+static int io_read(struct io_kiocb *req, bool force_nonblock,
+		   struct io_comp_state *cs)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
@@ -2755,7 +2764,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) &&
 		    ret2 == -EAGAIN)
 			goto copy_iov;
-		kiocb_done(kiocb, ret2);
+		kiocb_done(kiocb, ret2, cs);
 	} else {
 copy_iov:
 		ret = io_setup_async_rw(req, io_size, iovec,
@@ -2795,7 +2804,8 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return io_rw_prep_async(req, WRITE, force_nonblock);
 }
 
-static int io_write(struct io_kiocb *req, bool force_nonblock)
+static int io_write(struct io_kiocb *req, bool force_nonblock,
+		    struct io_comp_state *cs)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
@@ -2872,7 +2882,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) &&
 		    ret2 == -EAGAIN)
 			goto copy_iov;
-		kiocb_done(kiocb, ret2);
+		kiocb_done(kiocb, ret2, cs);
 	} else {
 copy_iov:
 		ret = io_setup_async_rw(req, io_size, iovec,
@@ -5330,7 +5340,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret < 0)
 				break;
 		}
-		ret = io_read(req, force_nonblock);
+		ret = io_read(req, force_nonblock, cs);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
@@ -5340,7 +5350,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			if (ret < 0)
 				break;
 		}
-		ret = io_write(req, force_nonblock);
+		ret = io_write(req, force_nonblock, cs);
 		break;
 	case IORING_OP_FSYNC:
 		if (sqe) {