From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-5.7-rc1
commit 594506fec5faec2b1ec82ad6fb0c8132512fc459
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
The rule is simple: any async handler gets a submission ref and should put it at the end. Make them all follow it, and so be more consistent.
This is a preparation patch, and as io_wq_assign_next() currently won't ever work, it doesn't bother to use io_put_req_find_next() instead of io_put_req().
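For illustration only (not part of the patch), a minimal sketch of the shape the async finish handlers take after this change; io_foo_finish() and __io_foo() are hypothetical stand-ins for io_fsync_finish()/__io_fsync() and the other handlers touched below:

/*
 * Sketch, not applied code: the handler itself drops the submission
 * reference at the end, instead of the submission path dropping it
 * before punting to async context.
 */
static void io_foo_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (io_req_cancelled(req))
		return;
	__io_foo(req, &nxt);		/* do the actual work */
	io_put_req(req);		/* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}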
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
refcount_inc_not_zero() -> refcount_inc() fix.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 54acd816c7dd..b56b3ff5e519 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2547,7 +2547,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	if (req->work.flags & IO_WQ_WORK_CANCEL) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
+		io_double_put_req(req);
 		return true;
 	}
 
@@ -2597,6 +2597,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_fsync(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2606,7 +2607,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fsync always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fsync_finish;
 		return -EAGAIN;
 	}
@@ -2618,9 +2618,6 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
 {
 	int ret;
 
-	if (io_req_cancelled(req))
-		return;
-
 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
 				req->sync.len);
 	if (ret < 0)
@@ -2634,7 +2631,10 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
+	if (io_req_cancelled(req))
+		return;
 	__io_fallocate(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2656,7 +2656,6 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fallocate always requiring blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fallocate_finish;
 		return -EAGAIN;
 	}
@@ -2965,6 +2964,7 @@ static void io_close_finish(struct io_wq_work **workptr)
 
 	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2981,6 +2981,9 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 
 	/* if the file has a flush method, be safe and punt to async */
 	if (req->close.put_file->f_op->flush && force_nonblock) {
+		/* submission ref will be dropped, take it for async */
+		refcount_inc(&req->refs);
+
 		req->work.func = io_close_finish;
 		/*
 		 * Do manual async queue here to avoid grabbing files - we don't
@@ -3038,6 +3041,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_sync_file_range(req, &nxt);
+	io_put_req(req); /* put submission ref */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -3047,7 +3051,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* sync_file_range always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_sync_file_range_finish;
 		return -EAGAIN;
 	}
@@ -3416,11 +3419,10 @@ static void io_accept_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
-	io_put_req(req);
-
 	if (io_req_cancelled(req))
 		return;
 	__io_accept(req, &nxt, false);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -4677,17 +4679,14 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		} while (1);
 	}
 
-	/* drop submission reference */
-	io_put_req(req);
-
 	if (ret) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, ret);
 		io_put_req(req);
 	}
 
-	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt)
+	io_put_req(req); /* drop submission reference */
+	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
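For reference, a sketch of io_req_cancelled() with the first hunk applied (the lines outside the hunk are reconstructed from context, so treat them as an approximation): because the async handlers now hold the submission reference until their final io_put_req(), an early cancellation exit must drop two references, which is why io_put_req() becomes io_double_put_req() here.

static bool io_req_cancelled(struct io_kiocb *req)
{
	if (req->work.flags & IO_WQ_WORK_CANCEL) {
		req_set_fail_links(req);
		io_cqring_add_event(req, -ECANCELED);
		/* handler returns right after this, so drop both refs */
		io_double_put_req(req);
		return true;
	}

	return false;
}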