From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-5.5-rc1
commit bbad27b2f622fa26d107f8a72c0cd5cc102dc56e
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
Always mark requests that carry an allocated sqe copy with REQ_F_FREE_SQE, and deallocate the copy in __io_free_req(). Freeing in one place is easier to follow and doesn't add edge cases.
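To illustrate the pattern the patch converges on, here is a minimal, self-contained C sketch (the names req_copy_sqe, req_free, struct req and struct sqe are illustrative only, and malloc/free stand in for kmalloc/kfree; this is not the kernel code itself): every site that attaches a heap-allocated sqe copy to a request also sets an ownership flag, and the single teardown path frees the copy based on that flag, so no individual error path needs its own kfree().

#include <stdlib.h>
#include <string.h>

#define REQ_F_FREE_SQE	(1U << 0)	/* request owns its sqe copy */

struct sqe { unsigned char opcode; };

struct req {
	unsigned int flags;
	const struct sqe *sqe;
};

/* Every allocation site records ownership the moment it copies. */
static int req_copy_sqe(struct req *req, const struct sqe *src)
{
	struct sqe *copy = malloc(sizeof(*copy));

	if (!copy)
		return -1;
	memcpy(copy, src, sizeof(*copy));
	req->sqe = copy;
	req->flags |= REQ_F_FREE_SQE;
	return 0;
}

/* The one common teardown path frees the copy iff the flag is set. */
static void req_free(struct req *req)
{
	if (req->flags & REQ_F_FREE_SQE)
		free((void *)req->sqe);
	/* ... other per-request cleanup ... */
}

With ownership carried on the request itself, error paths such as a failed io_grab_files() in the hunks below can simply goto err and let the common free path clean up.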
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 49 ++++++++++++++++++++++---------------------------
 1 file changed, 22 insertions(+), 27 deletions(-)
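One hunk below is worth calling out: io_iopoll_complete()'s batched free recycles requests wholesale without running per-request cleanup, so a request that now owns an sqe copy must be kept off that fast path. A hedged sketch of the selection test in C (flag values are placeholders and can_batch_free is a made-up name, not the kernel code):

#define REQ_F_FIXED_FILE	(1U << 0)
#define REQ_F_LINK		(1U << 1)
#define REQ_F_FREE_SQE		(1U << 2)

/*
 * Batch free is only safe for requests carrying no extra state:
 * fixed-file requests that are neither linked nor own an sqe copy.
 */
static int can_batch_free(unsigned int flags)
{
	return (flags & (REQ_F_FIXED_FILE | REQ_F_LINK | REQ_F_FREE_SQE))
		== REQ_F_FIXED_FILE;
}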
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b05bdd5d523e..ceb7ae870cf1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -833,6 +833,8 @@ static void __io_free_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (req->flags & REQ_F_FREE_SQE)
+		kfree(req->submit.sqe);
 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
 		fput(req->file);
 	if (req->flags & REQ_F_INFLIGHT) {
@@ -928,16 +930,11 @@ static void io_fail_links(struct io_kiocb *req)
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 
 	while (!list_empty(&req->link_list)) {
-		const struct io_uring_sqe *sqe_to_free = NULL;
-
 		link = list_first_entry(&req->link_list, struct io_kiocb, list);
 		list_del_init(&link->list);
 
 		trace_io_uring_fail_link(req, link);
 
-		if (link->flags & REQ_F_FREE_SQE)
-			sqe_to_free = link->submit.sqe;
-
 		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
 		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_link_cancel_timeout(link);
@@ -945,7 +942,6 @@ static void io_fail_links(struct io_kiocb *req)
 			io_cqring_fill_event(link, -ECANCELED);
 			__io_double_put_req(link);
 		}
-		kfree(sqe_to_free);
 		req->flags &= ~REQ_F_LINK_TIMEOUT;
 	}
 
@@ -1088,7 +1084,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			 * completions for those, only batch free for fixed
 			 * file and non-linked commands.
 			 */
-			if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
+			if (((req->flags &
+			      (REQ_F_FIXED_FILE|REQ_F_LINK|REQ_F_FREE_SQE)) ==
 			    REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
 				reqs[to_free++] = req;
 				if (to_free == ARRAY_SIZE(reqs))
@@ -2581,6 +2578,7 @@ static int io_req_defer(struct io_kiocb *req)
 	}
 
 	memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
+	req->flags |= REQ_F_FREE_SQE;
 	req->submit.sqe = sqe_copy;
 
 	trace_io_uring_defer(ctx, req, false);
@@ -2675,7 +2673,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 	struct io_wq_work *work = *workptr;
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct sqe_submit *s = &req->submit;
-	const struct io_uring_sqe *sqe = s->sqe;
 	struct io_kiocb *nxt = NULL;
 	int ret = 0;
 
@@ -2711,9 +2708,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		io_put_req(req);
 	}
 
-	/* async context always use a copy of the sqe */
-	kfree(sqe);
-
 	/* if a dependent link is ready, pass it back */
 	if (!ret && nxt) {
 		struct io_kiocb *link;
@@ -2912,23 +2906,24 @@ static void __io_queue_sqe(struct io_kiocb *req)
 		struct io_uring_sqe *sqe_copy;
 
 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
-		if (sqe_copy) {
-			s->sqe = sqe_copy;
-			if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
-				ret = io_grab_files(req);
-				if (ret) {
-					kfree(sqe_copy);
-					goto err;
-				}
-			}
+		if (!sqe_copy)
+			goto err;
 
-			/*
-			 * Queued up for async execution, worker will release
-			 * submit reference when the iocb is actually submitted.
-			 */
-			io_queue_async_work(req);
-			return;
+		s->sqe = sqe_copy;
+		req->flags |= REQ_F_FREE_SQE;
+
+		if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+			ret = io_grab_files(req);
+			if (ret)
+				goto err;
 		}
+
+		/*
+		 * Queued up for async execution, worker will release
+		 * submit reference when the iocb is actually submitted.
+		 */
+		io_queue_async_work(req);
+		return;
 	}
 
 err:
@@ -3023,7 +3018,6 @@ static void io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 			  struct io_kiocb **link)
 {
-	struct io_uring_sqe *sqe_copy;
 	struct sqe_submit *s = &req->submit;
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -3053,6 +3047,7 @@ static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 	 */
 	if (*link) {
 		struct io_kiocb *prev = *link;
+		struct io_uring_sqe *sqe_copy;
 
 		if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
 			ret = io_timeout_setup(req);