From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-5.9-rc1
commit e6543a816edca00b6b4c48625d142059d7211059
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
io_free_req_many() is used only for iopoll requests, i.e. reads/writes. Hence no need to batch inflight unhooking. For safety, it'll be done by io_dismantle_req(), which replaces __io_req_aux_free(), and looks more solid and cleaner.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	fs/io_uring.c
	[ecfc51777487 ("io_uring: fix potential use after free on fallback
	request free") include first]
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 46 +++++++++++-----------------------------------
 1 file changed, 11 insertions(+), 35 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c index edb129bd316f..64d652c2d776 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1475,7 +1475,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file, fput(file); }
-static void __io_req_aux_free(struct io_kiocb *req) +static void io_dismantle_req(struct io_kiocb *req) { if (req->flags & REQ_F_NEED_CLEANUP) io_cleanup_req(req); @@ -1485,15 +1485,9 @@ static void __io_req_aux_free(struct io_kiocb *req) io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); __io_put_req_task(req); io_req_work_drop_env(req); -} - -static void __io_free_req(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - - __io_req_aux_free(req);
if (req->flags & REQ_F_INFLIGHT) { + struct io_ring_ctx *ctx = req->ctx; unsigned long flags;
spin_lock_irqsave(&ctx->inflight_lock, flags); @@ -1502,7 +1496,13 @@ static void __io_free_req(struct io_kiocb *req) wake_up(&ctx->inflight_wait); spin_unlock_irqrestore(&ctx->inflight_lock, flags); } +} + +static void __io_free_req(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx;
+ io_dismantle_req(req); if (likely(!io_is_fallback_req(req))) kmem_cache_free(req_cachep, req); else @@ -1521,35 +1521,11 @@ static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb) if (!rb->to_free) return; if (rb->need_iter) { - int i, inflight = 0; - unsigned long flags; + int i;
- for (i = 0; i < rb->to_free; i++) { - struct io_kiocb *req = rb->reqs[i]; - - if (req->flags & REQ_F_INFLIGHT) - inflight++; - __io_req_aux_free(req); - } - if (!inflight) - goto do_free; - - spin_lock_irqsave(&ctx->inflight_lock, flags); - for (i = 0; i < rb->to_free; i++) { - struct io_kiocb *req = rb->reqs[i]; - - if (req->flags & REQ_F_INFLIGHT) { - list_del(&req->inflight_entry); - if (!--inflight) - break; - } - } - spin_unlock_irqrestore(&ctx->inflight_lock, flags); - - if (waitqueue_active(&ctx->inflight_wait)) - wake_up(&ctx->inflight_wait); + for (i = 0; i < rb->to_free; i++) + io_dismantle_req(rb->reqs[i]); } -do_free: kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); percpu_ref_put_many(&ctx->refs, rb->to_free); rb->to_free = rb->need_iter = 0;