From: Jens Axboe <axboe@kernel.dk>
mainline inclusion
from mainline-5.9-rc1
commit c2c4c83c58cbca23527fee93b49738a5a84272a1
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
Since we now have the io_req_task_work_add() helper in the 5.9 branch, convert
the existing users of task_work_add() to use this new helper.
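For reference, the converted call sites all end up with roughly this shape
(an illustrative sketch only, not part of the diff below; the real hunks
follow):

	/* Queue req->task_work on the task; the helper picks TWA_SIGNAL vs. a
	 * plain wakeup for the SQPOLL kernel thread. */
	ret = io_req_task_work_add(req, &req->task_work);
	if (unlikely(ret)) {
		/* Could not queue on the original task, punt to the io-wq task. */
		struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

		task_work_add(tsk, &req->task_work, 0);
		wake_up_process(tsk);
	}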
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	fs/io_uring.c
[0ba9c9edcd15 ("io_uring: use TWA_SIGNAL for task_work uncondtionally") include first,
 6d816e088c35 ("io_uring: hold 'ctx' reference around task_work queue + execute") include first]
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c | 59 ++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index affde6571d6c..a629e16d1122 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1756,6 +1756,29 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 	return __io_req_find_next(req);
 }
 
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+	struct task_struct *tsk = req->task;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret, notify;
+
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+	 * processing task_work. There's no reliable way to tell if TWA_RESUME
+	 * will do the job.
+	 */
+	notify = 0;
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+		notify = TWA_SIGNAL;
+
+	ret = task_work_add(tsk, cb, notify);
+	if (!ret)
+		wake_up_process(tsk);
+
+	return ret;
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1802,19 +1825,20 @@ static void io_req_task_submit(struct callback_head *cb)
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
-	struct task_struct *tsk = req->task;
 	int ret;
 
 	init_task_work(&req->task_work, io_req_task_submit);
 	percpu_ref_get(&req->ctx->refs);
 
-	ret = task_work_add(tsk, &req->task_work, true);
+	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
+		struct task_struct *tsk;
+
 		init_task_work(&req->task_work, io_req_task_cancel);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, true);
+		task_work_add(tsk, &req->task_work, 0);
+		wake_up_process(tsk);
 	}
-	wake_up_process(tsk);
 }
 
 static void io_queue_next(struct io_kiocb *req)
@@ -4280,33 +4304,9 @@ struct io_poll_table {
 	int error;
 };
 
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
-{
-	struct task_struct *tsk = req->task;
-	struct io_ring_ctx *ctx = req->ctx;
-	int ret, notify;
-
-	/*
-	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
-	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
-	 * processing task_work. There's no reliable way to tell if TWA_RESUME
-	 * will do the job.
-	 */
-	notify = 0;
-	if (!(ctx->flags & IORING_SETUP_SQPOLL))
-		notify = TWA_SIGNAL;
-
-	ret = task_work_add(tsk, cb, notify);
-	if (!ret)
-		wake_up_process(tsk);
-
-	return ret;
-}
-
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 			   __poll_t mask, task_work_func_t func)
 {
-	struct task_struct *tsk;
 	int ret;
 
 	/* for instances that support it check for an event match first: */
@@ -4317,7 +4317,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 	list_del_init(&poll->wait.entry);
 
-	tsk = req->task;
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	percpu_ref_get(&req->ctx->refs);
@@ -4330,6 +4329,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 */
 	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
+		struct task_struct *tsk;
+
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
 		task_work_add(tsk, &req->task_work, 0);