From: Jens Axboe <axboe@kernel.dk>
mainline inclusion
from mainline-5.6-rc1
commit eddc7ef52a6b37b7ba3d1c8a8fbb63d5d9914f8a
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
This provides support for async statx(2) through io_uring.
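
For reference, a minimal userspace sketch of driving the new opcode (not part of this patch): it assumes liburing only for ring setup/teardown and fills the raw SQE fields by hand, mirroring what io_statx_prep() reads below: fd carries the dfd, addr the pathname, len the statx mask, addr2 the user struct statx buffer, and statx_flags the AT_* flags. The path and mask values are purely illustrative.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct statx stx;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_STATX;
	sqe->fd = AT_FDCWD;				/* dfd */
	sqe->addr = (unsigned long) "/etc/hostname";	/* pathname (example) */
	sqe->len = STATX_BASIC_STATS;			/* mask */
	sqe->addr2 = (unsigned long) &stx;		/* struct statx buffer */
	sqe->statx_flags = 0;				/* AT_* flags */

	io_uring_submit(&ring);
	if (!io_uring_wait_cqe(&ring, &cqe)) {
		if (cqe->res == 0)
			printf("stx_size=%llu\n",
			       (unsigned long long) stx.stx_size);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

On completion, cqe->res carries 0 or a negative errno, exactly as io_statx() posts it via io_cqring_add_event().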
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 fs/io_uring.c                 | 86 ++++++++++++++++++++++++++++++++++-
 include/uapi/linux/io_uring.h |  2 +
 2 files changed, 87 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8f47e53164f1..b8e5b742a00a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -379,9 +379,13 @@ struct io_sr_msg {
 struct io_open {
 	struct file			*file;
 	int				dfd;
-	umode_t				mode;
+	union {
+		umode_t			mode;
+		unsigned		mask;
+	};
 	const char __user		*fname;
 	struct filename			*filename;
+	struct statx __user		*buffer;
 	int				flags;
 };
 
@@ -2263,6 +2267,74 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
 	return 0;
 }
 
+static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	unsigned lookup_flags;
+	int ret;
+
+	if (sqe->ioprio || sqe->buf_index)
+		return -EINVAL;
+
+	req->open.dfd = READ_ONCE(sqe->fd);
+	req->open.mask = READ_ONCE(sqe->len);
+	req->open.fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+	req->open.flags = READ_ONCE(sqe->statx_flags);
+
+	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.flags))
+		return -EINVAL;
+
+	req->open.filename = getname_flags(req->open.fname, lookup_flags, NULL);
+	if (IS_ERR(req->open.filename)) {
+		ret = PTR_ERR(req->open.filename);
+		req->open.filename = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
+{
+	struct io_open *ctx = &req->open;
+	unsigned lookup_flags;
+	struct path path;
+	struct kstat stat;
+	int ret;
+
+	if (force_nonblock)
+		return -EAGAIN;
+
+	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->flags))
+		return -EINVAL;
+
+retry:
+	/* filename_lookup() drops it, keep a reference */
+	ctx->filename->refcnt++;
+
+	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
+			      NULL);
+	if (ret)
+		goto err;
+
+	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->flags);
+	path_put(&path);
+	if (retry_estale(ret, lookup_flags)) {
+		lookup_flags |= LOOKUP_REVAL;
+		goto retry;
+	}
+	if (!ret)
+		ret = cp_statx(&stat, ctx->buffer);
+err:
+	putname(ctx->filename);
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+	io_put_req_find_next(req, nxt);
+	return 0;
+}
+
 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	/*
@@ -3424,6 +3496,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	case IORING_OP_FILES_UPDATE:
 		ret = io_files_update_prep(req, sqe);
 		break;
+	case IORING_OP_STATX:
+		ret = io_statx_prep(req, sqe);
+		break;
 	default:
 		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
 				req->opcode);
@@ -3610,6 +3685,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 		ret = io_files_update(req, force_nonblock);
 		break;
+	case IORING_OP_STATX:
+		if (sqe) {
+			ret = io_statx_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_statx(req, nxt, force_nonblock);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -3696,6 +3779,7 @@ static int io_req_needs_file(struct io_kiocb *req, int fd)
 	case IORING_OP_LINK_TIMEOUT:
 		return 0;
 	case IORING_OP_OPENAT:
+	case IORING_OP_STATX:
 		return fd != -1;
 	default:
 		if (io_req_op_valid(req->opcode))
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ca436b9d4921..3f45f7c543de 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -35,6 +35,7 @@ struct io_uring_sqe {
 		__u32		accept_flags;
 		__u32		cancel_flags;
 		__u32		open_flags;
+		__u32		statx_flags;
 	};
 	__u64	user_data;	/* data to be passed back at completion time */
 	union {
@@ -81,6 +82,7 @@ enum {
 	IORING_OP_OPENAT,
 	IORING_OP_CLOSE,
 	IORING_OP_FILES_UPDATE,
+	IORING_OP_STATX,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,